From 309cc709f6d7bc77419eff9adc941d701446db4c Mon Sep 17 00:00:00 2001
From: Colin Taylor
Date: Fri, 3 Oct 2025 16:31:59 -0700
Subject: [PATCH] [monarch] [OSS] fix crawler example to use updated API

---
 docs/source/examples/crawler.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/source/examples/crawler.py b/docs/source/examples/crawler.py
index d5bd65862..ddeb4238a 100644
--- a/docs/source/examples/crawler.py
+++ b/docs/source/examples/crawler.py
@@ -126,20 +126,20 @@ async def main():
     start_time = time.time()

     # Start up a ProcMesh.
-    local_proc_mesh: ProcMesh = await this_host().spawn_procs(
+    local_proc_mesh: ProcMesh = this_host().spawn_procs(
         per_host={"procs": NUM_CRAWLERS}
     )

     # Create queues across the mesh and use slice to target the first one; we will not use the rest.
     # TODO: One ProcMesh::slice is implemented, avoid spawning the extra ones here.
-    all_queues = await local_proc_mesh.spawn("queues", QueueActor)
+    all_queues = local_proc_mesh.spawn("queues", QueueActor)
     target_queue = all_queues.slice(procs=slice(0, 1))

     # Prime the queue with the base URL we want to crawl.
     await target_queue.insert.call_one(BASE, DEPTH)

     # Make the crawlers and pass in the queues; crawlers will just use the first one as well.
-    crawlers = await local_proc_mesh.spawn("crawlers", CrawlActor, all_queues)
+    crawlers = local_proc_mesh.spawn("crawlers", CrawlActor, all_queues)

     # Run the crawlers; display the count of documents they crawled when done.
     results = await crawlers.crawl.call()
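
Note: for reviewers, a minimal sketch of the call pattern this patch moves the example to, assuming the `monarch.actor` import path; the `EchoActor` class and its `echo` endpoint are illustrative stand-ins, not part of the crawler example.

    import asyncio

    from monarch.actor import Actor, ProcMesh, endpoint, this_host


    class EchoActor(Actor):
        # Hypothetical actor used only to illustrate the call pattern.
        @endpoint
        async def echo(self, value: str) -> str:
            return value


    async def main() -> None:
        # spawn_procs() and spawn() are now synchronous: no await.
        procs: ProcMesh = this_host().spawn_procs(per_host={"procs": 2})
        echoes = procs.spawn("echoes", EchoActor)

        # Actor endpoint invocations are still awaited.
        results = await echoes.echo.call("hello")
        print(results)


    if __name__ == "__main__":
        asyncio.run(main())

The only behavioral point of the change is that `spawn_procs()` and `spawn()` no longer return awaitables, while endpoint calls such as `.call()` and `.call_one()` remain async.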