{"documentCount":378,"nextId":378,"documentIds":{"0":"hatchet://docs/v1/index","1":"hatchet://docs/v1/index#using-these-docs","2":"hatchet://docs/v1/index#concepts","3":"hatchet://docs/v1/index#use-cases","4":"hatchet://docs/v1/index#self-hosting","5":"hatchet://docs/v1/index#production-readiness","6":"hatchet://docs/v1/index#ready-to-get-started","7":"hatchet://docs/v1/quickstart","8":"hatchet://docs/v1/quickstart#next-steps","9":"hatchet://docs/v1/using-coding-agents","10":"hatchet://docs/v1/using-coding-agents#agent-skills","11":"hatchet://docs/v1/using-coding-agents#mcp-server","12":"hatchet://docs/v1/using-coding-agents#llmstxt","13":"hatchet://docs/v1/tasks","14":"hatchet://docs/v1/tasks#defining-a-task","15":"hatchet://docs/v1/tasks#input-and-output","16":"hatchet://docs/v1/tasks#the-context-object","17":"hatchet://docs/v1/tasks#configuration","18":"hatchet://docs/v1/tasks#how-tasks-execute-on-workers","19":"hatchet://docs/v1/workers","20":"hatchet://docs/v1/workers#declaring-a-worker","21":"hatchet://docs/v1/workers#starting-a-worker","22":"hatchet://docs/v1/workers#slots","23":"hatchet://docs/v1/running-your-task","24":"hatchet://docs/v1/running-your-task#fire-and-wait","25":"hatchet://docs/v1/running-your-task#fire-and-forget","26":"hatchet://docs/v1/running-your-task#crons","27":"hatchet://docs/v1/running-your-task#scheduled-runs","28":"hatchet://docs/v1/running-your-task#triggering-from-the-dashboard","29":"hatchet://docs/v1/durable-execution","30":"hatchet://docs/v1/durable-execution#guarantees","31":"hatchet://docs/v1/durable-execution#core-assumptions","32":"hatchet://docs/v1/durable-execution#example-uses","33":"hatchet://docs/v1/durable-execution#learn-more","34":"hatchet://docs/v1/scheduled-runs","35":"hatchet://docs/v1/scheduled-runs#programmatically-creating-scheduled-runs","36":"hatchet://docs/v1/scheduled-runs#managing-scheduled-runs-in-the-hatchet-dashboard","37":"hatchet://docs/v1/scheduled-runs#scheduled-run-considerations","38":"hatchet://d
ocs/v1/cron-runs","39":"hatchet://docs/v1/cron-runs#defining-a-cron-in-your-task-definition","40":"hatchet://docs/v1/cron-runs#programmatically-creating-cron-triggers","41":"hatchet://docs/v1/cron-runs#managing-cron-triggers-in-the-hatchet-dashboard","42":"hatchet://docs/v1/cron-runs#cron-considerations","43":"hatchet://docs/v1/bulk-run","44":"hatchet://docs/v1/webhooks","45":"hatchet://docs/v1/webhooks#guides","46":"hatchet://docs/v1/webhooks#creating-a-webhook","47":"hatchet://docs/v1/webhooks#usage","48":"hatchet://docs/v1/external-events/pushing-events","49":"hatchet://docs/v1/external-events/run-on-event","50":"hatchet://docs/v1/external-events/run-on-event#declaring-event-triggers","51":"hatchet://docs/v1/external-events/event-filters","52":"hatchet://docs/v1/external-events/event-filters#basic-usage","53":"hatchet://docs/v1/external-events/event-filters#accessing-the-filter-payload","54":"hatchet://docs/v1/external-events/event-filters#advanced-usage","55":"hatchet://docs/v1/inter-service-triggering#invoking-tasks-from-other-services","56":"hatchet://docs/v1/retry-policies","57":"hatchet://docs/v1/retry-policies#how-it-works","58":"hatchet://docs/v1/retry-policies#how-to-use-task-level-retries","59":"hatchet://docs/v1/retry-policies#accessing-the-retry-count-in-a-running-task","60":"hatchet://docs/v1/retry-policies#exponential-backoff","61":"hatchet://docs/v1/retry-policies#bypassing-retry-logic","62":"hatchet://docs/v1/retry-policies#python-sdk-client-retry-behavior","63":"hatchet://docs/v1/retry-policies#conclusion","64":"hatchet://docs/v1/timeouts","65":"hatchet://docs/v1/timeouts#timeout-format","66":"hatchet://docs/v1/timeouts#refreshing-timeouts","67":"hatchet://docs/v1/cancellation","68":"hatchet://docs/v1/cancellation#cancellation-mechanisms","69":"hatchet://docs/v1/cancellation#cancellation-best-practices","70":"hatchet://docs/v1/cancellation#additional-features","71":"hatchet://docs/v1/cancellation#conclusion","72":"hatchet://docs/v1/bulk-retries-an
d-cancellations","73":"hatchet://docs/v1/bulk-retries-and-cancellations#a-note-on-dead-letter-queues","74":"hatchet://docs/v1/concurrency","75":"hatchet://docs/v1/concurrency#group-round-robin","76":"hatchet://docs/v1/concurrency#cancel-in-progress","77":"hatchet://docs/v1/concurrency#cancel-newest","78":"hatchet://docs/v1/concurrency#multiple-concurrency-strategies","79":"hatchet://docs/v1/rate-limits","80":"hatchet://docs/v1/rate-limits#dynamic-vs-static-rate-limits","81":"hatchet://docs/v1/rate-limits#dynamic-rate-limits","82":"hatchet://docs/v1/rate-limits#static-rate-limits","83":"hatchet://docs/v1/priority","84":"hatchet://docs/v1/priority#setting-priority-for-a-task-or-workflow","85":"hatchet://docs/v1/patterns/durable-task-execution","86":"hatchet://docs/v1/patterns/durable-task-execution#when-to-use-durable-tasks","87":"hatchet://docs/v1/patterns/durable-task-execution#how-it-works","88":"hatchet://docs/v1/patterns/durable-task-execution#the-durable-context","89":"hatchet://docs/v1/patterns/durable-task-execution#example-task","90":"hatchet://docs/v1/patterns/durable-task-execution#spawning-child-tasks","91":"hatchet://docs/v1/patterns/directed-acyclic-graphs","92":"hatchet://docs/v1/patterns/directed-acyclic-graphs#how-dag-workflows-work","93":"hatchet://docs/v1/patterns/directed-acyclic-graphs#defining-a-workflow","94":"hatchet://docs/v1/patterns/directed-acyclic-graphs#defining-a-task","95":"hatchet://docs/v1/patterns/directed-acyclic-graphs#building-a-dag-with-task-dependencies","96":"hatchet://docs/v1/patterns/directed-acyclic-graphs#accessing-parent-task-outputs","97":"hatchet://docs/v1/patterns/directed-acyclic-graphs#running-a-workflow","98":"hatchet://docs/v1/patterns/directed-acyclic-graphs#pre-determined-pipelines","99":"hatchet://docs/v1/patterns/mixing-patterns","100":"hatchet://docs/v1/patterns/mixing-patterns#choosing-a-pattern","101":"hatchet://docs/v1/patterns/mixing-patterns#mixing-patterns","102":"hatchet://docs/v1/patterns/mixing-pattern
s#determinism-in-durable-tasks","103":"hatchet://docs/v1/child-spawning","104":"hatchet://docs/v1/child-spawning#spawning-from-durable-tasks","105":"hatchet://docs/v1/child-spawning#spawning-from-dag-tasks","106":"hatchet://docs/v1/child-spawning#common-patterns","107":"hatchet://docs/v1/child-spawning#use-cases","108":"hatchet://docs/v1/sleep","109":"hatchet://docs/v1/sleep#durable-sleep","110":"hatchet://docs/v1/sleep#sleep-conditions","111":"hatchet://docs/v1/events","112":"hatchet://docs/v1/events#wait-for-events","113":"hatchet://docs/v1/events#event-conditions","114":"hatchet://docs/v1/conditions","115":"hatchet://docs/v1/conditions#procedural-branching","116":"hatchet://docs/v1/conditions#or-groups","117":"hatchet://docs/v1/conditions#parent-conditions","118":"hatchet://docs/v1/conditions#or-groups-2","119":"hatchet://docs/v1/on-failure","120":"hatchet://docs/v1/on-failure#trycatch-in-durable-tasks","121":"hatchet://docs/v1/on-failure#on-failure-tasks","122":"hatchet://docs/v1/task-eviction","123":"hatchet://docs/v1/task-eviction#task-eviction","124":"hatchet://docs/v1/task-eviction#no-eviction-needed","125":"hatchet://docs/v1/docker","126":"hatchet://docs/v1/docker#entrypoint-configuration-for-hatchet","127":"hatchet://docs/v1/docker#example-dockerfiles","128":"hatchet://docs/v1/autoscaling-workers","129":"hatchet://docs/v1/autoscaling-workers#task-stats-api","130":"hatchet://docs/v1/autoscaling-workers#autoscaling-with-keda","131":"hatchet://docs/v1/advanced-assignment/sticky-assignment","132":"hatchet://docs/v1/advanced-assignment/sticky-assignment#setting-sticky-assignment","133":"hatchet://docs/v1/advanced-assignment/sticky-assignment#sticky-child-tasks","134":"hatchet://docs/v1/advanced-assignment/worker-affinity","135":"hatchet://docs/v1/advanced-assignment/worker-affinity#specifying-worker-labels","136":"hatchet://docs/v1/advanced-assignment/worker-affinity#specifying-step-desired-labels","137":"hatchet://docs/v1/advanced-assignment/manual-slot-releas
e","138":"hatchet://docs/v1/advanced-assignment/manual-slot-release#using-manual-slot-release","139":"hatchet://docs/v1/advanced-assignment/manual-slot-release#use-cases","140":"hatchet://docs/v1/logging","141":"hatchet://docs/v1/logging#using-the-built-in-logging-package","142":"hatchet://docs/v1/logging#using-the-contextlog-method","143":"hatchet://docs/v1/opentelemetry","144":"hatchet://docs/v1/opentelemetry#setup","145":"hatchet://docs/v1/opentelemetry#spans","146":"hatchet://docs/v1/worker-healthchecks","147":"hatchet://docs/v1/prometheus-metrics","148":"hatchet://docs/v1/prometheus-metrics#tenant-metrics","149":"hatchet://docs/v1/additional-metadata","150":"hatchet://docs/v1/additional-metadata#filtering-in-the-dashboard","151":"hatchet://docs/v1/additional-metadata#use-cases","152":"hatchet://docs/v1/middleware","153":"hatchet://docs/v1/middleware#defining-middleware","154":"hatchet://docs/v1/middleware#how-middleware-executes","155":"hatchet://docs/v1/middleware#using-middleware-in-tasks","156":"hatchet://docs/v1/middleware#running-a-worker","157":"hatchet://docs/v1/middleware#practical-examples","158":"hatchet://docs/v1/middleware#faq","159":"hatchet://docs/v1/streaming","160":"hatchet://docs/v1/streaming#publishing-stream-events","161":"hatchet://docs/v1/streaming#consuming-streams","162":"hatchet://docs/v1/streaming#streaming-to-a-web-application","163":"hatchet://docs/v1/environments","164":"hatchet://docs/v1/environments#multiple-developers-one-orchestrator","165":"hatchet://docs/v1/troubleshooting/index","166":"hatchet://docs/v1/troubleshooting/index#quick-debugging-checklist","167":"hatchet://docs/v1/troubleshooting/index#could-not-send-task-to-worker","168":"hatchet://docs/v1/troubleshooting/index#no-workers-visible-in-dashboard","169":"hatchet://docs/v1/troubleshooting/index#tasks-stuck-in-queued-state","170":"hatchet://docs/v1/troubleshooting/index#worker-keeps-disconnecting","171":"hatchet://docs/v1/troubleshooting/index#phantom-workers-active-in-
dashboard","172":"hatchet://docs/v1/architecture-and-guarantees","173":"hatchet://docs/v1/architecture-and-guarantees#architecture-overview","174":"hatchet://docs/v1/architecture-and-guarantees#core-components","175":"hatchet://docs/v1/architecture-and-guarantees#guarantees-tradeoffs","176":"hatchet://docs/v1/architecture-and-guarantees#core-reliability-guarantees","177":"hatchet://docs/v1/architecture-and-guarantees#performance-expectations","178":"hatchet://docs/v1/architecture-and-guarantees#next-steps","179":"hatchet://docs/v1/cloud-vs-oss","180":"hatchet://docs/v1/cloud-vs-oss#quick-decision-guide","181":"hatchet://docs/v1/cloud-vs-oss#whats-the-same-in-both","182":"hatchet://docs/v1/cloud-vs-oss#what-changes-operational-responsibilities","183":"hatchet://docs/v1/cloud-vs-oss#migrating-between-cloud-and-self-hosted","184":"hatchet://docs/v1/cloud-vs-oss#next-steps","185":"hatchet://docs/v1/security/index","186":"hatchet://docs/v1/security/index#trust-center","187":"hatchet://docs/v1/security/index#same-source-same-security","188":"hatchet://docs/v1/security/index#hatchet-cloud","189":"hatchet://docs/v1/security/index#self-hosted","190":"hatchet://docs/v1/security/audit-logs","191":"hatchet://docs/v1/security/audit-logs#what-gets-logged","192":"hatchet://docs/v1/security/audit-logs#audited-actions","193":"hatchet://docs/v1/security/audit-logs#actor-types","194":"hatchet://docs/v1/security/audit-logs#retention","195":"hatchet://docs/v1/security/audit-logs#viewing-audit-logs","196":"hatchet://docs/v1/security/audit-logs#api-access","197":"hatchet://docs/v1/region-availability","198":"hatchet://docs/v1/region-availability#current-regions","199":"hatchet://docs/v1/region-availability#request-a-region","200":"hatchet://docs/v1/uptime","201":"hatchet://docs/v1/uptime#hatchet-cloud-status-page","202":"hatchet://docs/v1/uptime#self-hosted-deployments","203":"hatchet://docs/v1/developer-experience","204":"hatchet://docs/v1/developer-experience#workflows-as-code","205":"h
atchet://docs/v1/developer-experience#dashboard-ui","206":"hatchet://docs/v1/developer-experience#cli","207":"hatchet://docs/v1/developer-experience#coding-agents-mcp","208":"hatchet://docs/v1/faq","209":"hatchet://docs/v1/faq#how-do-i-choose-how-many-slots-to-set-on-my-worker","210":"hatchet://docs/v1/faq#why-am-i-seeing-missed-heartbeats-and-task-reassignments","211":"hatchet://docs/cookbooks/index","212":"hatchet://docs/cookbooks/index#webhooks","213":"hatchet://docs/cookbooks/webhooks-stripe","214":"hatchet://docs/cookbooks/webhooks-stripe#setup","215":"hatchet://docs/cookbooks/webhooks-github","216":"hatchet://docs/cookbooks/webhooks-github#setup","217":"hatchet://docs/cookbooks/webhooks-slack","218":"hatchet://docs/cookbooks/webhooks-slack#slack-app-setup","219":"hatchet://docs/cookbooks/webhooks-slack#event-subscriptions","220":"hatchet://docs/cookbooks/webhooks-slack#slash-commands","221":"hatchet://docs/cookbooks/webhooks-slack#interactive-components","222":"hatchet://docs/self-hosting/index","223":"hatchet://docs/self-hosting/index#what-youre-self-hosting","224":"hatchet://docs/self-hosting/index#deployment-options","225":"hatchet://docs/self-hosting/hatchet-lite","226":"hatchet://docs/self-hosting/docker-compose","227":"hatchet://docs/self-hosting/docker-compose#quickstart","228":"hatchet://docs/self-hosting/docker-compose#run-tasks-against-the-hatchet-instance","229":"hatchet://docs/self-hosting/docker-compose#repulling-images","230":"hatchet://docs/self-hosting/docker-compose#connecting-to-the-engine-from-within-docker","231":"hatchet://docs/self-hosting/docker-compose#additional-docker-configuration","232":"hatchet://docs/self-hosting/kubernetes-quickstart","233":"hatchet://docs/self-hosting/kubernetes-quickstart#prerequisites","234":"hatchet://docs/self-hosting/kubernetes-quickstart#quickstart","235":"hatchet://docs/self-hosting/kubernetes-glasskube","236":"hatchet://docs/self-hosting/kubernetes-glasskube#prerequisites","237":"hatchet://docs/self-host
ing/kubernetes-glasskube#what-is-glasskube","238":"hatchet://docs/self-hosting/kubernetes-glasskube#quickstart","239":"hatchet://docs/self-hosting/networking","240":"hatchet://docs/self-hosting/networking#overview","241":"hatchet://docs/self-hosting/networking#example-nginx-ingress","242":"hatchet://docs/self-hosting/kubernetes-helm-configuration","243":"hatchet://docs/self-hosting/kubernetes-helm-configuration#shared-config","244":"hatchet://docs/self-hosting/kubernetes-external-database","245":"hatchet://docs/self-hosting/kubernetes-external-database#connecting-to-postgres","246":"hatchet://docs/self-hosting/kubernetes-external-database#mounting-environment-variables","247":"hatchet://docs/self-hosting/kubernetes-external-database#migrations","248":"hatchet://docs/self-hosting/high-availability","249":"hatchet://docs/self-hosting/high-availability#ha-helm-chart","250":"hatchet://docs/self-hosting/configuration-options","251":"hatchet://docs/self-hosting/configuration-options#environment-variable-prefixes","252":"hatchet://docs/self-hosting/configuration-options#required-environment-variables","253":"hatchet://docs/self-hosting/configuration-options#minimal-configuration-example","254":"hatchet://docs/self-hosting/configuration-options#runtime-configuration","255":"hatchet://docs/self-hosting/configuration-options#database-configuration","256":"hatchet://docs/self-hosting/configuration-options#security-check-configuration","257":"hatchet://docs/self-hosting/configuration-options#limit-configuration","258":"hatchet://docs/self-hosting/configuration-options#alerting-configuration","259":"hatchet://docs/self-hosting/configuration-options#encryption-configuration","260":"hatchet://docs/self-hosting/configuration-options#authentication-configuration","261":"hatchet://docs/self-hosting/configuration-options#task-queue-configuration","262":"hatchet://docs/self-hosting/configuration-options#tls-configuration","263":"hatchet://docs/self-hosting/configuration-options#logging
-configuration","264":"hatchet://docs/self-hosting/configuration-options#opentelemetry-configuration","265":"hatchet://docs/self-hosting/configuration-options#tenant-alerting-configuration","266":"hatchet://docs/self-hosting/configuration-options#cron-operations-configuration","267":"hatchet://docs/self-hosting/configuration-options#olap-database-configuration","268":"hatchet://docs/self-hosting/prometheus-metrics#prometheus-metrics-for-hatchet","269":"hatchet://docs/self-hosting/worker-configuration-options","270":"hatchet://docs/self-hosting/worker-configuration-options#basic-configuration","271":"hatchet://docs/self-hosting/worker-configuration-options#worker-runtime-configuration","272":"hatchet://docs/self-hosting/worker-configuration-options#worker-healthcheck-server-python-sdk","273":"hatchet://docs/self-hosting/worker-configuration-options#tls-configuration","274":"hatchet://docs/self-hosting/worker-configuration-options#logging-configuration","275":"hatchet://docs/self-hosting/upgrading-downgrading","276":"hatchet://docs/self-hosting/upgrading-downgrading#overview","277":"hatchet://docs/self-hosting/upgrading-downgrading#step-1-take-a-database-snapshot","278":"hatchet://docs/self-hosting/upgrading-downgrading#step-2-upgrade-engine-versions","279":"hatchet://docs/self-hosting/upgrading-downgrading#step-3-downgrade-if-needed","280":"hatchet://docs/self-hosting/downgrading-db-schema-manually","281":"hatchet://docs/self-hosting/downgrading-db-schema-manually#overview","282":"hatchet://docs/self-hosting/downgrading-db-schema-manually#prerequisites","283":"hatchet://docs/self-hosting/downgrading-db-schema-manually#finding-the-target-migration-version","284":"hatchet://docs/self-hosting/downgrading-db-schema-manually#running-down-migrations","285":"hatchet://docs/self-hosting/downgrading-db-schema-manually#deploying-the-older-version","286":"hatchet://docs/self-hosting/benchmarking","287":"hatchet://docs/self-hosting/benchmarking#throughput","288":"hatchet://docs/
self-hosting/benchmarking#latency","289":"hatchet://docs/self-hosting/benchmarking#running-your-own-benchmarks","290":"hatchet://docs/self-hosting/benchmarking#setup","291":"hatchet://docs/self-hosting/data-retention","292":"hatchet://docs/self-hosting/improving-performance","293":"hatchet://docs/self-hosting/improving-performance#database-connection-pooling","294":"hatchet://docs/self-hosting/improving-performance#high-database-cpu","295":"hatchet://docs/self-hosting/improving-performance#slow-time-to-start","296":"hatchet://docs/self-hosting/improving-performance#database-settings-and-autovacuum","297":"hatchet://docs/self-hosting/improving-performance#scaling-the-hatchet-engine","298":"hatchet://docs/self-hosting/read-replicas","299":"hatchet://docs/self-hosting/read-replicas#configuration-options","300":"hatchet://docs/self-hosting/read-replicas#limitations","301":"hatchet://docs/self-hosting/sampling","302":"hatchet://docs/self-hosting/smtp-server","303":"hatchet://docs/self-hosting/smtp-server#prerequisites","304":"hatchet://docs/self-hosting/smtp-server#configuration","305":"hatchet://docs/self-hosting/smtp-server#provider-reference","306":"hatchet://docs/reference/python/client","307":"hatchet://docs/reference/python/client#the-hatchet-python-client","308":"hatchet://docs/reference/python/context","309":"hatchet://docs/reference/python/context#context","310":"hatchet://docs/reference/python/context#durablecontext","311":"hatchet://docs/reference/python/feature-clients/cron","312":"hatchet://docs/reference/python/feature-clients/filters","313":"hatchet://docs/reference/python/feature-clients/logs","314":"hatchet://docs/reference/python/feature-clients/metrics","315":"hatchet://docs/reference/python/feature-clients/rate_limits","316":"hatchet://docs/reference/python/feature-clients/runs","317":"hatchet://docs/reference/python/feature-clients/scheduled","318":"hatchet://docs/reference/python/feature-clients/webhooks","319":"hatchet://docs/reference/python/featu
re-clients/workers","320":"hatchet://docs/reference/python/feature-clients/workflows","321":"hatchet://docs/reference/python/runnables","322":"hatchet://docs/reference/python/runnables#workflow","323":"hatchet://docs/reference/python/runnables#task","324":"hatchet://docs/reference/python/runnables#standalone","325":"hatchet://docs/reference/python/asyncio","326":"hatchet://docs/reference/python/pydantic","327":"hatchet://docs/reference/python/lifespans","328":"hatchet://docs/reference/python/lifespans#usage","329":"hatchet://docs/reference/python/dependency-injection","330":"hatchet://docs/reference/python/dependency-injection#usage","331":"hatchet://docs/reference/python/dataclasses","332":"hatchet://docs/reference/typescript/client","333":"hatchet://docs/reference/typescript/Context","334":"hatchet://docs/reference/typescript/feature-clients/crons","335":"hatchet://docs/reference/typescript/feature-clients/filters","336":"hatchet://docs/reference/typescript/feature-clients/logs","337":"hatchet://docs/reference/typescript/feature-clients/logs#type-aliases","338":"hatchet://docs/reference/typescript/feature-clients/metrics","339":"hatchet://docs/reference/typescript/feature-clients/ratelimits","340":"hatchet://docs/reference/typescript/feature-clients/runs","341":"hatchet://docs/reference/typescript/feature-clients/schedules","342":"hatchet://docs/reference/typescript/feature-clients/webhooks","343":"hatchet://docs/reference/typescript/feature-clients/workers","344":"hatchet://docs/reference/typescript/feature-clients/workflows","345":"hatchet://docs/reference/typescript/Runnables","346":"hatchet://docs/reference/typescript/Runnables#functions","347":"hatchet://docs/reference/cli/index","348":"hatchet://docs/reference/cli/index#features","349":"hatchet://docs/reference/cli/index#installation","350":"hatchet://docs/reference/cli/index#verifying-installation","351":"hatchet://docs/reference/cli/profiles","352":"hatchet://docs/reference/cli/profiles#creating-a-profile"
,"353":"hatchet://docs/reference/cli/profiles#listing-profiles","354":"hatchet://docs/reference/cli/profiles#setting-a-default-profile","355":"hatchet://docs/reference/cli/profiles#using-a-profile","356":"hatchet://docs/reference/cli/profiles#updating-a-profile","357":"hatchet://docs/reference/cli/profiles#deleting-a-profile","358":"hatchet://docs/reference/cli/running-hatchet-locally","359":"hatchet://docs/reference/cli/running-hatchet-locally#prerequisites","360":"hatchet://docs/reference/cli/running-hatchet-locally#starting-hatchet-locally","361":"hatchet://docs/reference/cli/running-hatchet-locally#stopping-hatchet-locally","362":"hatchet://docs/reference/cli/running-hatchet-locally#reference","363":"hatchet://docs/reference/cli/running-workers-locally","364":"hatchet://docs/reference/cli/running-workers-locally#setting-up-a-hatchetyaml-file","365":"hatchet://docs/reference/cli/running-workers-locally#running-a-worker","366":"hatchet://docs/reference/cli/triggering-workflows","367":"hatchet://docs/reference/cli/triggering-workflows#example","368":"hatchet://docs/reference/cli/tui","369":"hatchet://docs/reference/cli/tui#runs-view","370":"hatchet://docs/reference/cli/tui#workflows-view","371":"hatchet://docs/reference/cli/tui#workers-view","372":"hatchet://docs/contributing/index","373":"hatchet://docs/contributing/index#issues","374":"hatchet://docs/contributing/github-app-setup#setup","375":"hatchet://docs/contributing/sdks","376":"hatchet://docs/contributing/sdks#environment-variables","377":"hatchet://docs/contributing/sdks#compatibility-matrices"},"fieldIds":{"title":0,"content":1,"codeIdentifiers":2,"keywords":3},"fieldLength":{"0":[5,100,1,9],"1":[3,22,1,9],"2":[1,68,1,9],"3":[2,107,1,9],"4":[2,64,1,9],"5":[2,85,1,9],"6":[5,14,1,9],"7":[1,199,4,6],"8":[2,19,1,6],"9":[3,58,1,7],"10":[2,112,1,7],"11":[2,99,1,7],"12":[2,56,1,7],"13":[1,56,1,4],"14":[3,93,9,4],"15":[3,75,1,4],"16":[3,96,1,4],"17":[1,98,1,4],"18":[5,57,1,4],"19":[1,57,1,7],"20":[3,116,13,7],"21
":[3,154,6,7],"22":[1,80,1,7],"23":[2,56,1,11],"24":[3,145,14,11],"25":[3,195,22,11],"26":[1,53,1,11],"27":[2,56,1,11],"28":[4,36,1,11],"29":[2,65,1,1],"30":[1,107,1,1],"31":[2,59,1,1],"32":[2,117,1,1],"33":[3,31,1,1],"34":[2,123,1,1],"35":[4,242,35,1],"36":[7,92,1,1],"37":[3,91,1,1],"38":[2,209,1,1],"39":[7,162,19,1],"40":[4,190,24,1],"41":[7,43,1,1],"42":[2,93,1,1],"43":[2,198,23,1],"44":[1,36,1,1],"45":[1,31,1,1],"46":[3,302,1,1],"47":[1,56,1,1],"48":[2,87,7,1],"49":[2,112,1,1],"50":[3,133,11,1],"51":[2,46,1,1],"52":[2,242,33,1],"53":[4,69,20,1],"54":[2,46,1,1],"55":[5,280,25,1],"56":[2,50,1,1],"57":[3,99,1,1],"58":[6,155,11,1],"59":[8,97,18,1],"60":[2,121,14,1],"61":[3,155,14,1],"62":[5,247,8,1],"63":[1,67,1,1],"64":[1,66,1,1],"65":[2,215,21,1],"66":[2,209,27,1],"67":[1,78,1,1],"68":[2,173,32,1],"69":[3,113,1,1],"70":[2,40,1,1],"71":[1,54,1,1],"72":[3,266,23,1],"73":[6,100,1,1],"74":[1,315,28,1],"75":[3,138,1,1],"76":[3,122,1,1],"77":[2,52,1,1],"78":[3,148,30,1],"79":[2,70,1,1],"80":[5,48,1,1],"81":[3,206,21,1],"82":[3,221,26,1],"83":[1,123,1,1],"84":[7,176,39,1],"85":[2,123,1,1],"86":[5,80,1,1],"87":[3,146,1,1],"88":[3,106,1,1],"89":[2,154,16,1],"90":[3,104,1,1],"91":[1,37,1,1],"92":[4,90,1,1],"93":[3,89,4,1],"94":[3,167,12,1],"95":[6,122,23,1],"96":[4,103,17,1],"97":[3,57,5,1],"98":[3,41,1,1],"99":[2,3,1,1],"100":[3,114,1,1],"101":[2,180,1,1],"102":[4,127,1,1],"103":[2,58,1,1],"104":[4,281,45,1],"105":[4,335,65,1],"106":[2,158,1,1],"107":[2,62,1,1],"108":[2,49,1,1],"109":[2,199,24,1],"110":[2,155,19,1],"111":[3,68,1,1],"112":[3,220,33,1],"113":[2,251,29,1],"114":[2,44,1,1],"115":[2,135,6,1],"116":[2,171,26,1],"117":[2,246,29,1],"118":[2,262,22,1],"119":[2,44,1,1],"120":[5,157,13,1],"121":[3,208,23,1],"122":[2,36,1,1],"123":[2,222,1,1],"124":[3,179,1,1],"125":[3,22,1,1],"126":[4,54,1,1],"127":[2,181,20,1],"128":[2,37,1,1],"129":[3,120,6,1],"130":[3,127,10,1],"131":[2,70,1,1],"132":[3,196,18,1],"133":[3,169,29,1],"134":[2,90,1,1],"135":[3,111,18,1],"136":[4,298,
33,1],"137":[3,106,1,1],"138":[4,134,15,1],"139":[2,63,1,1],"140":[1,44,1,1],"141":[6,101,14,1],"142":[5,196,20,1],"143":[1,20,1,1],"144":[1,212,16,1],"145":[1,167,1,1],"146":[3,177,1,1],"147":[2,38,1,1],"148":[2,74,3,1],"149":[2,138,16,1],"150":[4,57,1,1],"151":[2,51,1,1],"152":[1,128,1,1],"153":[2,196,12,1],"154":[3,253,8,1],"155":[4,173,15,1],"156":[3,76,5,1],"157":[2,225,32,1],"158":[1,311,1,1],"159":[1,38,1,1],"160":[3,209,21,1],"161":[2,189,34,1],"162":[5,217,49,1],"163":[1,5,1,1],"164":[4,121,1,1],"165":[2,14,1,1],"166":[3,79,1,1],"167":[6,108,1,1],"168":[5,50,1,1],"169":[5,79,1,1],"170":[3,77,1,1],"171":[5,52,1,1],"172":[2,25,1,1],"173":[2,122,1,1],"174":[2,151,1,1],"175":[2,114,1,1],"176":[3,105,1,1],"177":[2,115,1,1],"178":[2,19,1,1],"179":[3,39,1,1],"180":[3,48,1,1],"181":[6,38,1,1],"182":[5,85,1,1],"183":[6,35,1,1],"184":[2,29,1,1],"185":[1,22,1,1],"186":[2,28,1,1],"187":[4,38,1,1],"188":[2,94,1,1],"189":[2,103,1,1],"190":[2,51,1,1],"191":[3,61,1,1],"192":[2,50,1,1],"193":[2,48,1,1],"194":[1,14,1,1],"195":[3,22,1,1],"196":[2,70,1,1],"197":[2,21,1,1],"198":[2,40,1,1],"199":[3,33,1,1],"200":[1,21,1,1],"201":[4,38,1,1],"202":[3,26,1,1],"203":[2,28,1,1],"204":[3,44,1,1],"205":[3,43,1,1],"206":[1,49,1,1],"207":[4,58,1,1],"208":[3,26,1,5],"209":[13,162,1,5],"210":[10,77,1,5],"211":[1,25,1,1],"212":[1,54,1,1],"213":[1,33,1,1],"214":[1,243,18,1],"215":[1,24,1,1],"216":[1,285,20,1],"217":[1,74,1,1],"218":[3,76,1,1],"219":[2,189,15,1],"220":[2,192,16,1],"221":[2,169,17,1],"222":[1,40,1,1],"223":[5,60,1,1],"224":[2,68,1,1],"225":[2,226,10,1],"226":[2,70,1,1],"227":[1,186,12,1],"228":[6,75,1,1],"229":[2,29,1,1],"230":[7,62,1,1],"231":[3,93,1,1],"232":[1,3,1,1],"233":[1,15,1,1],"234":[1,155,13,1],"235":[3,5,1,1],"236":[1,20,1,1],"237":[4,44,1,1],"238":[1,226,15,1],"239":[1,3,1,1],"240":[1,88,4,1],"241":[3,155,12,1],"242":[4,5,1,1],"243":[2,211,2,1],"244":[5,8,1,1],"245":[3,67,1,1],"246":[3,69,1,1],"247":[1,53,1,1],"248":[2,115,1,1],"249":[3,71,3,1],"250":[3,31,1,1],"
251":[3,67,1,1],"252":[3,92,5,1],"253":[3,109,5,1],"254":[2,162,1,1],"255":[2,159,1,1],"256":[3,18,1,1],"257":[2,44,1,1],"258":[2,48,1,1],"259":[2,47,1,1],"260":[2,63,1,1],"261":[3,34,1,1],"262":[2,45,1,1],"263":[2,26,1,1],"264":[2,55,1,1],"265":[3,96,1,1],"266":[3,76,1,1],"267":[3,22,1,1],"268":[4,346,3,1],"269":[3,24,1,1],"270":[2,30,1,1],"271":[3,21,1,1],"272":[6,52,1,1],"273":[2,28,1,1],"274":[2,34,1,1],"275":[3,22,1,1],"276":[1,42,1,1],"277":[6,77,1,1],"278":[5,148,4,1],"279":[5,172,1,1],"280":[4,51,1,1],"281":[1,19,1,1],"282":[1,30,1,1],"283":[5,62,2,1],"284":[3,70,1,1],"285":[4,39,2,1],"286":[1,83,1,1],"287":[1,44,1,1],"288":[1,87,1,1],"289":[4,204,2,1],"290":[1,74,1,1],"291":[2,71,1,1],"292":[2,46,1,1],"293":[3,55,1,1],"294":[3,349,38,1],"295":[4,87,1,1],"296":[4,102,1,1],"297":[4,88,1,1],"298":[2,71,1,1],"299":[2,30,1,1],"300":[1,33,1,1],"301":[2,112,1,1],"302":[2,21,1,1],"303":[1,20,1,1],"304":[1,50,4,1],"305":[2,96,1,1],"306":[1,28,1,1],"307":[4,334,1,1],"308":[1,57,1,1],"309":[1,264,1,1],"310":[1,76,1,1],"311":[1,109,1,1],"312":[1,103,1,1],"313":[1,62,1,1],"314":[1,107,1,1],"315":[2,55,1,1],"316":[1,227,1,1],"317":[1,210,1,1],"318":[1,156,1,1],"319":[1,59,1,1],"320":[1,103,1,1],"321":[1,46,1,1],"322":[1,434,6,1],"323":[1,152,1,1],"324":[1,325,1,1],"325":[1,178,11,1],"326":[1,224,20,1],"327":[1,105,1,1],"328":[1,138,13,1],"329":[2,103,1,1],"330":[1,126,4,1],"331":[2,136,8,1],"332":[1,258,13,1],"333":[1,462,2,1],"334":[1,112,1,1],"335":[1,90,1,1],"336":[1,51,1,1],"337":[2,63,1,1],"338":[1,130,1,1],"339":[1,69,1,1],"340":[1,131,1,1],"341":[1,166,1,1],"342":[1,109,1,1],"343":[1,68,1,1],"344":[1,108,1,1],"345":[1,277,13,1],"346":[1,71,1,1],"347":[2,34,1,1],"348":[1,78,1,1],"349":[1,33,4,1],"350":[2,17,1,1],"351":[1,29,1,1],"352":[3,34,1,1],"353":[2,29,1,1],"354":[4,46,1,1],"355":[3,31,1,1],"356":[3,24,1,1],"357":[3,27,1,1],"358":[3,52,1,1],"359":[1,24,1,1],"360":[3,18,1,1],"361":[3,13,1,1],"362":[1,77,1,1],"363":[3,21,1,1],"364":[6,76,2,1],"365":[3,61,1,1],"3
66":[2,34,1,1],"367":[1,43,2,1],"368":[4,60,1,1],"369":[2,42,1,1],"370":[2,47,1,1],"371":[2,46,1,1],"372":[1,153,1,1],"373":[1,72,1,1],"374":[1,199,1,1],"375":[1,21,1,1],"376":[2,102,1,1],"377":[2,62,1,1]},"averageFieldLength":[2.518518518518517,101.31481481481481,5.2328042328042335,1.53968253968254],"storedFields":{"0":{"title":"🪓 What is Hatchet?","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"---\nasIndexPage: true\n---\n\nimport { Callout } from \"nextra/components\";\nimport LanguageSwitcher from \"@/components/LanguageSwitcher\";\nimport Keywords from \"@/components/Keywords\";\n\n# What is Hatchet?\n\nHatchet is a developer platform that helps engineering teams build and deploy mission-critical AI agents, durable workflows, and background tasks. It supports applications written in Python, Typescript, Go and Ruby, and can be used as a service through [Hatchet Cloud](https://cloud.onhatchet.run) or [self-hosting](/self-hosting) (we're [open-source and 100% MIT-licensed](https://github.com/hatchet-dev/hatchet)). Hatchet provides a full platform for queuing, automatic retries, real-time monitoring, alerting, and logging.\n\nUnlike a traditional queuing system, Hatchet is built around the concept of durability. Every task and agent invocation is durably persisted in Hatchet, allowing for debugging, retries and replays, and more complex features like [durable workflows](/v1/durable-execution)."},"1":{"title":"Using these docs","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"Every docs page in the user guide uses inline code snippets across all four SDKs which are generated from tested examples:"},"2":{"title":"Concepts","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"There are three primary concepts to understand when getting started with Hatchet:\n\n- **[Tasks](/v1/tasks)** — the fundamental unit of work. 
A task wraps a single function and gives Hatchet everything it needs to schedule, execute, and observe it.\n- **[Workers](/v1/workers)** — long-running processes in your infrastructure that pick up and execute tasks.\n- **[Durable Workflows](/v1/durable-execution)** — compose multiple tasks into durable pipelines with dependencies, retries, and checkpointing.\n\nAll tasks and workflows are **defined as code**, making them easy to version, test, and deploy."},"3":{"title":"Use cases","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"While Hatchet is a general-purpose orchestration platform, it's particularly well-suited for:\n\n- **AI agents** — Hatchet's durability features allow agents to automatically checkpoint their current state and pick up where they left off when faced with unexpected errors. Hatchet's observability features and distributed-first approach are built for debugging long-running agents at scale.\n- **Massive parallelization** - Hatchet is built to handle millions of parallel task executions without overloading your workers. Worker-level slot control allows your workers to only accept the amount of work they can handle, while features like [fairness](/v1/concurrency) and [priorities](/v1/priority) are built to help scale massively parallel ingestion.\n- **Mission-critical workloads** - everything in Hatchet is durable by default. 
This means that every task, DAG, event or agent invocation is stored in a durable event log and ready to be replayed at some point in the future."},"4":{"title":"Self Hosting","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"If you plan on self-hosting or have requirements for an on-premise deployment, there are some additional considerations:\n\n**Minimal Infra Dependencies** - Hatchet is built on top of PostgreSQL and for simple workloads, [it's all you need](/self-hosting/hatchet-lite).\n\n**Fully Featured Open Source** - Hatchet is 100% MIT licensed, so you can run the same application code against [Hatchet Cloud](https://cloud.onhatchet.run) to get started quickly or [self-host](/self-hosting) when you need more control."},"5":{"title":"Production Readiness","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"Hatchet has been battle-tested in production environments, processing billions of tasks per month for scale-ups and enterprises across various industries. 
Our open source offering is deployed over 10k times per month, while Hatchet Cloud supports hundreds of companies running at scale.\n\n> \"With Hatchet, we've scaled our indexing workflows effortlessly, reducing failed runs by 50% and doubling our user base in just two weeks!\"\n> — Soohoon, Co-Founder @ Greptile\n\n> \"Hatchet enables Aevy to process up to 50,000 documents in under an hour through optimized parallel execution, compared to nearly a week with our previous setup.\"\n> — Ymir, CTO @ Aevy"},"6":{"title":"Ready to get started?","pageTitle":"🪓 What is Hatchet?","pageRoute":"hatchet://docs/v1/index","content":"Get started quickly with the **[Hatchet Cloud Quickstart](/v1/quickstart)** or **[self-hosting](/self-hosting)**."},"7":{"title":"Quickstart","pageTitle":"Quickstart","pageRoute":"hatchet://docs/v1/quickstart","content":"---\nasIndexPage: true\n---\n\nimport { snippets } from \"@/lib/generated/snippets\";\nimport { Snippet } from \"@/components/code\";\nimport { Callout, Card, Cards, Steps, Tabs } from \"nextra/components\";\nimport UniversalTabs from \"../../components/UniversalTabs\";\nimport Keywords from \"@/components/Keywords\";\n\n# Hatchet Cloud Quickstart\n\nBy the end of this guide you'll have a worker running locally that executes a simple task triggered from the CLI.\n\n> **Info:** This guide walks you through getting set up on Hatchet Cloud. If you'd like to\n>   self-host Hatchet, please see the [self-hosted quickstart](/self-hosting)\n>   instead.\n\n\n### Sign up\n\nIf you haven't already signed up for Hatchet Cloud, please register [here](https://cloud.onhatchet.run).\n\n### Set up your tenant\n\nIn Hatchet Cloud, you'll be shown a screen to create your first organization and tenant. A tenant is a logical separation of your environments (e.g. `dev`, `staging`, `production`). 
Each tenant has its own set of users who can access it.\n\nAfter creating the tenant, you can simply follow the instructions in the Hatchet Cloud dashboard to set up your first quickstart project and workflow. We have copied the instructions in the following steps.\n\n### Install the Hatchet CLI\n\n#### Native Install (Recommended)\n\n**MacOS, Linux, WSL**\n\n```sh\n    curl -fsSL https://install.hatchet.run/install.sh | bash\n```\n\n#### Homebrew\n\n**MacOS**\n\n```sh\n    brew install hatchet-dev/hatchet/hatchet --cask\n```\n\n### Set your Hatchet profile\n\nYou will need to create a Hatchet CLI profile to connect to your Hatchet Cloud tenant. You can do this using the `hatchet profile add` command:\n\n```sh\nhatchet profile add\n```\n\nNote that the Hatchet Cloud dashboard will provide you with an API token to use when creating your profile.\n\n### Run the quickstart\n\nYou can run the Hatchet Cloud quickstart using the `hatchet quickstart` command:\n\n```sh\nhatchet quickstart\n```\n\n### Run your worker\n\nAfter setting up the quickstart project, you can run your worker locally by following the instructions printed after the quickstart command. This will involve using the `hatchet worker dev` command:\n\n```sh\nhatchet worker dev\n```\n\n### Trigger a workflow\n\nFinally, you can trigger your workflow using the `hatchet trigger simple` command:\n\n```sh\nhatchet trigger simple\n```\n\n### (Optional) Install Hatchet docs MCP and Agent Skills\n\nGet Hatchet documentation directly in your AI coding assistant (Cursor, Claude Code, and more):\n\n```sh copy\nhatchet docs install\n```\n\nGet agent skills for common CLI operations:\n\n```sh copy\nhatchet skills install\n```\n\nSee the [full setup guide](/v1/using-coding-agents) for manual configuration options.\n\n\nAnd that's it! 
You should now have a Hatchet project set up on Hatchet Cloud with a worker running locally."},"8":{"title":"Next Steps","pageTitle":"Quickstart","pageRoute":"hatchet://docs/v1/quickstart","content":"Once you've completed the quickstart, continue to the next section to learn how to [create your first task](/v1/tasks)."},"9":{"title":"Using Coding Agents","pageTitle":"Using Coding Agents","pageRoute":"hatchet://docs/v1/using-coding-agents","content":"McpUrl,\n  CursorDeeplinkButton,\n  CursorMcpConfig,\n  ClaudeCodeCommand,\n  CursorTabLabel,\n  ClaudeCodeTabLabel,\n  OtherAgentsTabLabel,\n} from \"@/components/McpSetup\";\nimport Keywords from \"@/components/Keywords\";\n\n# Using Coding Agents\n\nHatchet is designed to work well with AI coding agents. This page covers how to give your agent access to Hatchet documentation and step-by-step skills for common CLI operations.\n\n> **Info:** **Prerequisite:** The `hatchet skills install` and `hatchet docs install`\n>   commands require the Hatchet CLI. 
See the [CLI reference](/cli) for\n>   installation instructions."},"10":{"title":"Agent Skills","pageTitle":"Using Coding Agents","pageRoute":"hatchet://docs/v1/using-coding-agents","content":"Agent skills are reference documents that teach AI coding agents how to use the Hatchet CLI — triggering workflows, starting workers, debugging runs, and more.\n\nRun the following command in your project root to install the skill package:\n\n```bash copy\nhatchet skills install\n```\n\nThis creates a `skills/hatchet-cli/` directory with step-by-step reference files and appends a section to your project's `AGENTS.md` (and `CLAUDE.md`) pointing agents to the right file for each task.\n\n**Install to a custom directory:**\n\n```bash copy\nhatchet skills install --dir ./my-project\n```\n\nAfter installation, commit the `skills/` directory and `AGENTS.md` to version control so all agents working in the repo benefit automatically.\n\n### Available references\n\nReference, When to use\n\n`references/setup-cli.md`, Installing the CLI, creating or listing profiles\n`references/start-worker.md`, Starting a dev worker for local development\n`references/trigger-and-watch.md`, Triggering a workflow and polling for completion\n`references/debug-run.md`, Diagnosing a failed, stuck, or unexpected run\n`references/replay-run.md`, Re-running a previous workflow with same or new input"},"11":{"title":"MCP Server","pageTitle":"Using Coding Agents","pageRoute":"hatchet://docs/v1/using-coding-agents","content":"Hatchet documentation is available as an **MCP (Model Context Protocol) server**, so AI coding assistants like Cursor and Claude Code can search and reference Hatchet docs directly.\n\nMCP endpoint: \n\n\n  \n  \n\n    #### Hatchet CLI\n\n```bash copy\n        hatchet docs install claude-code\n```\n\nIf `claude` is on your PATH, this runs the command automatically. 
Otherwise it prints it for you to copy.\n\n#### Command\n\nRun this command in your terminal:\n\n\n  \n  \n\n    For any AI tool that supports [llms.txt](https://llmstxt.org/), Hatchet docs are available at:\n\n    | Resource | URL |\n    |----------|-----|\n    | **llms.txt** (index) | [docs.hatchet.run/llms.txt](https://docs.hatchet.run/llms.txt) |\n    | **llms-full.txt** (all docs) | [docs.hatchet.run/llms-full.txt](https://docs.hatchet.run/llms-full.txt) |\n    | **Per-page markdown** | `docs.hatchet.run/llms/{section}/{page}.md` |\n    | **MCP endpoint** |  |\n\n    Every documentation page also includes a `<link rel=\"alternate\" type=\"text/markdown\">` header\n    pointing to its markdown version, and a \"View as Markdown\" link at the top of the page."},"12":{"title":"llms.txt","pageTitle":"Using Coding Agents","pageRoute":"hatchet://docs/v1/using-coding-agents","content":"For any AI tool that supports [llms.txt](https://llmstxt.org/), Hatchet docs are available at:\n\nResource, URL\n\n**llms.txt** (index), [docs.hatchet.run/llms.txt](https://docs.hatchet.run/llms.txt)\n**llms-full.txt** (all docs), [docs.hatchet.run/llms-full.txt](https://docs.hatchet.run/llms-full.txt)\n**Per-page markdown**, `docs.hatchet.run/llms/{section}/{page}.md`\n**MCP endpoint**, \n\nEvery documentation page also includes a `<link rel=\"alternate\" type=\"text/markdown\">` header\npointing to its markdown version, and a \"View as Markdown\" link at the top of the page."},"13":{"title":"Tasks","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"# Tasks\n\nThe fundamental unit of work in Hatchet is a **task**. At its most basic level, a task is just a function. 
You can invoke a task on its own (a \"standalone\" task), compose tasks into a [DAG workflow](/v1/patterns/directed-acyclic-graphs), or use [durable task composition](/v1/child-spawning) to spawn child tasks at runtime.\n\nEvery task you invoke is **durable** - Hatchet persists it, its state, and its results even after it finishes running."},"14":{"title":"Defining a task","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"A task needs a name and a function. The function accepts an [input](#input-and-output) and a [context](#the-context-object).\n\n#### Python\n\n```python\nclass SimpleInput(BaseModel):\n    message: str\n\n\nclass SimpleOutput(BaseModel):\n    transformed_message: str\n\n\n# Declare the task to run\n@hatchet.task(name=\"first-task\", input_validator=SimpleInput)\ndef first_task(input: SimpleInput, ctx: Context) -> SimpleOutput:\n    print(\"first-task task called\")\n\n    return SimpleOutput(transformed_message=input.message.lower())\n```\n\n#### Typescript\n\n```typescript\nimport { hatchet } from '../hatchet-client';\n\n// (optional) Define the input type for the workflow\nexport type SimpleInput = {\n  Message: string;\n};\n\nexport const simple = hatchet.task({\n  name: 'simple',\n  retries: 3,\n  fn: async (input: SimpleInput) => {\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n```\n\n#### Go\n\n```go\ntype SimpleInput struct {\n\tMessage string `json:\"message\"`\n}\n\ntype SimpleOutput struct {\n\tResult string `json:\"result\"`\n}\n\ntask := client.NewStandaloneTask(\"process-message\", func(ctx hatchet.Context, input SimpleInput) (SimpleOutput, error) {\n\treturn SimpleOutput{\n\t\tResult: \"Processed: \" + input.Message,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nFIRST_TASK = HATCHET.task(name: \"first-task\") do |input, ctx|\n  puts \"first-task called\"\n  { \"transformed_message\" => input[\"message\"].downcase }\nend\n```"},"15":{"title":"Input and 
output","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"Every task receives an **input** - a JSON-serializable object passed when the task is triggered. The value that is returned from the task becomes the task's **output**, which callers receive when they await the result of the task.\n\nHatchet's SDKs support type-checked and runtime-validated input and output types for tasks, so that you can integrate your Hatchet tasks into your codebase in a type-safe and predictable way that provides you all of the guarantees you get from, for example, replacing the Hatchet task run with a local function call.\n\nYou can refer to the [examples above](#defining-a-task) to see how to provide validators for task inputs and outputs."},"16":{"title":"The context object","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"In addition to input and output payloads, every task receives a **context**. The context provides Hatchet-related information that might be useful to the execution of the task at runtime. For instance, you might access the workflow run ID, the task run ID, or the retry count from the context and have your task's application logic do something with those values.\n\nThe context also provides helper methods for interacting with a number of Hatchet's features, such as [managing cancellations](/v1/cancellation), [refreshing timeouts](/v1/timeouts#refreshing-timeouts), [pushing stream events](/v1/streaming#pushing-stream-events) and more.\n\n#### Python\n\nSee the [Python SDK reference](/reference/python/context) for more details\n\n#### Typescript\n\nSee the [TypeScript SDK reference](/reference/typescript/Context) for more\ndetails\n\n#### Go\n\nSee the [Go SDK\nreference](https://pkg.go.dev/github.com/hatchet-dev/hatchet/sdks/go#Context)\nfor more details\n\n#### Ruby\n\nRuby SDK reference coming soon! 
For now, see the [Python SDK\nreference](/reference/python/context) to get a sense of what's available."},"17":{"title":"Configuration","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"Tasks can be configured to handle common problems in distributed systems. For example, you might want to automatically retry a task when an external API returns a transient error, or limit how many instances of a task run at the same time to avoid overwhelming a downstream service.\n\nConcept, What it does\n\n[Retries](/v1/retry-policies), Retry the task on failure, with optional backoff.\n[Timeouts](/v1/timeouts), Limit how long a task may wait to be scheduled or to run.\n[Concurrency](/v1/concurrency), Distribute load fairly between your customers.\n[Rate limits](/v1/rate-limits), Throttle task execution over a time window.\n[Priority](/v1/priority), Influence scheduling order relative to other queued tasks.\n[Worker affinity](/v1/advanced-assignment/worker-affinity), Prefer or require specific workers for this task."},"18":{"title":"How tasks execute on workers","pageTitle":"Tasks","pageRoute":"hatchet://docs/v1/tasks","content":"Tasks don't run on their own. [Workers](/v1/workers) execute them. A worker is a long-running process that registers one or more tasks with Hatchet. When you trigger a task, Hatchet places it in a queue and assigns it to an available worker that has registered that task. When a task completes, the Hatchet SDK running on the worker sends the result back to Hatchet, which marks the task as a success or failure, displays the results, and so on."},"19":{"title":"Workers","pageTitle":"Workers","pageRoute":"hatchet://docs/v1/workers","content":"# Workers\n\nWorkers in Hatchet are the long-running processes that execute [tasks](/v1/tasks). 
In the broadest sense, it may be helpful to think of a worker as a simple `while` loop that receives a new task assignment from Hatchet, executes the task, and reports the results back.\n\nWhen workers are spun up - in any environment, be it locally, on a VM, etc. - they will register themselves with Hatchet to start receiving and executing tasks."},"20":{"title":"Declaring a worker","pageTitle":"Workers","pageRoute":"hatchet://docs/v1/workers","content":"A worker needs a name and a set of tasks (or workflows, more on this later) to register:\n\n#### Python\n\n```python\ndef main() -> None:\n    worker = hatchet.worker(\"dag-worker\", workflows=[dag_workflow])\n\n    worker.start()\n```\n\n#### Typescript\n\n```typescript\nimport { hatchet } from '../hatchet-client';\nimport { simple } from './workflow';\nimport { parent, child } from './workflow-with-child';\nimport { simpleWithZod } from './zod';\n\nasync function main() {\n  const worker = await hatchet.worker('simple-worker', {\n    // 👀 Declare the workflows that the worker can execute\n    workflows: [simple, simpleWithZod, parent, child],\n    // 👀 Declare the number of concurrent task runs the worker can accept\n    slots: 100,\n  });\n\n  await worker.start();\n}\n\nif (require.main === module) {\n  main();\n}\n```\n\n#### Go\n\n```go\nworker, err := client.NewWorker(\"simple-worker\", hatchet.WithWorkflows(task))\nif err != nil {\n\tlog.Fatalf(\"failed to create worker: %v\", err)\n}\n\ninterruptCtx, cancel := cmdutils.NewInterruptContext()\ndefer cancel()\n\nerr = worker.StartBlocking(interruptCtx)\nif err != nil {\n\tlog.Fatalf(\"failed to start worker: %v\", err)\n}\n```\n\n#### Ruby\n\n```ruby\ndef main\n  worker = HATCHET.worker(\"dag-worker\", workflows: [DAG_WORKFLOW])\n  worker.start\nend\n```\n\nWhen a worker starts, it registers each of its tasks and workflows with Hatchet. 
From that point on, Hatchet knows to route matching tasks to that worker.\n\nOne important note is that multiple workers can register the same task. In this scenario, Hatchet distributes work across all of them, allowing for simple horizontal scaling."},"21":{"title":"Starting a worker","pageTitle":"Workers","pageRoute":"hatchet://docs/v1/workers","content":"#### CLI (recommended)\n\nThe fastest way to run a worker during development is with the Hatchet CLI. This handles authentication and hot reloads on code changes:\n\n```bash\nhatchet worker dev\n```\n\n#### Script\n\nYou can also run workers without the CLI, which you're likely to do in a production setting, for instance. To do this, you'll first need to set a `HATCHET_CLIENT_TOKEN` environment variable, or provide it via parameters when creating the Hatchet client.\n\n> **Info:** If you don't already have a token, you can generate one in the \"API Tokens\" section under \"Settings\" in the dashboard.\n\n```bash\nexport HATCHET_CLIENT_TOKEN=\"<your-client-token>\"\n```\n\nIf you're running a self-hosted engine without TLS enabled, also set:\n\n```bash\nexport HATCHET_CLIENT_TLS_STRATEGY=none\n```\n\nThen run the worker:\n\n#### Python\n\n```bash\npython worker.py\n```\n\n#### Typescript\n\nAdd a script to your `package.json`:\n\n```json\n\"scripts\": {\n  \"start:worker\": \"ts-node src/worker.ts\"\n}\n```\n\nThen run it:\n\n```bash\nnpm run start:worker\n```\n\n#### Go\n\n```bash\ngo run main.go\n```\n\n#### Ruby\n\n```bash\nbundle exec ruby worker.rb\n```\n\nOnce the worker starts, you will see logs confirming it is connected:\n\n```\n[INFO]  🪓 -- STARTING HATCHET...\n[DEBUG] 🪓 -- 'test-worker' waiting for ['simpletask:step1']\n[DEBUG] 🪓 -- acquired action listener: efc4aaf2-...\n[DEBUG] 🪓 -- sending heartbeat\n```\n\n> **Info:** For self-hosted engines, there may be additional gRPC configuration options\n>   needed. 
See the [Self-Hosting](/self-hosting/worker-configuration-options)\n>   docs for details."},"22":{"title":"Slots","pageTitle":"Workers","pageRoute":"hatchet://docs/v1/workers","content":"Every worker has a fixed number of **slots** that control how many tasks it can run concurrently, which can be configured with the `slots` option on the worker. For instance, if `slots` is set to 5, the worker will run up to five tasks concurrently at any time. Any additional tasks wait in the queue until a slot opens up. Slots are a **local** limit. They protect the individual worker from attempting to run more tasks concurrently than desired, which can help control resource usage by the worker.\n\nThe default slot count for workers in Hatchet is 100. In many cases, leaving the default as-is will be perfectly fine, especially when first getting set up with Hatchet."},"23":{"title":"Running Tasks","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"# Running Tasks\n\nOnce you've [defined some tasks](/v1/tasks) and registered them on a [worker](/v1/workers), you're ready to run them!\n\nHatchet lets you run tasks in a number of ways, which support different application needs. In broad strokes, these are [fire-and-wait](#fire-and-wait), various forms of [fire-and-forget-style triggering](#fire-and-forget), and scheduling tasks to run either [periodically](#crons) or at some [specific time in the future](#scheduled-runs)."},"24":{"title":"Fire-and-wait","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"Fire-and-wait is a common way of triggering a task which blocks until it completes and returns a result. This is particularly useful for situations where you want to do something with the result of your task. 
For instance, if your task generates some LLM output which you want to return to the user or persist in the database, you might trigger a task, wait for it to complete, collect its result, and then continue on with your application logic.\n\n#### Python\n\nCall `run` or `aio_run` on a `Task` or `Workflow` object to invoke it. These methods block until the task or workflow completes and return the result.\n\n```python\nfrom examples.child.worker import SimpleInput, child_task\n\nchild_task.run(SimpleInput(message=\"Hello, World!\"))\n```\n\nThe run methods in Hatchet's Python SDK also have async flavors you can `await`. These are prefixed with `aio_`, such as `aio_run`.\n\n```python\nresult = await child_task.aio_run(SimpleInput(message=\"Hello, World!\"))\n```\n\nNote that the type of `input` here is a Pydantic model that matches the input schema of the task or workflow being triggered.\n\n#### Typescript\n\nCall `run` on the `Task` object to invoke it. This returns a promise that resolves when the task completes and returns the result.\n\n```typescript\nconst res = await parent.run(\n  {\n    Message: 'HeLlO WoRlD',\n  },\n  {\n    additionalMetadata: {\n      test: 'test',\n    },\n  }\n);\n\n// 👀 Access the results of the Task\nconsole.log(res.TransformedMessage);\n```\n\n#### Go\n\nCall `Run` on the `Task` object to invoke it. This blocks until the task completes and returns the result.\n\n```go\nresult, err := task.Run(context.Background(), SimpleInput{Message: \"Hello, World!\"})\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\nCall `run` on the `Task` object to invoke it. This blocks until the task completes and returns the result.\n\n```ruby\nresult = CHILD_TASK_WF.run({ \"message\" => \"Hello, World!\" })\n```"},"25":{"title":"Fire-and-forget","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"On the other hand, fire-and-forget-style triggering enqueues a task without waiting for the result. 
This is useful for background jobs like sending emails, processing uploads, or kicking off long-running pipelines where the application does _not_ need to wait for the result to continue along.\n\n#### Python\n\nCall `run_no_wait` on a `Task` or `Workflow` object to enqueue it fire-and-forget. This returns a `WorkflowRunRef` you can use to access the run ID and result later.\n\n```python\nref = say_hello.run(input=HelloInput(name=\"World\"), wait_for_result=False)\n```\n\nThere's also an async flavor:\n\n```python\nref = await say_hello.aio_run(\n    input=HelloInput(name=\"Async World\"), wait_for_result=False\n)\n```\n\nNote that the type of `input` here is a Pydantic model that matches the input schema of the task.\n\n#### Typescript\n\nCall `run_no_wait` on the `Task` object to enqueue it without waiting for the result. This returns a `WorkflowRunRef`.\n\n#### Go\n\nCall `RunNoWait` on the `Task` object to enqueue it without waiting for the result. This returns a `WorkflowRunRef`.\n\n```go\nrunRef, err := task.RunNoWait(context.Background(), SimpleInput{Message: \"Hello, World!\"})\nif err != nil {\n\treturn err\n}\n\nfmt.Println(runRef.RunId)\n```\n\n#### Ruby\n\nCall `run_no_wait` on the `Task` object to enqueue it without waiting for the result. This returns a `WorkflowRunRef`.\n\n```ruby\nref = SAY_HELLO.run_no_wait({ \"name\" => \"World\" })\n```\n\nWhen running a task fire-and-forget-style, you can also always retrieve the result later on. 
The workflow run ref that's returned from these trigger methods has a result method, which lets you retrieve the result of the triggered task if you need it later.\n\n#### Python\n\nUse `ref.result()` to block until the result is available:\n\n```python\nresult = ref.result()\n```\n\nor await `aio_result`:\n\n```python\nresult = await ref.aio_result()\n```\n\n#### Typescript\n\n```typescript\n// the return object of the enqueue method is a WorkflowRunRef which includes a listener for the result of the workflow\nconst result = await run.output;\nconsole.log(result);\n\n// if you need to subscribe to the result of the workflow at a later time, you can use the runRef method and the stored runId\nconst ref = hatchet.runRef(runId);\nconst result2 = await ref.output;\nconsole.log(result2);\n```\n\n#### Go\n\n```go\nresult, err := runRef.Result()\nif err != nil {\n\treturn err\n}\n\nvar resultOutput SimpleOutput\nerr = result.TaskOutput(\"process-message\").Into(&resultOutput)\nif err != nil {\n\treturn err\n}\n\nfmt.Println(resultOutput.Result)\n```\n\n#### Ruby\n\n```ruby\nresult = ref.result\n```\n\n### Triggering from events\n\nAnother method of running tasks fire-and-forget style is via [pushing events](/v1/external-events/pushing-events) to Hatchet. If a task is configured to be triggered by an event, then when the event is pushed to Hatchet, a corresponding run will be triggered using the payload of the event as the input to the run.\n\nYou can push events to Hatchet directly using the SDKs, or via [webhooks](/v1/webhooks), which are converted into Hatchet events internally on ingestion."},"26":{"title":"Crons","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"Another common way to run tasks is on a cron schedule, which Hatchet [supports natively](/v1/cron-runs). 
Crons are useful for running tasks that are expected to run at the same time every day, such as data processing pipelines, reconciliation jobs, and so on.\n\nNote that Hatchet supports second-level cron granularity, although in most cases using the minutes as the most granular level is perfectly fine."},"27":{"title":"Scheduled runs","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"Finally, Hatchet also supports [scheduling a run to be triggered at a specific time in the future](/v1/scheduled-runs). This is particularly useful for situations where you want to wait a known amount of time before running some task, such as sending a follow up email or a welcome email to a new user, or allowing your customers to choose when they want something to run, such as sending a reminder, for instance."},"28":{"title":"Triggering from the dashboard","pageTitle":"Running Tasks","pageRoute":"hatchet://docs/v1/running-your-task","content":"Finally, there are a number of pages in the dashboard that have a `Trigger Run` button. You can provide run parameters such as input, additional metadata, and a scheduled time.\n\n![Create Scheduled Run](../../public/schedule-dash.gif)"},"29":{"title":"Durable Execution","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/durable-execution","content":"# Introduction to Durable Execution\n\nAt its core, Hatchet is a _**durable execution**_ platform. Unfortunately, durable execution is an overloaded, often-confusing term. 
If you're new to durable execution, or are curious for a refresher, we wrote a [blog post outlining the core ideas](https://hatchet.run/blog/durable-execution).\n\nAt its most basic level, durable execution provides a toolbox that, when used correctly, gives you some guarantees about tasks and workflows you write in Hatchet that you wouldn't get from an ordinary task queueing system."},"30":{"title":"Guarantees","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/durable-execution","content":"One of the main promises of durable execution, when used correctly, is to give your tasks something closer to [exactly-once semantics](https://www.confluent.io/blog/exactly-once-semantics-are-possible-heres-how-apache-kafka-does-it/) than you'd get from traditional task queues. In practice, this means that a durable task can guarantee that your application logic is cached correctly and retry-safe, such that every time a piece of a durable task completes, it creates a new checkpoint (an entry in a durable event log), from which we can replay without needing to re-execute the actual application logic.\n\nThis means that if you run a durable task to a midway point and the worker it's running on crashes, you can replay the task from whatever checkpoint it last reached without re-running any of the previous steps or duplicating any work. This is priceless in systems that cannot reasonably be made idempotent, so replaying on failure is impossible."},"31":{"title":"Core Assumptions","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/durable-execution","content":"The core assumption of durable execution in Hatchet is that durable tasks only do one of two things: They can _wait_ for something, such as a [sleep to complete](/v1/sleep) or an [event to be received](/v1/events), or they can _[spawn child tasks](/v1/child-spawning)_. 
These operations can also be composed, such that you can have a durable task wait for _either_ a sleep to complete _or_ an event to be pushed, whichever comes first. You can achieve this behavior by using [or groups](/v1/patterns/durable-task-execution#or-groups)."},"32":{"title":"Example Uses","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/durable-execution","content":"There are lots of cases where durable execution is useful. A few common ones where it's an obvious choice are:\n\n\n1. Agentic workflows, especially ones that require human-in-the-loop steps, which continuously spawn children, collect results, spawn more children, and so on, in a loop. Durable tasks are an obvious fit here, since the durable task can be replayed from where it left off without losing any of the progress that was made by the agent in the past, and without needing to e.g. replay the human-in-the-loop portions of the task, such as approvals or similar.\n2. Tasks that are hard to make idempotent, where we cannot replay part of the task once it's completed. For example, something that involves sending an email to a customer, or updating a value in a table midway through.\n3. Dynamic workflows, where we build a DAG at runtime by selecting which child workflows to spawn based on the input to the durable task or the results of upstream checkpoints. This is particularly useful for powering tools like drag-and-drop DAG builders."},"33":{"title":"Learn More!","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/durable-execution","content":"There are lots of durable execution concepts and features to cover, and we're only just scratching the surface here! 
Check out our more detailed [durable execution documentation](/v1/patterns) to keep learning and building."},"34":{"title":"Scheduled Runs","pageTitle":"Scheduled Runs","pageRoute":"hatchet://docs/v1/scheduled-runs","content":"# Scheduled Runs\n\n> This example assumes we have a [task](/v1/tasks) registered on a running [worker](/v1/workers).\n\nScheduled runs allow you to trigger a task at a specific time in the future. Some example use cases of scheduling runs might include:\n\n- Sending a reminder email at a specific time after a user took an action.\n- Running a one-time maintenance task at a predetermined time as determined by your application. For instance, you might want to run a database vacuum during a maintenance window any time a task matches a certain criteria.\n- Allowing a customer to decide when they want your application to perform a specific task. For instance, if your application is a simple alarm app that sends a customer a notification at a time that they specify, you might create a scheduled run for each alarm that the customer sets.\n\nHatchet supports scheduled runs to run on a schedule defined in a few different ways:\n\n- [Programmatically](/v1/scheduled-runs#programmatically-creating-scheduled-runs): Use the Hatchet SDKs to dynamically set the schedule of a task.\n- [Hatchet Dashboard](/v1/scheduled-runs#managing-scheduled-runs-in-the-hatchet-dashboard): Manually create scheduled runs from the Hatchet Dashboard.\n\n> **Warning:** The scheduled time is when Hatchet **enqueues** the task, not when the run\n>   starts. 
Scheduling constraints like concurrency limits, rate limits, and retry\n>   policies can affect run start times."},"35":{"title":"Programmatically Creating Scheduled Runs","pageTitle":"Scheduled Runs","pageRoute":"hatchet://docs/v1/scheduled-runs","content":"### Create a Scheduled Run\n\nYou can create dynamic scheduled runs programmatically via the API to run tasks at a specific time in the future.\n\nHere's an example of creating a scheduled run to trigger a task tomorrow at noon:\n\n#### Python\n\n```python\nfrom datetime import datetime\n\nfrom examples.simple.worker import simple\n\nschedule = simple.schedule(datetime(2025, 3, 14, 15, 9, 26))\n\n## 👀 do something with the id\nprint(schedule.id)\n```\n\n#### Typescript\n\n```typescript\nconst runAt = new Date(new Date().setHours(12, 0, 0, 0) + 24 * 60 * 60 * 1000);\n\nconst scheduled = await simple.schedule(runAt, {\n  Message: 'hello',\n});\n\n// 👀 Get the scheduled run ID of the workflow\n// it may be helpful to store the scheduled run ID of the workflow\n// in a database or other persistent storage for later use\nconst scheduledRunId = scheduled.metadata.id;\nconsole.log(scheduledRunId);\n```\n\n#### Go\n\n```go\nscheduledRun, err := client.Schedules().Create(\n\tcontext.Background(),\n\t\"scheduled\",\n\tfeatures.CreateScheduledRunTrigger{\n\t\tTriggerAt: time.Now().Add(1 * time.Minute),\n\t\tInput:     map[string]interface{}{\"message\": \"Hello, World!\"},\n\t},\n)\nif err != nil {\n\tlog.Fatalf(\"failed to create scheduled run: %v\", err)\n}\n```\n\n#### Ruby\n\n```ruby\nschedule = SIMPLE.schedule(Time.now + 86_400, input: { \"message\" => \"Hello, World!\" })\n\n## do something with the id\nputs schedule.metadata.id\n```\n\nIn this example you can have different scheduled times for different customers, or dynamically set the scheduled time based on some other business logic.\n\nWhen creating a scheduled run via the API, you will receive a scheduled run object with a metadata property containing the id 
of the scheduled run. This id can be used to reference the scheduled run when deleting the scheduled run and is often stored in a database or other persistence layer.\n\n> **Info:** Note: Be mindful of the time zone of the scheduled run. Scheduled runs are\n>   **always** stored and returned in UTC.\n\n### Deleting a Scheduled Run\n\nYou can delete a scheduled run by calling the `delete` method on the scheduled client.\n\n#### Python\n\n```python\nhatchet.scheduled.delete(scheduled_id=scheduled_run.metadata.id)\n```\n\n#### Typescript\n\n```typescript\nawait hatchet.scheduled.delete(scheduled);\n```\n\n#### Go\n\n```go\nerr = client.Schedules().Delete(\n\tcontext.Background(),\n\tscheduledRun.Metadata.Id,\n)\nif err != nil {\n\tlog.Fatalf(\"failed to delete scheduled run: %v\", err)\n}\n```\n\n#### Ruby\n\n```ruby\nhatchet.scheduled.delete(scheduled_run.metadata.id)\n```\n\n### Listing Scheduled Runs\n\nYou can list all scheduled runs for a task by calling the `list` method on the scheduled client.\n\n#### Python\n\n```python\nscheduled_runs = hatchet.scheduled.list()\n```\n\n#### Typescript\n\n```typescript\nconst scheduledRuns = await hatchet.scheduled.list({\n  workflow: simple,\n});\nconsole.log(scheduledRuns);\n```\n\n#### Go\n\n```go\nscheduledRuns, err := client.Schedules().List(\n\tcontext.Background(),\n\trest.WorkflowScheduledListParams{},\n)\nif err != nil {\n\tlog.Fatalf(\"failed to list scheduled runs: %v\", err)\n}\n```\n\n#### Ruby\n\n```ruby\nscheduled_runs = hatchet.scheduled.list\n```\n\n### Rescheduling a Scheduled Run\n\nIf you need to change the trigger time for an existing scheduled run, you can reschedule it by updating its `triggerAt`.\n\n#### Python\n\n```python\nhatchet.scheduled.update(\n    scheduled_id=scheduled_run.metadata.id,\n    trigger_at=datetime.now(tz=timezone.utc) + timedelta(hours=1),\n)\n```\n\n#### Typescript\n\n```typescript\nawait hatchet.scheduled.update(scheduledRunId, {\n  triggerAt: new Date(Date.now() + 60 * 60 * 
1000),\n});\n```\n\n#### Ruby\n\n```ruby\nhatchet.scheduled.update(\n  scheduled_run.metadata.id,\n  trigger_at: Time.now + 3600\n)\n```\n\n> **Warning:** You can only reschedule scheduled runs created via the API (not runs created\n>   via a code-defined schedule), and Hatchet may reject rescheduling if the run\n>   has already triggered.\n\n### Bulk operations (delete / reschedule)\n\nHatchet supports bulk operations for scheduled runs. You can bulk delete scheduled runs, and you can bulk reschedule scheduled runs by providing a list of updates.\n\n#### Python\n\n```python\nhatchet.scheduled.bulk_delete(scheduled_ids=[id])\n\nhatchet.scheduled.bulk_delete(\n    workflow_id=\"workflow_id\",\n    statuses=[ScheduledRunStatus.SCHEDULED],\n    additional_metadata={\"customer_id\": \"customer-a\"},\n)\n```\n```python\nhatchet.scheduled.bulk_update(\n    [\n        (id, datetime.now(tz=timezone.utc) + timedelta(hours=2)),\n    ]\n)\n```\n\n#### Typescript\n\n```typescript\nawait hatchet.scheduled.bulkDelete({\n  scheduledRuns: [scheduledRunId],\n});\n```\n```typescript\nawait hatchet.scheduled.bulkUpdate([\n  { scheduledRun: scheduledRunId, triggerAt: new Date(Date.now() + 2 * 60 * 60 * 1000) },\n]);\n```\n\n#### Ruby\n\n```ruby\nhatchet.scheduled.bulk_delete(scheduled_ids: [id])\n```\n```ruby\nhatchet.scheduled.bulk_update(\n  [[id, Time.now + 7200]]\n)\n```"},"36":{"title":"Managing Scheduled Runs in the Hatchet Dashboard","pageTitle":"Scheduled Runs","pageRoute":"hatchet://docs/v1/scheduled-runs","content":"In the Hatchet Dashboard, you can view and manage scheduled runs for your tasks.\n\nNavigate to \"Triggers\" > \"Scheduled Runs\" in the left sidebar and click \"Create Scheduled Run\" at the top right.\n\nYou can specify run parameters such as Input, Additional Metadata, and the Scheduled Time.\n\n![Create Scheduled Run](../../public/schedule-dash.gif)\n\nYou can also manage existing scheduled runs:\n\n- **Single-run actions**: Use the per-row actions menu to 
**Reschedule** or **Delete** an individual scheduled run.\n- **Bulk actions**: Use the **Actions** menu to bulk **Delete** or **Reschedule** either:\n  - The selected rows, or\n  - All rows matching the current filters (including “all” if no filters are set).\n\n> **Info:** In the dashboard, reschedule/delete actions may be disabled for runs that were\n>   created via a code-defined schedule, and rescheduling may be disabled for runs\n>   that have already triggered."},"37":{"title":"Scheduled Run Considerations","pageTitle":"Scheduled Runs","pageRoute":"hatchet://docs/v1/scheduled-runs","content":"When using scheduled runs, there are a few considerations to keep in mind:\n\n1. **Time Zone**: Scheduled runs are stored and returned in UTC. Make sure to consider the time zone when defining your scheduled time.\n\n2. **Execution Time**: The actual execution time of a scheduled run may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the task as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.\n\n3. **Missed Schedules**: If a scheduled task is missed (e.g., due to system downtime), Hatchet will not automatically run the missed instances when the service comes back online.\n\n4. **Overlapping Schedules**: If a task is still running when a second scheduled run is scheduled to start, Hatchet will start a new instance of the task or respect [concurrency](/v1/concurrency) policy."},"38":{"title":"Cron Runs","pageTitle":"Cron Runs","pageRoute":"hatchet://docs/v1/cron-runs","content":"# Recurring Runs with Cron\n\n> This example assumes we have a [task](/v1/tasks) registered on a running [worker](/v1/workers).\n\nA [Cron](https://en.wikipedia.org/wiki/Cron) is a time-based job scheduler that allows you to define when a task should be executed automatically on a pre-determined schedule.\n\nSome example use cases for cron-style tasks might include:\n\n1. 
Running a daily report at a specific time.\n2. Sending weekly digest emails to users about their activity from the past week.\n3. Running a monthly billing process to generate invoices for customers.\n\nHatchet supports cron triggers to run on a schedule defined in a few different ways:\n\n- [Task Definitions](/v1/cron-runs#defining-a-cron-in-your-task-definition): Define a cron expression in your task definition to trigger the task on a predefined schedule.\n- [Dynamic Programmatically](/v1/cron-runs#programmatically-creating-cron-triggers): Use the Hatchet SDKs to dynamically set the cron schedule of a task.\n- [Hatchet Dashboard](/v1/cron-runs#managing-cron-jobs-in-the-hatchet-dashboard): Manually create cron triggers from the Hatchet Dashboard.\n\n> **Warning:** The expression is when Hatchet **enqueues** the task, not when the run starts.\n>   Scheduling constraints like concurrency limits, rate limits, and retry\n>   policies can affect run start times.\n\n### Cron Expression Syntax\n\nCron expressions in Hatchet follow the standard cron syntax. Hatchet supports both 5-field and 6-field expressions.\nA cron expression consists of 5 to 6 fields separated by spaces. If there are 6 fields, the first field is seconds; if\nthere are 5 fields, the first field is minutes.\n\n```\n┌───────────── second (0 - 59) (optional)\n│ ┌───────────── minute (0 - 59)\n│ │ ┌───────────── hour (0 - 23)\n│ │ │ ┌───────────── day of the month (1 - 31)\n│ │ │ │ ┌───────────── month (1 - 12)\n│ │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday)\n* * * * * *\n```\n\nEach field can contain a specific value, an asterisk (`*`) to represent all possible values, or a range of values. 
Here are some examples of cron expressions:\n\n- `0 0 * * *`: Run every day at midnight\n- `*/15 * * * *`: Run every 15 minutes\n- `0 9 * * 1`: Run every Monday at 9 AM\n- `0 0 1 * *`: Run on the first day of every month at midnight\n- `30 * * * * *`: Run at 30 seconds past every minute (6-field)\n\n> **Info:** Keep in mind, Hatchet Cloud meters by Task runs so use seconds wisely. Have\n>   questions about pricing? [Contact us](https://hatchet.run/office-hours)"},"39":{"title":"Defining a Cron in Your Task Definition","pageTitle":"Cron Runs","pageRoute":"hatchet://docs/v1/cron-runs","content":"You can define a task with a cron schedule by configuring the cron expression as part of the task definition:\n\n#### Python-Sync\n\n```python\n# Adding a cron trigger to a workflow is as simple\n# as adding a `cron expression` to the `on_cron`\n# prop of the workflow definition\n\ncron_workflow = hatchet.workflow(name=\"CronWorkflow\", on_crons=[\"* * * * *\"])\n\n\n@cron_workflow.task()\ndef step1(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    return {\n        \"time\": \"step1\",\n    }\n```\n\n#### Python-Async\n\n```python\n# Adding a cron trigger to a workflow is as simple\n# as adding a `cron expression` to the `on_cron`\n# prop of the workflow definition\n\ncron_workflow = hatchet.workflow(name=\"CronWorkflow\", on_crons=[\"* * * * *\"])\n\n\n@cron_workflow.task()\ndef step1(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    return {\n        \"time\": \"step1\",\n    }\n```\n\n#### Typescript\n\n```typescript\nexport const onCron = hatchet.workflow({\n  name: 'on-cron-workflow',\n  on: {\n    // 👀 add a cron expression to run the workflow every 15 minutes\n    cron: '*/15 * * * *',\n  },\n});\n```\n\n#### Go\n\n```go\ndailyCleanup := client.NewStandaloneTask(\"cleanup-temp-files\", func(ctx hatchet.Context, input CronInput) (CronOutput, error) {\n\tlog.Printf(\"Running daily cleanup at %s\", input.Timestamp)\n\n\ttime.Sleep(2 * 
time.Second)\n\n\treturn CronOutput{\n\t\tJobName:    \"daily-cleanup\",\n\t\tExecutedAt: time.Now().Format(time.RFC3339),\n\t\tNextRun:    \"Next run: tomorrow at 2 AM\",\n\t}, nil\n},\n\thatchet.WithWorkflowCron(\"0 2 * * *\"),\n\thatchet.WithWorkflowCronInput(CronInput{\n\t\tTimestamp: time.Now().Format(time.RFC3339),\n\t}),\n\thatchet.WithWorkflowDescription(\"Daily cleanup and maintenance tasks\"),\n)\n```\n\n#### Ruby\n\n```ruby\nCRON_WORKFLOW = HATCHET.workflow(\n  name: \"CronWorkflow\",\n  on_crons: [\"*/5 * * * *\"]\n)\n\nCRON_WORKFLOW.task(:cron_task) do |input, ctx|\n  puts \"Cron task executed at #{Time.now}\"\n  { \"status\" => \"success\" }\nend\n```\n\nIn the examples above, we set the `on cron` property of the task. The property specifies the cron expression that determines when the task should be triggered.\n\n\n  Note: When modifying a cron in your task definition, it will override any cron\n  schedule for previous crons defined in previous task definitions, but crons\n  created via the API or Dashboard will still be respected."},"40":{"title":"Programmatically Creating Cron Triggers","pageTitle":"Cron Runs","pageRoute":"hatchet://docs/v1/cron-runs","content":"### Create a Cron Trigger\n\nYou can create dynamic cron triggers programmatically via the API. 
This is useful if you want to create a cron trigger that is not known at the time of task definition.\n\nHere's an example of creating a cron to trigger a report for a specific customer every day at noon:
receive a cron trigger object with a metadata property containing the id of the cron trigger. This id can be used to reference the cron trigger when deleting the cron trigger and is often stored in a database or other persistence layer.\n\n\n  Note: Cron Name and Expression are required fields when creating a cron\n  trigger and we enforce a unique constraint on the two.\n\n\n### Delete a Cron Trigger\n\nYou can delete a cron trigger by passing the cron object or a cron trigger id to the delete method.\n\n#### Python-Sync\n\n```python\nhatchet.cron.delete(cron_id=cron_trigger.metadata.id)\n```\n\n#### Python-Async\n\n```python\nawait hatchet.cron.aio_delete(cron_id=cron_trigger.metadata.id)\n```\n\n#### Typescript\n\n```typescript\nawait hatchet.crons.delete(cronId);\n```\n\n#### Go\n\n```go\nerr = client.Crons().Delete(context.Background(), createdCron.Metadata.Id)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nhatchet.cron.delete(cron_trigger.metadata.id)\n```\n\n\n  Note: Deleting a cron trigger will not cancel any currently running instances\n  of the task. 
It will simply stop the cron trigger from triggering the task\n  again.\n\n\n### List Cron Triggers\n\nRetrieves a list of all task cron triggers matching the criteria.\n\n#### Python-Sync\n\n```python\ncron_triggers = hatchet.cron.list()\n```\n\n#### Python-Async\n\n```python\nawait hatchet.cron.aio_list()\n```\n\n#### Typescript\n\n```typescript\nconst crons = await hatchet.crons.list({\n  workflow: simple,\n});\n```\n\n#### Go\n\n```go\ncronList, err := client.Crons().List(context.Background(), rest.CronWorkflowListParams{\n\tAdditionalMetadata: &[]string{\"description:Daily cleanup and maintenance tasks\"},\n})\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\ncron_triggers = hatchet.cron.list\n```"},"41":{"title":"Managing Cron Triggers in the Hatchet Dashboard","pageTitle":"Cron Runs","pageRoute":"hatchet://docs/v1/cron-runs","content":"In the Hatchet Dashboard, you can view and manage cron triggers for your tasks.\n\nNavigate to \"Triggers\" > \"Cron Jobs\" in the left sidebar and click \"Create Cron Job\" at the top right.\n\nYou can specify run parameters such as Input, Additional Metadata, and the Expression.\n\n![Create Cron Job](../../public/cron-dash.gif)"},"42":{"title":"Cron Considerations","pageTitle":"Cron Runs","pageRoute":"hatchet://docs/v1/cron-runs","content":"When using cron triggers, there are a few considerations to keep in mind:\n\n1. **Time Zone**: Cron schedules are UTC. Make sure to consider the time zone when defining your cron expressions.\n\n2. **Execution Time**: The actual execution time of a cron-triggered task may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the task as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.\n\n3. **Missed Schedules**: If a scheduled task is missed (e.g., due to system downtime), Hatchet will **not** automatically run the missed instances. 
It will wait for the next scheduled time to trigger the task.\n\n4. **Overlapping Schedules**: If a task is still running when the next scheduled time arrives, Hatchet will start a new instance of the task or respect the [concurrency](/v1/concurrency) policy."},"43":{"title":"Bulk Runs","pageTitle":"Bulk Runs","pageRoute":"hatchet://docs/v1/bulk-run","content":"# Bulk Run Many Tasks\n\nOften you may want to run a task multiple times with different inputs. There is significant overhead (i.e. network roundtrips) to write the task, so if you're running multiple tasks, it's best to use the bulk run methods.\n\n#### Python\n\nYou can use the `aio_run_many` method to bulk run a task. This will return a list of results.\n\n```python\ngreetings = [\"Hello, World!\", \"Hello, Moon!\", \"Hello, Mars!\"]\n\nresults = await child_task.aio_run_many(\n    [\n        # run each greeting as a task in parallel\n        child_task.create_bulk_run_item(\n            input=SimpleInput(message=greeting),\n        )\n        for greeting in greetings\n    ]\n)\n\n# this will await all results and return a list of results\nprint(results)\n```\n\n> **Info:** `Workflow.create_bulk_run_item` is a typed helper to create the inputs for\n>   each task.\n\nThere are additional bulk methods available on the `Workflow` object.\n\n- `aio_run_many`\n- `aio_run_many_no_wait`\n\nAnd blocking variants:\n\n- `run_many`\n- `run_many_no_wait`\n\nAs with the run methods, you can call bulk methods from within a task and the runs will be associated with the parent task in the dashboard.\n\n#### Typescript\n\nYou can use the `run` method directly to bulk run tasks by passing an array of inputs. 
This will return a list of results.\n\n```typescript\nconst res = await simple.run([\n  {\n    Message: 'HeLlO WoRlD',\n  },\n  {\n    Message: 'Hello MoOn',\n  },\n]);\n\n// 👀 Access the results of the Task\nconsole.log(res[0].TransformedMessage);\nconsole.log(res[1].TransformedMessage);\n```\n\nThere are additional bulk methods available on the `Task` object.\n\n- `run`\n- `runNoWait`\n\nAs with the run methods, you can call bulk methods on the task fn context parameter within a task and the runs will be associated with the parent task in the dashboard.\n\n```typescript\nconst parent = hatchet.task({\n  name: 'simple',\n  fn: async (input: SimpleInput, ctx) => {\n    // Bulk run two tasks in parallel\n    const child = await ctx.bulkRunChildren([\n      {\n        workflow: simple,\n        input: {\n          Message: 'Hello, World!',\n        },\n      },\n      {\n        workflow: simple,\n        input: {\n          Message: 'Hello, Moon!',\n        },\n      },\n    ]);\n\n    return {\n      TransformedMessage: `${child[0].TransformedMessage} ${child[1].TransformedMessage}`,\n    };\n  },\n});\n```\n\nAvailable bulk methods on the `Context` object are: - `bulkRunChildren` - `bulkRunChildrenNoWait`\n\n#### Go\n\nYou can use the `RunMany` method directly on the `Workflow` or `StandaloneTask` instance to bulk run tasks by passing an array of inputs. This will return a list of run IDs.\n\n```go\n// Prepare inputs as []RunManyOpt for bulk run\ninputs := make([]hatchet.RunManyOpt, len(bulkInputs))\nfor i, input := range bulkInputs {\n\tinputs[i] = hatchet.RunManyOpt{\n\t\tInput: input,\n\t}\n}\n\n// Run workflows in bulk\nctx := context.Background()\nrunRefs, err := workflow.RunMany(ctx, inputs)\nif err != nil {\n\tlog.Fatalf(\"failed to run bulk workflows: %v\", err)\n}\n```\n\nAdditional bulk methods are coming soon for the Go SDK. 
Join our [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\n```ruby\ngreetings = [\"Hello, World!\", \"Hello, Moon!\", \"Hello, Mars!\"]\n\nresults = CHILD_TASK_WF.run_many(\n  greetings.map do |greeting|\n    CHILD_TASK_WF.create_bulk_run_item(\n      input: { \"message\" => greeting }\n    )\n  end\n)\n\nputs results\n```"},"44":{"title":"Webhooks","pageTitle":"Webhooks","pageRoute":"hatchet://docs/v1/webhooks","content":"# Webhooks\n\nWebhooks allow external systems to trigger Hatchet workflows by sending HTTP requests to dedicated endpoints. This enables real-time integration with third-party services like GitHub, Stripe, Slack, or any system that can send webhook events."},"45":{"title":"Guides","pageTitle":"Webhooks","pageRoute":"hatchet://docs/v1/webhooks","content":"We have step-by-step guides for the most common webhook integrations:\n\n- [**Stripe**](/cookbooks/webhooks-stripe) — payments, subscriptions, invoices\n- [**GitHub**](/cookbooks/webhooks-github) — pull requests, issues, pushes\n- [**Slack**](/cookbooks/webhooks-slack) — slash commands, interactive components, event subscriptions"},"46":{"title":"Creating a webhook","pageTitle":"Webhooks","pageRoute":"hatchet://docs/v1/webhooks","content":"To create a webhook, you'll need to fill out some fields that tell Hatchet how to determine which workflows to trigger from your webhook, and how to validate it when it arrives from the sender. In particular, you'll need to provide the following fields:\n\n#### Name\n\nThe **Webhook Name** is tenant-unique (meaning a single tenant can only use each name once), and is used to create the URL for where the incoming webhook request should be sent. For instance, if your tenant id was `d60181b7-da6c-4d4c-92ec-8aa0fc74b3e5` and your webhook name was `my-webhook`, then the URL might look like `https://cloud.onhatchet.run/api/v1/stable/tenants/d60181b7-da6c-4d4c-92ec-8aa0fc74b3e5/webhooks/my-webhook`. 
Note that you can copy this URL in the dashboard.\n\n#### Source\n\nThe **Source** indicates the source of the webhook, which can be a pre-provided one for easy setup like Stripe or Github, or a \"generic\" one, which lets you configure all of the necessary fields for your webhook integration based on what the webhook sender provides.\n\n#### Event Key Expression\n\nThe **Event Key Expression** is a [CEL](https://cel.dev/) expression that you can use to create a dynamic event key from the payload and headers of the incoming webhook. You can either set this to a constant value, like `webhook`, or you could set it to something dynamic using those two options. Some examples:\n\n1. `'stripe:' + input.type` would create event keys where `'stripe:'` is a prefix for all keys indicating the webhook came from Stripe, and `input.type` selects the `type` field off of the webhook payload and uses it to create the final event key. The result might look something like `stripe:payment_intent.created`.\n2. `'github:' + headers['x-github-event'] + ':' + input.action` could create a key like `github:star:created`\n\n> **Info:** The result of the event key expression is what Hatchet will use as the event\n>   key, so you'd need to set a matching event key as a trigger on your workflows\n>   in order to trigger them from the webhooks you create. For instance, you might\n>   add `on_events=[\"stripe:payment_intent.created\"]` to listen for payment intent\n>   created events in the previous example.\n\n#### Scope Expression (Optional)\n\nThe **Scope Expression** is an optional [CEL](https://cel.dev/) expression that evaluates to a string used to filter which workflows to trigger. This is useful when you have multiple workflows listening to the same event key but want to route to specific workflows based on the webhook content.\n\nLike the event key expression, you have access to `input` (the webhook payload) and `headers` (the request headers). Some examples:\n\n1. 
`input.customer_id` would use the customer ID from the payload as the scope\n2. `headers['x-organization-id']` would use a header value as the scope\n3. `input.metadata.environment` could route to different workflows based on environment\n\n#### Static Payload (Optional)\n\nThe **Static Payload** is an optional JSON object that gets merged with the incoming webhook payload before it's passed to your workflows. This is useful for:\n\n- Adding constant metadata to all events from this webhook\n- Injecting configuration values that aren't in the original payload\n- Overriding specific fields from the incoming payload\n\n> **Info:** When there's a key collision between the incoming webhook payload and the\n>   static payload, the static payload values take precedence.\n\nFor example, if you set a static payload of `{\"source\": \"stripe\", \"environment\": \"production\"}` and receive a webhook with `{\"type\": \"payment_intent.created\", \"source\": \"api\"}`, the final payload passed to your workflow would be `{\"type\": \"payment_intent.created\", \"source\": \"stripe\", \"environment\": \"production\"}`.\n\n#### Authentication\n\nFinally, you'll need to specify how Hatchet should authenticate incoming webhook requests. 
For non-generic sources like Stripe and Github, Hatchet has presets for most of the fields, so in most cases you'd only need to provide a secret.\n\nIf you're using a generic source, then you'll need to specify an authentication method (either basic auth, an API key, HMAC-based auth), and provide the required fields (such as a username and password in the basic auth case).\n\n> **Warning:** Hatchet encrypts any secrets you provide for validating incoming webhooks.\n\nThe different authentication methods require different fields to be provided:\n\n- **Pre-configured sources** (Stripe, GitHub, Slack): Only require a webhook secret\n- **Generic sources** require different fields depending on the selected authentication method:\n  - **Basic Auth**: Requires a username and password\n  - **API Key**: Requires header name containing the key on incoming requests, and secret key itself\n  - **HMAC**: Requires a header name containing the secret on incoming requests, the secret itself, an encoding method (e.g. hex, base64), and an algorithm (e.g. `SHA256`, `SHA1`, etc.)."},"47":{"title":"Usage","pageTitle":"Webhooks","pageRoute":"hatchet://docs/v1/webhooks","content":"While you're creating your webhook (and also after you've created it), you can copy the webhook URL, which is what you'll provide to the webhook _sender_.\n\nOnce you've done that, the last thing to do is register the event keys you want your workers to listen for so that they can be triggered by incoming webhooks.\n\nFor examples on how to do this, see the [documentation on event triggers](/v1/external-events/run-on-event)."},"48":{"title":"Pushing Events","pageTitle":"Pushing Events","pageRoute":"hatchet://docs/v1/external-events/pushing-events","content":"# Pushing Events\n\nYou can push an event to Hatchet by calling the `push` method on the Hatchet event client and providing the event name and payload. 
Any tasks that have registered an [event trigger](/v1/external-events/run-on-event) for that event key will be run.\n\n#### Python\n\n```python\nhatchet.event.push(\"user:create\", {\"should_skip\": False})\n```\n\n#### Typescript\n\n```typescript\nconst res = await hatchet.events.push('simple-event:create', {\n  Message: 'hello',\n  ShouldSkip: false,\n});\n```\n\n#### Go\n\n```go\nerr := client.Events().Push(\n\tcontext.Background(),\n\t\"simple-event:create\",\n\tEventInput{\n\t\tMessage: \"Hello, World!\",\n\t},\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET.event.push(\"user:create\", { \"should_skip\" => false })\n```\n\n> **Info:** Event triggers evaluate tasks to run at the time of the event. If an event is\n>   received before the task is registered, the task will not be run."},"49":{"title":"Event Trigger","pageTitle":"Event Trigger","pageRoute":"hatchet://docs/v1/external-events/run-on-event","content":"# Event Trigger\n\n> This example assumes we have a [task](/v1/tasks) registered on a running [worker](/v1/workers).\n\nRun-on-event allows you to trigger one or more tasks when a specific event occurs. This is useful when you need to execute a task in response to an ephemeral event where the result is not important. A few common use cases for event-triggered task runs are:\n\n1. Running a task when an ephemeral event is received, such as a webhook or a message from a queue.\n2. When you want to run multiple independent tasks in response to a single event. For instance, if you wanted to run a `send_welcome_email` task, and you also wanted to run a `grant_new_user_credits` task, and a `reward_referral` task, all triggered by the signup. In this case, you might declare all three of those tasks with an event trigger for `user:signup`, and then have them all kick off when that event happens.\n\n> **Info:** Event triggers evaluate tasks to run at the time of the event. 
If an event is\n>   received before the task is registered, the task will not be run."},"50":{"title":"Declaring Event Triggers","pageTitle":"Event Trigger","pageRoute":"hatchet://docs/v1/external-events/run-on-event","content":"To run a task on an event, you need to declare the event that will trigger the task. This is done by declaring the `on_events` property in the task declaration.\n\n#### Python\n\n```python\nEVENT_KEY = \"user:create\"\nSECONDARY_KEY = \"foobarbaz\"\nWILDCARD_KEY = \"subscription:*\"\n\n\nclass EventWorkflowInput(BaseModel):\n    should_skip: bool\n\n\nevent_workflow = hatchet.workflow(\n    name=\"EventWorkflow\",\n    on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n    input_validator=EventWorkflowInput,\n)\n```\n\n#### Typescript\n\n```typescript\nexport const lower = hatchet.workflow({\n  name: 'lower',\n  // 👀 Declare the event that will trigger the workflow\n  onEvents: ['simple-event:create'],\n});\n```\n\n#### Go\n\n```go\nconst SimpleEvent = \"simple-event:create\"\n\nfunc Lower(client *hatchet.Client) *hatchet.StandaloneTask {\n\treturn client.NewStandaloneTask(\n\t\t\"lower\", func(ctx hatchet.Context, input EventInput) (*LowerTaskOutput, error) {\n\t\t\treturn &LowerTaskOutput{\n\t\t\t\tTransformedMessage: strings.ToLower(input.Message),\n\t\t\t}, nil\n\t\t},\n\t\thatchet.WithWorkflowEvents(SimpleEvent),\n\t)\n}\n```\n\n#### Ruby\n\n```ruby\nEVENT_KEY = \"user:create\"\nSECONDARY_KEY = \"foobarbaz\"\nWILDCARD_KEY = \"subscription:*\"\n\nEVENT_WORKFLOW = HATCHET.workflow(\n  name: \"EventWorkflow\",\n  on_events: [EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY]\n)\n```\n\n> **Info:** Note: Multiple tasks can be triggered by the same event.\n\n> **Info:** As of engine version 0.65.0, Hatchet supports wildcard event triggers using\n>   the `*` wildcard pattern. 
your event key, which would match incoming events like `subscription:create`,\n>   `subscription:renew`, `subscription:cancel`, and so on.
LowerWithFilter(client *hatchet.Client) *hatchet.StandaloneTask {\n\treturn client.NewStandaloneTask(\n\t\t\"lower\", accessFilterPayload,\n\t\thatchet.WithWorkflowEvents(SimpleEvent),\n\t\thatchet.WithFilters(types.DefaultFilter{\n\t\t\tExpression: \"true\",\n\t\t\tScope:      \"example-scope\",\n\t\t\tPayload: map[string]interface{}{\n\t\t\t\t\"main_character\":       \"Anna\",\n\t\t\t\t\"supporting_character\": \"Stiva\",\n\t\t\t\t\"location\":             \"Moscow\"},\n\t\t}),\n\t)\n}\n```\n\n#### Ruby\n\n```ruby\nEVENT_WORKFLOW_WITH_FILTER = HATCHET.workflow(\n  name: \"EventWorkflow\",\n  on_events: [EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n  default_filters: [\n    Hatchet::DefaultFilter.new(\n      expression: \"true\",\n      scope: \"example-scope\",\n      payload: {\n        \"main_character\" => \"Anna\",\n        \"supporting_character\" => \"Stiva\",\n        \"location\" => \"Moscow\"\n      }\n    )\n  ]\n)\n\nEVENT_WORKFLOW.task(:task) do |input, ctx|\n  puts \"event received\"\n  ctx.filter_payload\nend\n```\n\nIn each of these cases, we register a filter with the workflow. Note that these \"declarative\" filters are overwritten each time your workflow is updated, so the ids associated with them will not be stable over time. 
This allows you to modify a filter in-place or remove a filter, and not need to manually delete it over the API.\n\n### Filters feature client\n\nYou also can create event filters by using the `filters` clients on the SDKs:\n\n#### Python\n\n```python\nhatchet.filters.create(\n    workflow_id=event_workflow.id,\n    expression=\"input.should_skip == false\",\n    scope=\"foobarbaz\",\n    payload={\n        \"main_character\": \"Anna\",\n        \"supporting_character\": \"Stiva\",\n        \"location\": \"Moscow\",\n    },\n)\n```\n\n#### Typescript\n\n```typescript\nhatchet.filters.create({\n  workflowId: lower.id,\n  expression: 'input.ShouldSkip == false',\n  scope: 'foobarbaz',\n  payload: {\n    main_character: 'Anna',\n    supporting_character: 'Stiva',\n    location: 'Moscow',\n  },\n});\n```\n\n#### Go\n\n```go\n_, err = client.Filters().Create(\n\tcontext.Background(),\n\trest.V1CreateFilterRequest{\n\t\tWorkflowId: uuid.MustParse(\"bb866b59-5a86-451b-8023-10d451db11d3\"),\n\t\tExpression: \"true\",\n\t\tScope:      \"example-scope\",\n\t},\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET_CLIENT.filters.create(\n  workflow_id: EVENT_WORKFLOW.id,\n  expression: \"input.should_skip == false\",\n  scope: \"foobarbaz\",\n  payload: {\n    \"main_character\" => \"Anna\",\n    \"supporting_character\" => \"Stiva\",\n    \"location\" => \"Moscow\"\n  }\n)\n```\n\n> **Warning:** Note the `scope` argument to the filter is required **both when creating a\n>   filter, and when pushing events**. If the scope on filter creation does not\n>   match the scope provided when pushing events, the filter will not apply.\n\nThen, push an event that uses the filter to determine whether to run. 
For instance, this run will be skipped, since the payload does not match the expression:\n\n#### Python\n\n```python\nhatchet.event.push(\n    event_key=EVENT_KEY,\n    payload={\n        \"should_skip\": True,\n    },\n    scope=\"foobarbaz\",\n)\n```\n\n#### Typescript\n\n```typescript\nhatchet.events.push(\n  SIMPLE_EVENT,\n  {\n    Message: 'hello',\n    ShouldSkip: true,\n  },\n  {\n    scope: 'foobarbaz',\n  }\n);\n```\n\n#### Go\n\n```go\nskipPayload := map[string]interface{}{\n\t\"shouldSkip\": true,\n}\nskipScope := \"foobarbaz\"\nerr = client.Events().Push(\n\tcontext.Background(),\n\t\"simple-event:create\",\n\tskipPayload,\n\tv0Client.WithFilterScope(&skipScope),\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET_CLIENT.event.push(\n  EVENT_KEY,\n  { \"should_skip\" => true },\n  scope: \"foobarbaz\"\n)\n```\n\nBut this one will be triggered since the payload _does_ match the expression:\n\n#### Python\n\n```python\nhatchet.event.push(\n    event_key=EVENT_KEY,\n    payload={\n        \"should_skip\": False,\n    },\n    scope=\"foobarbaz\",\n)\n```\n\n#### Typescript\n\n```typescript\nhatchet.events.push(\n  SIMPLE_EVENT,\n  {\n    Message: 'hello',\n    ShouldSkip: false,\n  },\n  {\n    scope: 'foobarbaz',\n  }\n);\n```\n\n#### Go\n\n```go\ntriggerPayload := map[string]interface{}{\n\t\"shouldSkip\": false,\n}\ntriggerScope := \"foobarbaz\"\nerr = client.Events().Push(\n\tcontext.Background(),\n\t\"simple-event:create\",\n\ttriggerPayload,\n\tv0Client.WithFilterScope(&triggerScope),\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET_CLIENT.event.push(\n  EVENT_KEY,\n  { \"should_skip\" => false },\n  scope: \"foobarbaz\"\n)\n```\n\n> **Info:** In Hatchet, filters are \"positive\", meaning that we look for _matches_ to the\n>   filter to determine which tasks to trigger."},"53":{"title":"Accessing the filter payload","pageTitle":"Event 
Filters","pageRoute":"hatchet://docs/v1/external-events/event-filters","content":"You can access the filter payload by using the `Context` in the task that was triggered by your event:\n\n#### Python\n\n```python\n@event_workflow_with_filter.task()\ndef filtered_task(input: EventWorkflowInput, ctx: Context) -> None:\n    print(ctx.filter_payload)\n```\n\n#### Typescript\n\n```typescript\nlowerWithFilter.task({\n  name: 'lowerWithFilter',\n  fn: (input, ctx) => {\n    console.log(ctx.filterPayload());\n  },\n});\n```\n\n#### Go\n\n```go\nfunc accessFilterPayload(ctx hatchet.Context, input EventInput) (*LowerTaskOutput, error) {\n\tfmt.Println(ctx.FilterPayload())\n\treturn &LowerTaskOutput{\n\t\tTransformedMessage: strings.ToLower(input.Message),\n\t}, nil\n}\n```\n\n#### Ruby\n\n```ruby\nEVENT_WORKFLOW_WITH_FILTER.task(:filtered_task) do |input, ctx|\n  puts ctx.filter_payload.inspect\nend\n```"},"54":{"title":"Advanced Usage","pageTitle":"Event Filters","pageRoute":"hatchet://docs/v1/external-events/event-filters","content":"In addition to referencing `input` in the expression (which corresponds to the _event_ payload), you can also reference the following fields:\n\n1. `payload` corresponds to the _filter_ payload (which was part of the request when the filter was created).\n2. `additional_metadata` allows for filtering based on `additional_metadata` sent with the event.\n3. `event_key` allows for filtering based on the key of the event, such as `user:created`."},"55":{"title":"Invoking Tasks From Other Services","pageTitle":"Inter-Service Triggering","pageRoute":"hatchet://docs/v1/inter-service-triggering","content":"While Hatchet recommends importing your workflows and standalone tasks directly to use for triggering runs, this only works in a monorepo or similar setups where you have access to those objects. 
However, it's common to have a polyrepo, have code written in multiple languages, or otherwise not be able to import your workflows and standalone tasks directly.\nHatchet provides stub tasks for these cases, allowing you to trigger your tasks from anywhere in a type-safe way with only minor code duplication.\n\n### Creating a \"Stub\" Task on your External Service (Recommended)\n\nThe recommended way to trigger a run from a service where you _cannot_ import the workflow or standalone task definition directly is to create a \"stub\" task or workflow on your external service. This is a Hatchet task or workflow that has the same name and input/output types as the task you want to trigger on your Hatchet worker, but without the function or other configuration.\n\nThis allows you to have a polyglot, fully typed interface with full SDK support.\n\n#### Typescript\n\n```typescript\nimport { hatchet } from '../hatchet-client';\n\n// (optional) Define the input type for the workflow\nexport type SimpleInput = {\n  Message: string;\n};\n\n// (optional) Define the output type for the workflow\nexport type SimpleOutput = {\n  'to-lower': {\n    TransformedMessage: string;\n  };\n};\n\n// declare the workflow with the same name as the\n// workflow name on the worker\nexport const simple = hatchet.workflow({\n  name: 'simple',\n});\n\n// you can use all the same run methods on the stub\n// with full type-safety\nsimple.run({ Message: 'Hello, World!' });\nsimple.runNoWait({ Message: 'Hello, World!' });\nsimple.schedule(new Date(), { Message: 'Hello, World!' });\nsimple.cron('my-cron', '0 0 * * *', { Message: 'Hello, World!' 
});\n```\n\n#### Python\n\nConsider a task with an implementation like this:\n\n```python\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet\n\n\nclass TaskInput(BaseModel):\n    user_id: int\n\n\nclass TaskOutput(BaseModel):\n    ok: bool\n\n\nhatchet = Hatchet()\n\n\n@hatchet.task(name=\"externally-triggered-task\", input_validator=TaskInput)\nasync def externally_triggered_task(input: TaskInput, ctx: Context) -> TaskOutput:\n    return TaskOutput(ok=True)\n```\n\nTo trigger this task from a separate service where the code is not shared, start by defining models that match the input and output types of the task defined above.\n\n```python\nclass TaskInput(BaseModel):\n    user_id: int\n\n\nclass TaskOutput(BaseModel):\n    ok: bool\n```\n\nNext, create the stub task.\n\n```python\nstub = hatchet.stubs.task(\n    # make sure the name and schemas exactly match the implementation\n    name=\"externally-triggered-task\",\n    input_validator=TaskInput,\n    output_validator=TaskOutput,\n)\n```\n\nFinally, use the stub to trigger the underlying task, and (optionally) retrieve the result.\n\n```python\n# input type checks properly\nresult = await stub.aio_run(input=TaskInput(user_id=1234))\n\n# `result.ok` type checks properly\nprint(\"Is successful:\", result.ok)\n```\n\n#### Go\n\n```go\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\thatchet \"github.com/hatchet-dev/hatchet/sdks/go\"\n)\n\ntype StubInput struct {\n\tMessage string `json:\"message\"`\n}\n\ntype StubOutput struct {\n\tOk bool `json:\"ok\"`\n}\n\nfunc StubWorkflow(client *hatchet.Client) *hatchet.StandaloneTask {\n\treturn client.NewStandaloneTask(\"stub-workflow\", func(ctx hatchet.Context, input StubInput) (StubOutput, error) {\n\t\treturn StubOutput{\n\t\t\tOk: true,\n\t\t}, nil\n\t})\n}\n\nfunc main() {\n\tclient, err := hatchet.NewClient()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create hatchet client: %v\", err)\n\t}\n\n\ttask := 
#### Ruby\n\n> **Info:** Note that this approach requires code duplication, which can break type\n>   safety. For instance, if the input type to your workflow changes, you need to\n>   remember to also change the type passed to the stub. Some ways to mitigate\n>   risks here are helpful comments reminding developers to keep these types in\n>   sync, code generation tools, and end-to-end tests.
If the task continues to fail after exhausting all the specified retries, the task will be marked as failed.\n\nThis simple retry mechanism can help to mitigate transient failures, such as network issues or temporary unavailability of external services, without requiring complex error handling logic in your task code."},"58":{"title":"How to use task-level retries","pageTitle":"Retry Policies","pageRoute":"hatchet://docs/v1/retry-policies","content":"To enable retries for a task, simply add the `retries` property to the task object in your task definition:\n\n#### Python\n\n```python\n@simple_workflow.task(retries=3)\ndef always_fail(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    raise Exception(\"simple task failed\")\n```\n\n#### Typescript\n\n```typescript\nexport const retries = hatchet.task({\n  name: 'retries',\n  retries: 3,\n  fn: async (_, ctx) => {\n    throw new Error('intentional failure');\n  },\n});\n```\n\n#### Go\n\n```go\nretries := client.NewStandaloneTask(\"retries-task\", func(ctx hatchet.Context, input RetriesInput) (*RetriesResult, error) {\n\treturn nil, errors.New(\"intentional failure\")\n}, hatchet.WithRetries(3))\n```\n\n#### Ruby\n\n```ruby\nSIMPLE_RETRY_WORKFLOW.task(:always_fail, retries: 3) do |input, ctx|\n  raise \"simple task failed\"\nend\n```\n\nYou can add the `retries` property to any task, and Hatchet will handle the retry logic automatically.\n\nIt's important to note that task-level retries are not suitable for all types of failures.\nFor example, if a task fails due to a programming error or an invalid configuration, retrying the task will likely not resolve the issue.\nIn these cases, you should fix the underlying problem in your code or configuration rather than relying on retries. See [Bypassing retry logic](#bypassing-retry-logic).\n\nAdditionally, if a task interacts with external services or databases, you should ensure that the operation is idempotent (i.e. 
can be safely repeated without changing the result) before enabling retries. Otherwise, retrying the task could lead to unintended side effects or inconsistencies in your data."},"59":{"title":"Accessing the Retry Count in a Running Task","pageTitle":"Retry Policies","pageRoute":"hatchet://docs/v1/retry-policies","content":"You can access the current retry count on the task's context object:\n\n#### Python\n\n```python\n@simple_workflow.task(retries=3)\ndef fail_twice(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    if ctx.retry_count < 2:\n        raise Exception(\"simple task failed\")\n\n    return {\"status\": \"success\"}\n```\n\n#### Typescript\n\n```typescript\nexport const retriesWithCount = hatchet.task({\n  name: 'retries-with-count',\n  retries: 3,\n  fn: async (_, ctx) => {\n    // > Get the current retry count\n    const retryCount = ctx.retryCount();\n\n    console.log(`Retry count: ${retryCount}`);\n\n    if (retryCount < 2) {\n      throw new Error('intentional failure');\n    }\n\n    return {\n      message: 'success',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nretriesWithCount := client.NewStandaloneTask(\"fail-twice-task\", func(ctx hatchet.Context, input RetriesWithCountInput) (*RetriesWithCountResult, error) {\n\t// Get the current retry count\n\tretryCount := ctx.RetryCount()\n\n\tfmt.Printf(\"Retry count: %d\\n\", retryCount)\n\n\tif retryCount < 2 {\n\t\treturn nil, errors.New(\"intentional failure\")\n\t}\n\n\treturn &RetriesWithCountResult{\n\t\tMessage: \"success\",\n\t}, nil\n}, hatchet.WithRetries(3))\n```\n\n#### Ruby\n\n```ruby\nSIMPLE_RETRY_WORKFLOW.task(:fail_twice, retries: 3) do |input, ctx|\n  raise \"simple task failed\" if ctx.retry_count < 2\n\n  { \"status\" => \"success\" }\nend\n```"},"60":{"title":"Exponential Backoff","pageTitle":"Retry Policies","pageRoute":"hatchet://docs/v1/retry-policies","content":"Hatchet also supports exponential backoff for retries, which can be useful for handling failures in a more 
resilient manner. Exponential backoff increases the delay between retries exponentially, giving the failing service more time to recover before the next retry.\n\n#### Python\n\n```python\n@backoff_workflow.task(\n    retries=10,\n    # 👀 Maximum number of seconds to wait between retries\n    backoff_max_seconds=10,\n    # 👀 Factor to increase the wait time between retries.\n    # This sequence will be 2s, 4s, 8s, 10s, 10s, 10s... due to the maxSeconds limit\n    backoff_factor=2.0,\n)\ndef backoff_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    if ctx.retry_count < 3:\n        raise Exception(\"backoff task failed\")\n\n    return {\"status\": \"success\"}\n```\n\n#### Typescript\n\n```typescript\nexport const withBackoff = hatchet.task({\n  name: 'with-backoff',\n  retries: 10,\n  backoff: {\n    // 👀 Maximum number of seconds to wait between retries\n    maxSeconds: 10,\n    // 👀 Factor to increase the wait time between retries.\n    // This sequence will be 2s, 4s, 8s, 10s, 10s, 10s... due to the maxSeconds limit\n    factor: 2,\n  },\n  fn: async () => {\n    throw new Error('intentional failure');\n  },\n});\n```\n\n#### Go\n\n```go\nwithBackoff := client.NewStandaloneTask(\"with-backoff-task\", func(ctx hatchet.Context, input BackoffInput) (*BackoffResult, error) {\n\treturn nil, errors.New(\"intentional failure\")\n}, hatchet.WithRetries(3), hatchet.WithRetryBackoff(2, 10))\n```\n\n#### Ruby\n\n```ruby\nBACKOFF_WORKFLOW.task(\n  :backoff_task,\n  retries: 10,\n  # Maximum number of seconds to wait between retries\n  backoff_max_seconds: 10,\n  # Factor to increase the wait time between retries.\n  # This sequence will be 2s, 4s, 8s, 10s, 10s, 10s... 
due to the maxSeconds limit\n  backoff_factor: 2.0\n) do |input, ctx|\n  raise \"backoff task failed\" if ctx.retry_count < 3\n\n  { \"status\" => \"success\" }\nend\n```"},"61":{"title":"Bypassing Retry logic","pageTitle":"Retry Policies","pageRoute":"hatchet://docs/v1/retry-policies","content":"The Hatchet SDKs each expose a `NonRetryable` exception, which allows you to bypass pre-configured retry logic for the task. **If your task raises this exception, it will not be retried.** This allows you to circumvent the default retry behavior in instances where you don't want to or cannot safely retry. Some examples in which this might be useful include:\n\n1. A task that calls an external API which returns a 4XX response code.\n2. A task that contains a single non-idempotent operation that can fail but cannot safely be rerun on failure, such as a billing operation.\n3. A failure that requires manual intervention to resolve.\n\n#### Python\n\n```python\n@non_retryable_workflow.task(retries=1)\ndef should_not_retry(input: EmptyModel, ctx: Context) -> None:\n    raise NonRetryableException(\"This task should not retry\")\n```\n\n#### Typescript\n\n```typescript\nconst shouldNotRetry = nonRetryableWorkflow.task({\n  name: 'should-not-retry',\n  fn: () => {\n    throw new NonRetryableError('This task should not retry');\n  },\n  retries: 1,\n});\n```\n\n#### Go\n\n```go\nretries := client.NewStandaloneTask(\"non-retryable-task\", func(ctx hatchet.Context, input NonRetryableInput) (*NonRetryableResult, error) {\n\treturn nil, worker.NewNonRetryableError(errors.New(\"intentional failure\"))\n}, hatchet.WithRetries(3))\n```\n\n#### Ruby\n\n```ruby\nNON_RETRYABLE_WORKFLOW.task(:should_not_retry, retries: 1) do |input, ctx|\n  raise Hatchet::NonRetryableError, \"This task should not retry\"\nend\n\nNON_RETRYABLE_WORKFLOW.task(:should_retry_wrong_exception_type, retries: 1) do |input, ctx|\n  raise TypeError, \"This task should retry because it's not a 
NonRetryableError\"\nend\n\nNON_RETRYABLE_WORKFLOW.task(:should_not_retry_successful_task, retries: 1) do |input, ctx|\n  # no-op\nend\n```\n\nIn these cases, even though `retries` is set to a non-zero number (meaning the task would ordinarily retry), Hatchet will not retry."},"62":{"title":"Python SDK Client Retry Behavior","pageTitle":"Retry Policies","pageRoute":"hatchet://docs/v1/retry-policies","content":"The retry behavior described above is for task execution inside Hatchet. The Python SDK also has separate retry behavior for certain client-side REST and gRPC calls made by the SDK itself.\n\nThese client retries are configured separately from task retries and do not control whether a task is retried after failing in a worker.\n\n> **Info:** Task retries and SDK client retries are separate mechanisms. Task retries\n>   control whether Hatchet retries a task after task failure. SDK client retries\n>   control whether the Python SDK retries certain API calls to Hatchet.\n\n### Default client retry behavior\n\nBy default, the Python SDK retries certain client calls with exponential backoff, with `max_attempts` defaulting to 5.\n\n**REST API calls**\n\nError Type, Retried by Default\n\nHTTP 5xx (server errors), Yes\nHTTP 404 (not found), Yes\nHTTP 429 (too many requests), No\nHTTP 400, 401, 403, 409, 422 (client errors), No\nTransport errors (timeout, connection, TLS, protocol), No\n\n**gRPC calls**\n\nStatus Code, Retried\n\n`UNAVAILABLE`, `DEADLINE_EXCEEDED`, `INTERNAL`, Yes\n`RESOURCE_EXHAUSTED`, `ABORTED`, `UNKNOWN`, Yes\n`UNIMPLEMENTED`, `NOT_FOUND`, `INVALID_ARGUMENT`, No\n`ALREADY_EXISTS`, `UNAUTHENTICATED`, `PERMISSION_DENIED`, No\n\n> **Info:** REST 404 responses are retried by default because some REST reads can observe\n>   replication lag between the core database and the OLAP database.\n\n### Configuring Python SDK client retries\n\nThe Python SDK exposes client retry configuration through `TenacityConfig`, either directly in `ClientConfig` or via 
environment variables.\n\n```python\nimport os\n\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.config import ClientConfig, HTTPMethod, TenacityConfig\n\nhatchet = Hatchet(\n    config=ClientConfig(\n        token=os.environ[\"HATCHET_CLIENT_TOKEN\"],\n        tenacity=TenacityConfig(\n            max_attempts=5,\n            retry_429=False,\n            retry_transport_errors=False,\n            retry_transport_methods=[HTTPMethod.GET, HTTPMethod.DELETE],\n        ),\n    )\n)\n```\n\nName, Type, Description, Default\n\n`max_attempts`, `int`, Maximum number of retry attempts. Set to 0 to disable retries., `5`\n`retry_429`, `bool`, Enable retries for HTTP 429 Too Many Requests responses., `False`\n`retry_transport_errors`, `bool`, Enable retries for REST transport-level errors (timeout, connection, TLS)., `False`\n`retry_transport_methods`, `list[HTTPMethod]`, HTTP methods to retry on transport errors when `retry_transport_errors` is enabled., `[GET, DELETE]`\n\nYou can also configure these via environment variables:\n\nEnvironment Variable, Description\n\n`HATCHET_CLIENT_TENACITY_MAX_ATTEMPTS`, Maximum retry attempts\n`HATCHET_CLIENT_TENACITY_RETRY_429`, Enable 429 retries (`true`/`false`)\n`HATCHET_CLIENT_TENACITY_RETRY_TRANSPORT_ERRORS`, Enable transport error retries (`true`/`false`)\n\n### Idempotency considerations\n\n> **Warning:** When `retry_transport_errors` is enabled, only idempotent HTTP methods (`GET`,\n>   `DELETE`) are retried by default. Non-idempotent methods (`POST`, `PUT`,\n>   `PATCH`) are excluded because retrying them after a transport error could\n>   result in duplicate operations if the original request succeeded but the\n>   response was lost.\n\nYou can add non-idempotent methods to `retry_transport_methods`, but only do so if:\n\n1. Your operations are idempotent (for example, because they use idempotency keys), or\n2. 
Remember to use retries judiciously and only for tasks that are idempotent. For more advanced retry strategies beyond the exponential backoff described above, such as circuit breaking, stay tuned for future updates to Hatchet's retry capabilities.
#### Python\n\n```python\n# 👀 Specify an execution timeout on a task\n@timeout_wf.task(\n    execution_timeout=timedelta(seconds=5), schedule_timeout=timedelta(minutes=10)\n)\ndef timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    time.sleep(30)\n    return {\"status\": \"success\"}\n```\n\n#### Typescript\n\n```typescript\nexport const withTimeouts = hatchet.task({\n  name: 'with-timeouts',\n  // time the task can wait in the queue before it is cancelled\n  scheduleTimeout: '10s',\n  // time the task can run before it is cancelled\n  executionTimeout: '10s',\n  fn: async (input: SimpleInput, ctx) => {\n    // wait 15 seconds\n    await sleep(15000);\n\n    // get the abort controller\n    const { abortController } = ctx;\n\n    // if the abort controller is aborted, throw an error\n    if (abortController.signal.aborted) {\n      throw new Error('cancelled');\n    }\n\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n```\n\n#### Go\n\n```go
```\n\n#### Ruby\n\n```ruby\n# Specify an execution timeout on a task\nTIMEOUT_WF.task(:timeout_task, execution_timeout: 5, schedule_timeout: 600) do |input, ctx|
#### Python\n\n```python\n@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))\ndef refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    ctx.refresh_timeout(timedelta(seconds=10))\n    time.sleep(5)\n\n    return {\"status\": \"success\"}\n```\n\n#### Typescript\n\n```typescript\nexport const refreshTimeout = hatchet.task({\n  name: 'refresh-timeout',\n  executionTimeout: '10s',\n  scheduleTimeout: '10s',\n  fn: async (input: SimpleInput, ctx) => {\n    // adds 15 seconds to the execution timeout\n    ctx.refreshTimeout('15s');\n    await sleep(15000);\n\n    // get the abort controller\n    const { abortController } = ctx;\n\n    // now this condition will not be met\n    // if the abort controller is aborted, throw an error\n    if (abortController.signal.aborted) {\n      throw new Error('cancelled');\n    }\n\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n```\n\n#### Go\n\n```go
Message: %s\", input.Message)\n\n\t\t// Refresh timeout by 10 seconds\n\t\tlog.Println(\"Refreshing timeout by 10 seconds...\")\n\t\terr := ctx.RefreshTimeout(\"10s\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to refresh timeout: %v\", err)\n\t\t\treturn TimeoutOutput{\n\t\t\t\tStatus:    \"failed\",\n\t\t\t\tCompleted: false,\n\t\t\t}, err\n\t\t}\n\n\t\t// Now sleep for 5 seconds (should complete successfully)\n\t\tlog.Println(\"Sleeping for 5 seconds...\")\n\t\ttime.Sleep(5 * time.Second)\n\n\t\tlog.Println(\"Task completed successfully after timeout refresh\")\n\t\treturn TimeoutOutput{\n\t\t\tStatus:    \"completed\",\n\t\t\tCompleted: true,\n\t\t}, nil\n\t},\n\thatchet.WithExecutionTimeout(3*time.Second), // Initial 3 second timeout\n)\n```\n\n#### Tab 4\n\n```ruby\nREFRESH_TIMEOUT_WF.task(:refresh_task, execution_timeout: 4) do |input, ctx|\n  ctx.refresh_timeout(10)\n  sleep 5\n\n  { \"status\" => \"success\" }\nend\n```\n\nIn this example, the task initially would exceed its execution timeout. But before it does, we call the `refreshTimeout` method, which extends the timeout and allows it to complete. Importantly, refreshing a timeout is an additive operation - the new timeout is added to the existing timeout. So for instance, if the task originally had a timeout of `30s` and we call `refreshTimeout(\"15s\")`, the new timeout will be `45s`.\n\nThe task timeout can be refreshed multiple times within a task to further extend the timeout as needed."},"67":{"title":"Cancellation","pageTitle":"Cancellation","pageRoute":"hatchet://docs/v1/cancellation","content":"# Cancellation in Hatchet Tasks\n\nHatchet provides a mechanism for canceling task executions gracefully, allowing you to signal to running tasks that they should stop running. 
Cancellation can be triggered on graceful termination of a worker or automatically through concurrency control strategies like [`CANCEL_IN_PROGRESS`](./concurrency.mdx#cancel_in_progress), which cancels currently running task instances to free up slots for new instances when the concurrency limit is reached.\n\nWhen a task is canceled, Hatchet sends a cancellation signal to the task. The task can then check for the cancellation signal and take appropriate action, such as cleaning up resources, aborting network requests, or gracefully terminating their execution."},"68":{"title":"Cancellation Mechanisms","pageTitle":"Cancellation","pageRoute":"hatchet://docs/v1/cancellation","content":"#### Python\n\n```python\n@cancellation_workflow.task()\ndef check_flag(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    for i in range(3):\n        time.sleep(1)\n\n        # Note: Checking the status of the exit flag is mostly useful for cancelling\n        # sync tasks without needing to forcibly kill the thread they're running on.\n        if ctx.exit_flag:\n            print(\"Task has been cancelled\")\n            raise ValueError(\"Task has been cancelled\")\n\n    return {\"error\": \"Task should have been cancelled\"}\n```\n```python\n@cancellation_workflow.task()\nasync def self_cancel(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    await asyncio.sleep(2)\n\n    ## Cancel the task\n    await ctx.aio_cancel()\n\n    await asyncio.sleep(10)\n\n    return {\"error\": \"Task should have been cancelled\"}\n```\n\n#### Typescript\n\n```typescript\nexport const cancellation = hatchet.task({\n  name: 'cancellation',\n  fn: async (_, ctx) => {\n    await sleep(10 * 1000);\n\n    if (ctx.cancelled) {\n      throw new Error('Task was cancelled');\n    }\n\n    return {\n      Completed: true,\n    };\n  },\n});\n```\n```typescript\nexport const abortSignal = hatchet.task({\n  name: 'abort-signal',\n  fn: async (_, { abortController }) => {\n    try {\n      const 
response = await axios.get('https://api.example.com/data', {\n        signal: abortController.signal,\n      });\n      // Handle the response\n    } catch (error) {\n      if (axios.isCancel(error)) {\n        // Request was canceled\n        console.log('Request canceled');\n      } else {\n        // Handle other errors\n      }\n    }\n  },\n});\n```\n\n#### Go\n\n```go\n// Add a long-running task that can be cancelled\n_ = workflow.NewTask(\"long-running-task\", func(ctx hatchet.Context, input CancellationInput) (CancellationOutput, error) {\n\tlog.Printf(\"Starting long-running task with message: %s\", input.Message)\n\n\t// Simulate long-running work with cancellation checking\n\tfor i := 0; i < 10; i++ {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Printf(\"Task cancelled after %d steps\", i)\n\t\t\treturn CancellationOutput{\n\t\t\t\tStatus:    \"cancelled\",\n\t\t\t\tCompleted: false,\n\t\t\t}, nil\n\t\tdefault:\n\t\t\tlog.Printf(\"Working... step %d/10\", i+1)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t}\n\n\tlog.Println(\"Task completed successfully\")\n\treturn CancellationOutput{\n\t\tStatus:    \"completed\",\n\t\tCompleted: true,\n\t}, nil\n}, hatchet.WithExecutionTimeout(30*time.Second))\n```\n\n#### Ruby\n\n```ruby\nCANCELLATION_WORKFLOW.task(:check_flag) do |input, ctx|\n  3.times do\n    sleep 1\n\n    # Note: Checking the status of the exit flag is mostly useful for cancelling\n    # sync tasks without needing to forcibly kill the thread they're running on.\n    if ctx.cancelled?\n      puts \"Task has been cancelled\"\n      raise \"Task has been cancelled\"\n    end\n  end\n\n  { \"error\" => \"Task should have been cancelled\" }\nend\n```\n```ruby\nCANCELLATION_WORKFLOW.task(:self_cancel) do |input, ctx|\n  sleep 2\n\n  ## Cancel the task\n  ctx.cancel\n\n  sleep 10\n\n  { \"error\" => \"Task should have been cancelled\" }\nend\n```"},"69":{"title":"Cancellation Best 
Practices","pageTitle":"Cancellation","pageRoute":"hatchet://docs/v1/cancellation","content":"When working with cancellation in Hatchet tasks, consider the following best practices:\n\n1. **Graceful Termination**: When a task receives a cancellation signal, aim to terminate its execution gracefully. Clean up any resources, abort pending operations, and perform any necessary cleanup tasks before returning from the task function.\n\n2. **Cancellation Checks**: Regularly check for cancellation signals within long-running tasks or loops. This allows the task to respond to cancellation in a timely manner and avoid unnecessary processing.\n\n3. **Asynchronous Operations**: If a task performs asynchronous operations, such as network requests or file I/O, consider passing the cancellation signal to those operations. Many libraries and APIs support cancellation through the `AbortSignal` interface.\n\n4. **Error Handling**: Handle cancellation errors appropriately. Distinguish between cancellation errors and other types of errors to provide meaningful error messages and take appropriate actions.\n\n5. **Cancellation Propagation**: If a task invokes other functions or libraries, consider propagating the cancellation signal to those dependencies. This ensures that cancellation is handled consistently throughout the task."},"70":{"title":"Additional Features","pageTitle":"Cancellation","pageRoute":"hatchet://docs/v1/cancellation","content":"In addition to the methods of cancellation listed here, Hatchet also supports [bulk cancellation](./bulk-retries-and-cancellations.mdx), which allows you to cancel many tasks in bulk using either their IDs or a set of filters, which is often the easiest way to cancel many things at once."},"71":{"title":"Conclusion","pageTitle":"Cancellation","pageRoute":"hatchet://docs/v1/cancellation","content":"Cancellation is a powerful feature in Hatchet that allows you to gracefully stop task executions when needed. 
Remember to follow best practices when implementing cancellation in your tasks, such as graceful termination, regular cancellation checks, handling asynchronous operations, proper error handling, and cancellation propagation.\n\nBy incorporating cancellation into your Hatchet tasks and workflows, you can build more resilient and responsive systems that can adapt to changing circumstances and user needs."},"72":{"title":"Bulk Retries & Cancellations","pageTitle":"Bulk Retries & Cancellations","pageRoute":"hatchet://docs/v1/bulk-retries-and-cancellations","content":"# Bulk Cancellations and Replays\n\nV1 adds the ability to cancel or replay task runs in bulk, which you can now do either in the Hatchet Dashboard or programmatically via the SDKs and the REST API.\n\nThere are two ways of bulk cancelling or replaying tasks in both cases:\n\n1. You can provide a list of task run ids to cancel or replay, which will cancel or replay all of the tasks in the list.\n2. You can provide a list of filters, similar to the list of filters on task runs in the Dashboard, and cancel or replay runs matching those filters. For instance, if you wanted to replay all failed runs of a `SimpleTask` from the past fifteen minutes that had the `foo` field in `additional_metadata` set to `bar`, you could apply those filters and replay all of the matching runs.\n\n### Bulk Operations by Run Ids\n\nThe first way to bulk cancel or replay runs is by providing a list of run ids. This is the most straightforward way to cancel or replay runs in bulk.\n\n#### Python\n\n> **Info:** In the Python SDK, the mechanics of bulk replaying and bulk cancelling tasks\n>   are exactly the same. 
The only change would be replacing e.g.\n>   `hatchet.runs.bulk_cancel` with `hatchet.runs.bulk_replay`.\n\nFirst, we'll start by fetching a task via the REST API.\n\n```python\nfrom datetime import datetime, timedelta, timezone\n\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, V1TaskStatus\n\nhatchet = Hatchet()\n\nworkflows = hatchet.workflows.list()\n\nassert workflows.rows\n\nworkflow = workflows.rows[0]\n```\n\nNow that we have a task, we'll get runs for it, so that we can use them to bulk cancel by run id.\n\n```python\nworkflow_runs = hatchet.runs.list(workflow_ids=[workflow.metadata.id])\n```\n\nAnd finally, we can cancel the runs in bulk.\n\n```python\nworkflow_run_ids = [workflow_run.metadata.id for workflow_run in workflow_runs.rows]\n\nbulk_cancel_by_ids = BulkCancelReplayOpts(ids=workflow_run_ids)\n\nhatchet.runs.bulk_cancel(bulk_cancel_by_ids)\n```\n\n> **Info:** Note that the Python SDK also exposes async versions of each of these methods:\n>\n>       - `workflows.list` -> `await workflows.aio_list`\n>       - `runs.list` -> `await runs.aio_list`\n>       - `runs.bulk_cancel` -> `await runs.aio_bulk_cancel`\n\n#### Go\n\n> **Info:** Just like in the Python SDK, the mechanics of bulk replaying and bulk\n>   cancelling tasks are exactly the same.\n\nFirst, we'll start by fetching a task via the REST API.\n\n```python\nfrom datetime import datetime, timedelta, timezone\n\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, V1TaskStatus\n\nhatchet = Hatchet()\n\nworkflows = hatchet.workflows.list()\n\nassert workflows.rows\n\nworkflow = workflows.rows[0]\n```\n\nNow that we have a task, we'll get runs for it, so that we can use them to bulk cancel by run id.\n\n```python\nworkflow_runs = hatchet.runs.list(workflow_ids=[workflow.metadata.id])\n```\n\nAnd finally, we can cancel the runs in bulk.\n\n```python\nworkflow_run_ids = [workflow_run.metadata.id for workflow_run in workflow_runs.rows]\n\nbulk_cancel_by_ids = 
BulkCancelReplayOpts(ids=workflow_run_ids)\n\nhatchet.runs.bulk_cancel(bulk_cancel_by_ids)\n```\n\n#### Ruby\n\n```ruby\nhatchet = Hatchet::Client.new\n\nworkflows = hatchet.workflows.list\n\nworkflow = workflows.rows.first\n```\n```ruby\nworkflow_runs = hatchet.runs.list(workflow_ids: [workflow.metadata.id])\n```\n```ruby\nworkflow_run_ids = workflow_runs.rows.map { |run| run.metadata.id }\n\nhatchet.runs.bulk_cancel(ids: workflow_run_ids)\n```\n\n### Bulk Operations by Filters\n\nThe second way to bulk cancel or replay runs is by providing a list of filters. This is the most powerful way to cancel or replay runs in bulk, as it allows you to cancel or replay all runs matching a set of arbitrary filters without needing to provide IDs for the runs in advance.\n\n#### Python\n\nThe example below provides some filters you might use to cancel or replay runs in bulk. Importantly, these filters are very similar to the filters you can use in the Hatchet Dashboard to filter which task runs are displaying.\n\n```python\nbulk_cancel_by_filters = BulkCancelReplayOpts(\n    filters=RunFilter(\n        since=datetime.today() - timedelta(days=1),\n        until=datetime.now(tz=timezone.utc),\n        statuses=[V1TaskStatus.RUNNING],\n        workflow_ids=[workflow.metadata.id],\n        additional_metadata={\"key\": \"value\"},\n    )\n)\n\nhatchet.runs.bulk_cancel(bulk_cancel_by_filters)\n```\n\nRunning this request will cancel all task runs matching the filters provided.\n\n#### Go\n\nThe example below provides some filters you might use to cancel or replay runs in bulk. 
Importantly, these filters are very similar to the filters you can use in the Hatchet Dashboard to filter which task runs are displaying.\n\n```python\nbulk_cancel_by_filters = BulkCancelReplayOpts(\n    filters=RunFilter(\n        since=datetime.today() - timedelta(days=1),\n        until=datetime.now(tz=timezone.utc),\n        statuses=[V1TaskStatus.RUNNING],\n        workflow_ids=[workflow.metadata.id],\n        additional_metadata={\"key\": \"value\"},\n    )\n)\n\nhatchet.runs.bulk_cancel(bulk_cancel_by_filters)\n```\n\nRunning this request will cancel all task runs matching the filters provided.\n\n#### Ruby\n\n```ruby\nhatchet.runs.bulk_cancel(\n  since: Time.now - 86_400,\n  until_time: Time.now,\n  statuses: [\"RUNNING\"],\n  workflow_ids: [workflow.metadata.id],\n  additional_metadata: { \"key\" => \"value\" }\n)\n```\n\n# Manual Retries\n\nHatchet provides a manual retry mechanism that allows you to handle failed task instances flexibly from the Hatchet dashboard.\n\nNavigate to the specific task in the Hatchet dashboard and click on the failed run. From there, you can inspect the details of the run, including the input data and the failure reason for each task.\n\nTo retry a failed task, simply click on the task in the run details view and then click the \"Replay\" button. This will create a new instance of the task, starting from the failed task, and using the same input data as the original run.\n\nManual retries give you full control over when and how to reprocess failed instances. 
For example, you may choose to wait until an external service is back online before retrying instances that depend on that service, or you may need to deploy a bug fix to your task code before retrying instances that were affected by the bug."},"73":{"title":"A Note on Dead Letter Queues","pageTitle":"Bulk Retries & Cancellations","pageRoute":"hatchet://docs/v1/bulk-retries-and-cancellations","content":"A dead letter queue (DLQ) is a messaging concept used to handle messages that cannot be processed successfully. In the context of task management, a DLQ can be used to store failed task instances that require manual intervention or further analysis.\n\nWhile Hatchet does not have a built-in dead letter queue feature, the persistence of failed task instances in the dashboard serves a similar purpose. By keeping a record of failed instances, Hatchet allows you to track and manage failures, perform root cause analysis, and take appropriate actions, such as modifying input data or updating your task code before manually retrying the failed instances.\n\nIt's important to note that the term \"dead letter queue\" is more commonly associated with messaging systems like Apache Kafka or Amazon SQS, where unprocessed messages are automatically moved to a separate queue for manual handling. In Hatchet, the failed instances are not automatically moved to a separate queue but are instead persisted in the dashboard for manual management."},"74":{"title":"Concurrency","pageTitle":"Concurrency","pageRoute":"hatchet://docs/v1/concurrency","content":"# Concurrency Control in Hatchet Tasks\n\nHatchet provides powerful concurrency control features to help you manage the execution of your tasks. 
This is particularly useful when you have tasks that may be triggered frequently or have long-running steps, and you want to limit the number of concurrent executions to prevent overloading your system, ensure fairness, or avoid race conditions.\n\n> **Info:** Concurrency strategies can be added to both `Tasks` and `Workflows`.\n\n### Why use concurrency control?\n\nThere are several reasons why you might want to use concurrency control in your Hatchet tasks:\n\n1. **Fairness**: When you have multiple clients or users triggering tasks, concurrency control can help ensure fair access to resources. By limiting the number of concurrent runs per client or user, you can prevent a single client from monopolizing the system and ensure that all clients get a fair share of the available resources.\n\n2. **Resource management**: If your task steps are resource-intensive (e.g., they make external API calls or perform heavy computations), running too many instances concurrently can overload your system. By limiting concurrency, you can ensure your system remains stable and responsive.\n\n3. **Avoiding race conditions**: If your task steps modify shared resources, running multiple instances concurrently can lead to race conditions and inconsistent data. Concurrency control helps you avoid these issues by ensuring only a limited number of instances run at a time.\n\n4. **Compliance with external service limits**: If your task steps interact with external services that have rate limits, concurrency control can help you stay within those limits and avoid being throttled or blocked.\n\n5. **Spike Protection**: When you have tasks that are triggered by external events, such as webhooks or user actions, you may experience spikes in traffic that can overwhelm your system. 
Concurrency control can help you manage these spikes by limiting the number of concurrent runs and queuing new runs until resources become available.\n\n### Available Strategies:\n\n- [`GROUP_ROUND_ROBIN`](#group-round-robin): Distribute task instances across available slots in a round-robin fashion based on the `key` function.\n- [`CANCEL_IN_PROGRESS`](#cancel-in-progress): Cancel the currently running task instances for the same concurrency key to free up slots for the new instance.\n- [`CANCEL_NEWEST`](#cancel-newest): Cancel the newest task instance for the same concurrency key to free up slots for the new instance.\n\n> We're always open to adding more strategies to fit your needs. Join our [discord](https://hatchet.run/discord) to let us know.\n\n### Setting concurrency on workers\n\nIn addition to setting concurrency limits at the task level, you can also control concurrency at the worker level by passing the `slots` option when creating a new `Worker` instance:\n\n#### Python\n\n```python\nclass WorkflowInput(BaseModel):\n    group: str\n\n\nconcurrency_limit_rr_workflow = hatchet.workflow(\n    name=\"ConcurrencyDemoWorkflowRR\",\n    concurrency=ConcurrencyExpression(\n        expression=\"input.group\",\n        max_runs=1,\n        limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n    ),\n    input_validator=WorkflowInput,\n)\n```\n\n#### Typescript\n\n```typescript\nexport const simpleConcurrency = hatchet.workflow({\n  name: 'simple-concurrency',\n  concurrency: {\n    maxRuns: 1,\n    limitStrategy: ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n    expression: 'input.GroupKey',\n  },\n});\n```\n\n#### Go\n\n```go\nvar maxRuns int32 = 1\nstrategy := types.GroupRoundRobin\n\nreturn client.NewStandaloneTask(\"simple-concurrency\",\n\tfunc(ctx worker.HatchetContext, input ConcurrencyInput) (*TransformedOutput, error) {\n\t\t// Random sleep between 200ms and 1000ms\n\t\ttime.Sleep(time.Duration(200+rand.Intn(800)) * 
time.Millisecond)\n\n\t\treturn &TransformedOutput{\n\t\t\tTransformedMessage: input.Message,\n\t\t}, nil\n\t},\n\thatchet.WithWorkflowConcurrency(types.Concurrency{\n\t\tExpression:    \"input.GroupKey\",\n\t\tMaxRuns:       &maxRuns,\n\t\tLimitStrategy: &strategy,\n\t}),\n)\n```\n\n#### Ruby\n\n```ruby\nCONCURRENCY_LIMIT_RR_WORKFLOW = HATCHET.workflow(\n  name: \"ConcurrencyDemoWorkflowRR\",\n  concurrency: Hatchet::ConcurrencyExpression.new(\n    expression: \"input.group\",\n    max_runs: 1,\n    limit_strategy: :group_round_robin\n  )\n)\n\nCONCURRENCY_LIMIT_RR_WORKFLOW.task(:step1) do |input, ctx|\n  puts \"starting step1\"\n  sleep 2\n  puts \"finished step1\"\nend\n```\n\nThis example will only let 1 run in each group run at a given time to fairly distribute the load across the workers."},"75":{"title":"Group Round Robin","pageTitle":"Concurrency","pageRoute":"hatchet://docs/v1/concurrency","content":"### How it works\n\nWhen a new task instance is triggered, the `GROUP_ROUND_ROBIN` strategy will:\n\n1. Determine the group that the instance belongs to based on the `key` function defined in the task's concurrency configuration.\n2. Check if there are any available slots for the instance's group based on the `slots` limit of available workers.\n3. If a slot is available, the new task instance starts executing immediately.\n4. If no slots are available, the new task instance is added to a queue for its group.\n5. When a running task instance completes and a slot becomes available for a group, the next queued instance for that group (in round-robin order) is dequeued and starts executing.\n\nThis strategy ensures that task instances are processed fairly across different groups, preventing any one group from monopolizing the available resources. 
It also helps to reduce latency for instances within each group, as they are processed in a round-robin fashion rather than strictly in the order they were triggered.\n\n### When to use `GROUP_ROUND_ROBIN`\n\nThe `GROUP_ROUND_ROBIN` strategy is particularly useful in scenarios where:\n\n- You have multiple clients or users triggering task instances, and you want to ensure fair resource allocation among them.\n- You want to process instances within each group in a round-robin fashion to minimize latency and ensure that no single instance within a group is starved for resources.\n- You have long-running task instances and want to avoid one group's instances monopolizing the available slots.\n\nKeep in mind that the `GROUP_ROUND_ROBIN` strategy may not be suitable for all use cases, especially those that require strict ordering or prioritization of the most recent events."},"76":{"title":"Cancel In Progress","pageTitle":"Concurrency","pageRoute":"hatchet://docs/v1/concurrency","content":"### How it works\n\nWhen a new task instance is triggered, the `CANCEL_IN_PROGRESS` strategy will:\n\n1. Determine the group that the instance belongs to based on the `key` function defined in the task's concurrency configuration.\n2. Check if there are any available slots for the instance's group based on the `maxRuns` limit of available workers.\n3. If a slot is available, the new task instance starts executing immediately.\n4. If there are no available slots, currently running task instances for the same concurrency key are cancelled to free up slots for the new instance.\n5. 
The new task instance starts executing immediately.\n\n### When to use Cancel In Progress\n\nThe `CANCEL_IN_PROGRESS` strategy is particularly useful in scenarios where:\n\n- You have long-running task instances that may become stale or irrelevant if newer instances are triggered.\n- You want to prioritize processing the most recent data or events, even if it means canceling older task instances.\n- You have resource-intensive tasks where it's more efficient to cancel an in-progress instance and start a new one than to wait for the old instance to complete.\n- Your user UI allows for multiple inputs, but only the most recent is relevant (i.e. chat messages, form submissions, etc.)."},"77":{"title":"Cancel Newest","pageTitle":"Concurrency","pageRoute":"hatchet://docs/v1/concurrency","content":"### How it works\n\nThe `CANCEL_NEWEST` strategy is similar to `CANCEL_IN_PROGRESS`, but it cancels the newly enqueued run instead of the oldest.\n\n### When to use `CANCEL_NEWEST`\n\nThe `CANCEL_NEWEST` strategy is particularly useful in scenarios where:\n\n- You want to allow in progress runs to complete before starting new work.\n- You have long-running task instances and want to avoid one group's instances monopolizing the available slots."},"78":{"title":"Multiple concurrency strategies","pageTitle":"Concurrency","pageRoute":"hatchet://docs/v1/concurrency","content":"You can also combine multiple concurrency strategies to create a more complex concurrency control system. 
For example, you can use one group key to represent a specific team, and another group to represent a specific resource in that team, giving you more control over the rate at which tasks are executed.\n\n#### Python\n\n```python\nclass WorkflowInput(BaseModel):\n    name: str\n    digit: str\n\n\nconcurrency_workflow_level_workflow = hatchet.workflow(\n    name=\"ConcurrencyWorkflowLevel\",\n    input_validator=WorkflowInput,\n    concurrency=[\n        ConcurrencyExpression(\n            expression=\"input.digit\",\n            max_runs=DIGIT_MAX_RUNS,\n            limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n        ),\n        ConcurrencyExpression(\n            expression=\"input.name\",\n            max_runs=NAME_MAX_RUNS,\n            limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n        ),\n    ],\n)\n```\n\n#### Typescript\n\n```typescript\nexport const multipleConcurrencyKeys = hatchet.workflow({\n  name: 'simple-concurrency',\n  concurrency: [\n    {\n      maxRuns: 1,\n      limitStrategy: ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n      expression: 'input.Tier',\n    },\n    {\n      maxRuns: 1,\n      limitStrategy: ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n      expression: 'input.Account',\n    },\n  ],\n});\n```\n\n#### Go\n\n```go\nstrategy := types.GroupRoundRobin\nvar maxRuns int32 = 20\n\nreturn client.NewStandaloneTask(\"multi-concurrency\",\n\tfunc(ctx worker.HatchetContext, input ConcurrencyInput) (*TransformedOutput, error) {\n\t\t// Random sleep between 200ms and 1000ms\n\t\ttime.Sleep(time.Duration(200+rand.Intn(800)) * time.Millisecond)\n\n\t\treturn &TransformedOutput{\n\t\t\tTransformedMessage: input.Message,\n\t\t}, nil\n\t},\n\thatchet.WithWorkflowConcurrency(\n\t\ttypes.Concurrency{\n\t\t\tExpression:    \"input.Tier\",\n\t\t\tMaxRuns:       &maxRuns,\n\t\t\tLimitStrategy: &strategy,\n\t\t}, types.Concurrency{\n\t\t\tExpression:    \"input.Account\",\n\t\t\tMaxRuns:       
&maxRuns,\n\t\t\tLimitStrategy: &strategy,\n\t\t},\n\t),\n)\n```\n\n#### Ruby\n\n```ruby\nCONCURRENCY_WORKFLOW_LEVEL_WORKFLOW = HATCHET.workflow(\n  name: \"ConcurrencyWorkflowLevel\",\n  concurrency: [\n    Hatchet::ConcurrencyExpression.new(\n      expression: \"input.digit\",\n      max_runs: DIGIT_MAX_RUNS_WL,\n      limit_strategy: :group_round_robin\n    ),\n    Hatchet::ConcurrencyExpression.new(\n      expression: \"input.name\",\n      max_runs: NAME_MAX_RUNS_WL,\n      limit_strategy: :group_round_robin\n    )\n  ]\n)\n\nCONCURRENCY_WORKFLOW_LEVEL_WORKFLOW.task(:task_1) do |input, ctx|\n  sleep SLEEP_TIME_WL\nend\n\nCONCURRENCY_WORKFLOW_LEVEL_WORKFLOW.task(:task_2) do |input, ctx|\n  sleep SLEEP_TIME_WL\nend\n```"},"79":{"title":"Rate Limits","pageTitle":"Rate Limits","pageRoute":"hatchet://docs/v1/rate-limits","content":"# Rate Limiting Step Runs in Hatchet\n\nHatchet allows you to enforce rate limits on task runs, enabling you to control the rate at which your service runs consume resources, such as external API calls, database queries, or other services. By defining rate limits, you can prevent task runs from exceeding a certain number of requests per time window (e.g., per second, minute, or hour), ensuring efficient resource utilization and avoiding overloading external services.\n\nThe state of active rate limits can be viewed in the dashboard in the `Rate Limit` resource tab."},"80":{"title":"Dynamic vs Static Rate Limits","pageTitle":"Rate Limits","pageRoute":"hatchet://docs/v1/rate-limits","content":"Hatchet offers two patterns for Rate Limiting task runs:\n\n1. [Dynamic Rate Limits](#dynamic-rate-limits): Allows for complex rate limiting scenarios, such as per-user limits, by using `input` or `additional_metadata` keys to upsert a limit at runtime.\n2. 
[Static Rate Limits](#static-rate-limits): Allows for simple rate limiting for resources known prior to runtime (e.g., external APIs)."},"81":{"title":"Dynamic Rate Limits","pageTitle":"Rate Limits","pageRoute":"hatchet://docs/v1/rate-limits","content":"Dynamic rate limits are ideal for complex scenarios where rate limits need to be partitioned by resources that are only known at runtime.\n\nThis pattern is especially useful for:\n\n1. Rate limiting individual users or tenants\n2. Implementing variable rate limits based on subscription tiers or user roles\n3. Dynamically adjusting limits based on real-time system load or other factors\n\n### How It Works\n\n1. Define the dynamic rate limit key with a CEL (Common Expression Language) Expression on the key, referencing either `input` or `additional_metadata`.\n2. Provide this key as part of the workflow trigger or event `input` or `additional_metadata` at runtime.\n3. Hatchet will create or update the rate limit based on the provided key and enforce it for the step run.\n\n> **Info:** Note: Dynamic keys are a shared resource, this means the same rendered cel on\n>   multiple steps will be treated as one global rate limit.\n\n### Declaring and Consuming Dynamic Rate Limits\n\n#### Python\n\n> Note: `dynamic_key` must be a CEL expression. `units` and `limit` can be either an integer or a CEL expression.\n\nWe can add one or more rate limits to a task by adding the `rate_limits` configuration to the task definition.\n\n```python\n@rate_limit_workflow.task(\n    rate_limits=[\n        RateLimit(\n            dynamic_key=\"input.user_id\",\n            units=1,\n            limit=10,\n            duration=RateLimitDuration.MINUTE,\n        )\n    ]\n)\ndef step_2(input: RateLimitInput, ctx: Context) -> None:\n    print(\"executed step_2\")\n```\n\n#### Tab 2\n\n> Note: `dynamicKey` must be a CEL expression. 
`units` and `limit` can be either an integer or a CEL expression.\n\nWe can add one or more rate limits to a task by adding the `rate_limits` configuration to the task definition.\n\n```typescript\nconst task2 = hatchet.task({\n  name: 'task2',\n  fn: (input: { userId: string }) => {\n    console.log('executed task2 for user: ', input.userId);\n  },\n  rateLimits: [\n    {\n      dynamicKey: 'input.userId',\n      units: 1,\n      limit: 10,\n      duration: RateLimitDuration.MINUTE,\n    },\n  ],\n});\n```\n\n#### Tab 3\n\n> Note: Go requires both a key and KeyExpr be set and the LimitValueExpr must be a CEL.\n\n```go\nuserUnits := 1\nuserLimit := \"10\"\nduration := types.Minute\ndynamicTask := client.NewStandaloneTask(\"task2\",\n\tfunc(ctx hatchet.Context, input APIRequest) (string, error) {\n\t\tlog.Printf(\"executed task2 for user: %s\", input.UserID)\n\n\t\treturn \"completed\", nil\n\t},\n\thatchet.WithRateLimits(&types.RateLimit{\n\t\tKey:            \"input.userId\",\n\t\tUnits:          &userUnits,\n\t\tLimitValueExpr: &userLimit,\n\t\tDuration:       &duration,\n\t}),\n)\n```\n\n#### Tab 4\n\n```ruby\nRATE_LIMIT_WORKFLOW.task(\n  :step_2,\n  rate_limits: [\n    Hatchet::RateLimit.new(\n      dynamic_key: \"input.user_id\",\n      units: 1,\n      limit: 10,\n      duration: :minute\n    )\n  ]\n) do |input, ctx|\n  puts \"executed step_2\"\nend\n```"},"82":{"title":"Static Rate Limits","pageTitle":"Rate Limits","pageRoute":"hatchet://docs/v1/rate-limits","content":"Static Rate Limits (formerly known as Global Rate Limits) are defined as part of your worker startup lifecycle prior to runtime. This model provides a single \"source of truth\" for pre-defined resources such as:\n\n1. External API resources that have a rate limit across all users or tenants\n2. Database connection pools with a maximum number of concurrent connections\n3. Shared computing resources with limited capacity\n\n### How It Works\n\n1. 
Declare static rate limits using the `put_rate_limit` method in the `Admin` client before starting your worker.\n2. Specify the units of consumption for a specific rate limit key in each step definition using the `rate_limits` configuration.\n3. Hatchet enforces the defined rate limits by tracking the number of units consumed by each step run across all workflow runs.\n\nIf a step run exceeds the rate limit, Hatchet re-queues the step run until the rate limit is no longer exceeded.\n\n### Declaring Static Limits\n\nDefine the static rate limits that can be consumed by any step run across all workflow runs using the `put_rate_limit` method in the `Admin` client within your code.\n\n#### Python\n\n```python\nRATE_LIMIT_KEY = \"test-limit\"\n\nhatchet.rate_limits.put(RATE_LIMIT_KEY, 2, RateLimitDuration.SECOND)\n```\n\n#### Typescript\n\n```typescript\nhatchet.ratelimits.upsert({\n  key: 'api-service-rate-limit',\n  limit: 10,\n  duration: RateLimitDuration.SECOND,\n});\n```\n\n#### Go\n\n```go\nerr = client.RateLimits().Upsert(features.CreateRatelimitOpts{\n\tKey:      RATE_LIMIT_KEY,\n\tLimit:    10,\n\tDuration: types.Second,\n})\nif err != nil {\n\tlog.Fatalf(\"failed to create rate limit: %v\", err)\n}\n```\n\n#### Ruby\n\n```ruby\ndef main\n  HATCHET.rate_limits.put(RATE_LIMIT_KEY, 2, :second)\n\n  worker = HATCHET.worker(\n    \"rate-limit-worker\", slots: 10, workflows: [RATE_LIMIT_WORKFLOW]\n  )\n  worker.start\nend\n```\n\n### Consuming Static Rate Limits\n\nWith your rate limit key defined, specify the units of consumption for a specific key in each step definition by adding the `rate_limits` configuration to your step definition in your workflow.\n\n#### Python\n\n```python\nRATE_LIMIT_KEY = \"test-limit\"\n\n\n@rate_limit_workflow.task(rate_limits=[RateLimit(static_key=RATE_LIMIT_KEY, units=1)])\ndef step_1(input: RateLimitInput, ctx: Context) -> None:\n    print(\"executed step_1\")\n```\n\n#### Typescript\n\n```typescript\nconst RATE_LIMIT_KEY = 
'api-service-rate-limit';\n\nconst task1 = hatchet.task({\n  name: 'task1',\n  rateLimits: [\n    {\n      staticKey: RATE_LIMIT_KEY,\n      units: 1,\n    },\n  ],\n  fn: (input) => {\n    console.log('executed task1');\n  },\n});\n```\n\n#### Go\n\n```go\nunits := 1\nstaticTask := client.NewStandaloneTask(\"task1\",\n\tfunc(ctx hatchet.Context, input APIRequest) (string, error) {\n\t\tlog.Println(\"executed task1\")\n\n\t\treturn \"completed\", nil\n\t},\n\thatchet.WithRateLimits(&types.RateLimit{\n\t\tKey:   RATE_LIMIT_KEY,\n\t\tUnits: &units,\n\t}),\n)\n```\n\n#### Ruby\n\n```ruby\nRATE_LIMIT_KEY = \"test-limit\"\n\nRATE_LIMIT_WORKFLOW.task(\n  :step_1,\n  rate_limits: [Hatchet::RateLimit.new(static_key: RATE_LIMIT_KEY, units: 1)]\n) do |input, ctx|\n  puts \"executed step_1\"\nend\n```\n\n### Limiting Workflow Runs\n\nTo rate limit an entire workflow run, it's recommended to specify the rate limit configuration on the entry step (i.e., the first step in the workflow). This will gate the execution of all downstream steps in the workflow."},"83":{"title":"Priority","pageTitle":"Priority","pageRoute":"hatchet://docs/v1/priority","content":"# Assigning priority to tasks in Hatchet\n\nHatchet allows you to assign different `priority` values to your tasks depending on how soon you want them to run. `priority` can be set to either `1`, `2`, or `3` (`low`, `medium`, and `high`, respectively), with relatively higher values resulting in that task being picked up before others of the same type. **By default, runs in Hatchet have a priority of 1 (low) unless otherwise specified.**\n\n\nPriority only affects multiple runs of a _single_ workflow. 
If you have two different workflows (A and B) and set A to globally have a priority of 3, and B to globally have a priority of 1, this does _not_ guarantee that if there is one task from A and one from B in the queue, that A's task will be run first.\n\nHowever, _within_ A, if you enqueue one task with priority 3 and one with priority 1, the priority 3 task will be run first.\n\n\nA couple of common use cases for assigning priorities are things like:\n\n1. Having high-priority (e.g. paying, new, etc.) customers be prioritized over lower-priority ones, allowing them to get faster turnaround times on their tasks.\n2. Having tasks triggered via your API run with higher priority than the same tasks triggered by a cron."},"84":{"title":"Setting priority for a task or workflow","pageTitle":"Priority","pageRoute":"hatchet://docs/v1/priority","content":"There are a few different ways to set priorities for tasks or workflows in Hatchet.\n\n### Workflow-level default priority\n\nFirst, you can set a default priority at the workflow level:\n\n#### Python\n\n```python\nDEFAULT_PRIORITY = Priority.LOW\nSLEEP_TIME = 0.25\n\npriority_workflow = hatchet.workflow(\n    name=\"PriorityWorkflow\",\n    default_priority=DEFAULT_PRIORITY,\n)\n```\n\n#### Typescript\n\n```typescript\nexport const priorityWf = hatchet.workflow({\n  name: 'priority-wf',\n  defaultPriority: Priority.LOW,\n});\n```\n\n#### Go\n\n```go\nworkflow := client.NewWorkflow(\n\t\"priority\",\n\thatchet.WithWorkflowDefaultPriority(features.RunPriorityLow),\n)\n```\n\n#### Ruby\n\n```ruby\nDEFAULT_PRIORITY = 1\nSLEEP_TIME = 0.25\n\nPRIORITY_WORKFLOW = HATCHET.workflow(\n  name: \"PriorityWorkflow\",\n  default_priority: DEFAULT_PRIORITY\n)\n\nPRIORITY_WORKFLOW.task(:priority_task) do |input, ctx|\n  puts \"Priority: #{ctx.priority}\"\n  sleep SLEEP_TIME\nend\n```\n\nThis will assign the same default priority to all runs of this workflow (and all of the workflow's corresponding tasks), but will have no effect without 
also setting run-level priorities, since every run will use the same default.\n\n### Priority-on-trigger\n\nWhen you trigger a run, you can set the priority of the triggered run to override its default priority.\n\n#### Python\n\n```python\nlow_prio = priority_workflow.run(\n    ## 👀 Adding priority and key to metadata to show them in the dashboard\n    priority=Priority.LOW,\n    additional_metadata={\"priority\": \"low\", \"key\": 1},\n    wait_for_result=False,\n)\n\nhigh_prio = priority_workflow.run(\n    ## 👀 Adding priority and key to metadata to show them in the dashboard\n    priority=Priority.HIGH,\n    additional_metadata={\"priority\": \"high\", \"key\": 1},\n    wait_for_result=False,\n)\n```\n\n#### Typescript\n\n```typescript\nconst run = priority.run(new Date(Date.now() + 60 * 60 * 1000), { priority: Priority.HIGH });\n```\n\n#### Go\n\n```go\nref, err := client.RunNoWait(\n\tcontext.Background(),\n\tworkflow.GetName(),\n\tPriorityInput{},\n\thatchet.WithRunPriority(features.RunPriorityLow),\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nlow_prio = PRIORITY_WORKFLOW.run_no_wait(\n  {},\n  options: Hatchet::TriggerWorkflowOptions.new(\n    priority: 1,\n    additional_metadata: { \"priority\" => \"low\", \"key\" => 1 }\n  )\n)\n\nhigh_prio = PRIORITY_WORKFLOW.run_no_wait(\n  {},\n  options: Hatchet::TriggerWorkflowOptions.new(\n    priority: 3,\n    additional_metadata: { \"priority\" => \"high\", \"key\" => 1 }\n  )\n)\n```\n\nSimilarly, you can also assign a priority to scheduled and cron workflows.\n\n#### Python\n\n```python\nschedule = priority_workflow.schedule(\n    run_at=datetime.now(tz=timezone.utc) + timedelta(minutes=1),\n    priority=Priority.HIGH,\n)\n\ncron = priority_workflow.create_cron(\n    cron_name=\"my-scheduled-cron\",\n    expression=\"0 * * * *\",\n    priority=Priority.HIGH,\n)\n```\n\n#### Typescript\n\n```typescript\nconst scheduled = priority.schedule(\n  new Date(Date.now() + 60 * 60 * 1000),\n  {},\n  { 
priority: Priority.HIGH }\n);\nconst delayed = priority.delay(60 * 60 * 1000, {}, { priority: Priority.HIGH });\nconst cron = priority.cron(\n  `daily-cron-${Math.random()}`,\n  '0 0 * * *',\n  {},\n  { priority: Priority.HIGH }\n);\n```\n\n#### Go\n\n```go\npriority := features.RunPriorityHigh\n\nschedule, err := client.Schedules().Create(\n\tcontext.Background(),\n\tworkflow.GetName(),\n\tfeatures.CreateScheduledRunTrigger{\n\t\tPriority: &priority,\n\t},\n)\nif err != nil {\n\treturn err\n}\n\ncron, err := client.Crons().Create(\n\tcontext.Background(),\n\tworkflow.GetName(),\n\tfeatures.CreateCronTrigger{\n\t\tPriority: &priority,\n\t},\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nschedule = PRIORITY_WORKFLOW.schedule(\n  Time.now + 60,\n  options: Hatchet::TriggerWorkflowOptions.new(priority: 3)\n)\n\ncron = PRIORITY_WORKFLOW.create_cron(\n  \"my-scheduled-cron\",\n  \"0 * * * *\",\n  input: {},\n)\n```\n\nIn these cases, the priority set on the trigger will override the default priority, so these runs will be processed ahead of lower-priority ones."},"85":{"title":"Durable Execution","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"# Durable Tasks\n\nUse durable tasks when **you don't know the shape of work ahead of time**. For example, an AI agent that picks its next action based on a model response, a fan-out where N is determined by the input data, or a pipeline that branches and spawns sub-workflows based on intermediate results. In all of these cases, the \"graph\" of work doesn't exist when the task starts; it emerges at runtime as the task makes decisions and [spawns children](/v1/child-spawning).\n\nA durable task is a single long-running function that acts as an **orchestrator**: it spawns child tasks, waits for their results, makes decisions, and spawns more. 
Hatchet checkpoints its progress so it can recover from crashes, survive long waits, and resume on any worker without re-executing completed work.\n\n> **Info:** If you know the full graph of work upfront (every task and dependency is fixed\n>   before execution begins), use a [DAG](/v1/patterns/directed-acyclic-graphs)\n>   instead. You can always [mix both patterns](/v1/patterns/mixing-patterns) in\n>   the same application."},"86":{"title":"When to Use Durable Tasks","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"Scenario, Why Durable?\n\n**Dynamic fan-out** (N unknown), Spawn children based on runtime data; wait for results without holding a slot.\n**Agentic workflows**, An agent decides what to do next, spawns subtasks, loops, or stops at runtime.\n**Long waits** (hours/days), Worker slots are freed during waits; no wasted compute.\n**Human-in-the-loop**, Wait for approval events without holding resources\n**Multi-step with inline pauses**, `SleepFor` and `WaitForEvent` let you express complex procedural flows.\n**Crash-resilient pipelines**, Automatically resume from checkpoints after failures."},"87":{"title":"How It Works","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"A durable task builds the workflow at runtime through **child spawning**. The task function runs, inspects data, and decides what to do next by spawning child tasks. The parent is [evicted](/v1/task-eviction) while children execute, freeing its worker slot. 
When children complete, the parent resumes from its checkpoint and continues.\n\n```mermaid\nsequenceDiagram\n    participant P as Durable Task\n    participant H as Hatchet\n    participant W as Workers\n\n    P->>H: Spawn Child A, Child B, Child C...N\n    H-->>P: Evicted (slot freed)\n    H->>W: Schedule children across fleet\n    W->>H: Child results\n    H->>P: Resume from checkpoint\n    P->>P: Inspect results, decide next step\n    P->>H: Spawn more children, sleep, or finish\n```\n\nThis is fundamentally different from a DAG, where every task and dependency is declared before execution begins. With durable tasks, the number of children, which branches to take, and whether to loop or stop are all determined by your code at runtime.\n\n\n### Checkpoints\n\nEach call to `SleepFor`, `WaitForEvent`, `WaitFor`, `Memo`, or `RunChild` creates a checkpoint in the durable event log. These checkpoints record the task's progress.\n\n### Worker slot is freed during waits\n\nWhen a durable task enters a wait (sleep, event, or child result), Hatchet [evicts](/v1/task-eviction) it from the worker. The slot is immediately available for other tasks.\n\n### Task resumes from checkpoint\n\nWhen the wait completes, Hatchet re-queues the task on any available worker. It replays the event log up to the last checkpoint and resumes execution from there. Completed operations are not re-executed."},"88":{"title":"The Durable Context","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"Declare a task as durable (using `durable_task` instead of `task`) and it receives a `DurableContext` instead of a normal `Context`. The `DurableContext` extends `Context` with methods for checkpointing and waiting:\n\nMethod, Purpose\n\n**`SleepFor(duration)`**, Pause for a fixed duration. 
Respects the original sleep time on restart; if interrupted after 23 of 24 hours, only sleeps 1 more hour.\n**`WaitForEvent(key, expr)`**, Wait for an external event by key, with optional [CEL filter](https://github.com/google/cel-spec) expression on the payload.\n**`WaitFor(conditions)`**, General-purpose wait accepting any combination of sleep conditions, event conditions, or or-groups. `SleepFor` and `WaitForEvent` are convenience wrappers around this method.\n**`Memo(function)`**, Run functions whose outputs are memoized based on the input arguments.\n**`RunChild(task, input)`**, Spawn a child task and wait for its result. The parent is evicted during the wait."},"89":{"title":"Example Task","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"```python\ndurable_workflow = hatchet.workflow(name=\"DurableWorkflow\")\n```\n\nNow add tasks to the workflow. The first is a regular task; the second is a durable task that sleeps and waits for an event:\n\n```python\nEVENT_KEY = \"durable-example:event\"\nSLEEP_TIME = 5\nREPLAY_RESET_SLEEP_TIME = 3\n\n\n@durable_workflow.task()\nasync def ephemeral_task(input: EmptyModel, ctx: Context) -> None:\n    print(\"Running non-durable task\")\n\n\nclass AwaitedEvent(BaseModel):\n    id: str\n\n\n@durable_workflow.durable_task()\nasync def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str | int]:\n    print(\"Waiting for sleep\")\n    sleep = await ctx.aio_sleep_for(duration=timedelta(seconds=SLEEP_TIME))\n    print(\"Sleep finished\")\n\n    print(\"Waiting for event\")\n    event = await ctx.aio_wait_for_event(\n        EVENT_KEY, \"true\", payload_validator=AwaitedEvent\n    )\n    print(\"Event received\")\n\n    return {\n        \"status\": \"success\",\n        \"event_id\": event.id,\n        \"sleep_duration_seconds\": sleep.duration.seconds,\n    }\n```\n\n> **Info:** The `durable_task` decorator gives the function a `DurableContext` instead 
of\n>   a regular `Context`. This is the only difference in declaration; the task\n>   registers and runs on the same worker as regular tasks.\n\nIf this task is interrupted at any time, it will continue from where it left off. If the task calls `ctx.aio_sleep_for` for 24 hours and is interrupted after 23 hours, it will only sleep for 1 more hour on restart.\n\n### Or Groups\n\nDurable tasks can combine multiple wait conditions using [or groups](/v1/conditions#or-groups). For example, you could wait for either an event or a sleep (whichever comes first):\n\n```python\n@durable_workflow.durable_task()\nasync def wait_for_or_group_1(\n    _i: EmptyModel, ctx: DurableContext\n) -> dict[str, str | int | float]:\n    start = time.time()\n    wait_result = await ctx.aio_wait_for(\n        uuid4().hex,\n        or_(\n            SleepCondition(timedelta(seconds=SLEEP_TIME)),\n            UserEventCondition(event_key=EVENT_KEY),\n        ),\n    )\n\n    key = list(wait_result.keys())[0]\n    event_id = list(wait_result[key].keys())[0]\n\n    return {\n        \"runtime\": time.time() - start,\n        \"key\": key,\n        \"event_id\": event_id,\n    }\n```"},"90":{"title":"Spawning Child Tasks","pageTitle":"Durable Execution","pageRoute":"hatchet://docs/v1/patterns/durable-task-execution","content":"Child spawning is the primary way durable tasks build workflows at runtime. A durable task can spawn any runnable (regular tasks, other durable tasks, or entire DAG workflows), wait for results, and decide what to do next.\n\nChild type, Example\n\n**Regular task**, Spawn a stateless task for a quick computation or API call.\n**Durable task**, Spawn another durable task that has its own checkpoints, sleeps, and event waits.\n**DAG workflow**, Spawn an entire multi-task workflow and wait for its final output.\n\nThe parent is evicted while children execute, so it consumes no resources. 
The number and type of children can be determined dynamically based on input, intermediate results, or model outputs.\n\nSee [Child Spawning](/v1/child-spawning) for patterns and full examples.\n\n> **Info:** For an in-depth look at how durable execution works internally, see [this blog\n>   post](https://hatchet.run/blog/durable-execution)."},"91":{"title":"DAGs","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"# Declarative Workflow Design (DAGs)\n\nHatchet workflows are designed in a **Directed Acyclic Graph (DAG)** format, where each task is a node in the graph, and the dependencies between tasks are the edges. This structure ensures that workflows are organized, predictable, and free from circular dependencies."},"92":{"title":"How DAG Workflows Work","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"### You declare the graph\n\nDefine tasks and their dependencies upfront. Hatchet knows the full shape of work before execution begins.\n\n### Hatchet executes in order\n\nTasks run as soon as their parents complete. Independent tasks run in parallel automatically. A worker slot is only assigned when a task is ready to execute, so tasks waiting on parents consume no resources. Each task has configurable [retry policies](/v1/retry-policies) and [timeouts](/v1/timeouts).\n\n### Results flow downstream\n\nTask outputs are cached and passed to child tasks. If a failure occurs mid-workflow, completed tasks don't re-run.\n\n### Everything is observable\n\nEvery task execution is tracked in the dashboard — inputs, outputs, durations, and errors. You can see exactly where a workflow succeeded or failed."},"93":{"title":"Defining a Workflow","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"Start by declaring a workflow with a name. 
The workflow object can declare additional workflow-level configuration options which we'll cover later.\n\nThe returned object is an instance of the `Workflow` class, which is the primary interface for interacting with the workflow (i.e. [running](/v1/running-your-task#run-and-wait), [enqueuing](/v1/running-your-task#fire-and-forget), [scheduling](/v1/scheduled-runs), etc).\n\n#### Python\n\n```python\ndag_workflow = hatchet.workflow(name=\"DAGWorkflow\")\n```\n\n#### Typescript\n\n```typescript\n// First, we declare the workflow\nexport const dag = hatchet.workflow({\n  name: 'simple',\n});\n```\n\n#### Go\n\n```go\nworkflow := client.NewWorkflow(\"dag-workflow\")\n```\n\n#### Ruby\n\n```ruby\nDAG_WORKFLOW = HATCHET.workflow(name: \"DAGWorkflow\")\n```\n\n\n  The Workflow return object can be interacted with in the same way as a\n  [task](/v1/tasks), however, it can only take a subset of options which are\n  applied at the task level."},"94":{"title":"Defining a Task","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"Now that we have a workflow, we can define a task to be executed as part of the workflow. Tasks are defined by calling the `task` method on the workflow object.\n\nThe `task` method takes a name and a function that defines the task's behavior. The function will receive the workflow's input and return the task's output. Tasks also accept a number of other configuration options, which are covered elsewhere in our documentation.\n\n#### Python\n\nIn Python, the `task` method is a decorator, which is used like this to wrap a function:\n\n```python\n@dag_workflow.task(execution_timeout=timedelta(seconds=5))\ndef step1(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\nThe function takes two arguments: `input`, which is a Pydantic model, and `ctx`, which is the Hatchet `Context` object. 
We'll discuss both of these more later.\n\n> **Info:** In the internals of Hatchet, the task is called using _positional arguments_, meaning that you can name `input` and `ctx` whatever you like.\n>\n> For instance, `def task_1(foo: EmptyModel, bar: Context) -> None:` is perfectly valid.\n\n#### Typescript\n\n```typescript\n// Next, we declare the tasks bound to the workflow\nconst toLower = dag.task({\n  name: 'to-lower',\n  fn: (input) => {\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n```\n\nThe `fn` argument is a function that takes the workflow's input and a\ncontext object. The context object contains information about the workflow\nrun (e.g. the run ID, the workflow's input, etc). It can be synchronous or\nasynchronous.\n\n#### Go\n\n```go\nstep1 := workflow.NewTask(\"step-1\", func(ctx hatchet.Context, input Input) (StepOutput, error) {\n\treturn StepOutput{\n\t\tStep:   1,\n\t\tResult: input.Value * 2,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nSTEP1 = DAG_WORKFLOW.task(:step1, execution_timeout: 5) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n\nSTEP2 = DAG_WORKFLOW.task(:step2, execution_timeout: 5) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```"},"95":{"title":"Building a DAG with Task Dependencies","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"The power of Hatchet's workflow design comes from connecting tasks into a DAG structure. 
Tasks can specify dependencies (parents) which must complete successfully before the task can start.\n\n#### Python\n\n```python\n@dag_workflow.task(execution_timeout=timedelta(seconds=5))\nasync def step2(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n\n\n@dag_workflow.task(parents=[step1, step2])\nasync def step3(input: EmptyModel, ctx: Context) -> RandomSum:\n    one = ctx.task_output(step1).random_number\n    two = ctx.task_output(step2).random_number\n\n    return RandomSum(sum=one + two)\n```\n\n#### Typescript\n\n```typescript\ndag.task({\n  name: 'reverse',\n  parents: [toLower],\n  fn: async (input, ctx) => {\n    const lower = await ctx.parentOutput(toLower);\n    return {\n      Original: input.Message,\n      Transformed: lower.TransformedMessage.split('').reverse().join(''),\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nstep2 := workflow.NewTask(\"step-2\", func(ctx hatchet.Context, input Input) (StepOutput, error) {\n\t// Get output from step 1\n\tvar step1Output StepOutput\n\tif err := ctx.ParentOutput(step1, &step1Output); err != nil {\n\t\treturn StepOutput{}, err\n\t}\n\n\treturn StepOutput{\n\t\tStep:   2,\n\t\tResult: step1Output.Result + 10,\n\t}, nil\n}, hatchet.WithParents(step1))\n```\n\n#### Ruby\n\n```ruby\nDAG_WORKFLOW.task(:step3, parents: [STEP1, STEP2]) do |input, ctx|\n  one = ctx.task_output(STEP1)[\"random_number\"]\n  two = ctx.task_output(STEP2)[\"random_number\"]\n\n  { \"sum\" => one + two }\nend\n\nDAG_WORKFLOW.task(:step4, parents: [STEP1, :step3]) do |input, ctx|\n  puts(\n    \"executed step4\",\n    Time.now.strftime(\"%H:%M:%S\"),\n    input.inspect,\n    ctx.task_output(STEP1).inspect,\n    ctx.task_output(:step3).inspect\n  )\n\n  { \"step4\" => \"step4\" }\nend\n```"},"96":{"title":"Accessing Parent Task Outputs","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"As shown in the examples above, tasks can access 
outputs from their parent tasks using the context object:\n\n#### Python\n\n```python\n@dag_workflow.task(execution_timeout=timedelta(seconds=5))\nasync def step2(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n\n\n@dag_workflow.task(parents=[step1, step2])\nasync def step3(input: EmptyModel, ctx: Context) -> RandomSum:\n    one = ctx.task_output(step1).random_number\n    two = ctx.task_output(step2).random_number\n\n    return RandomSum(sum=one + two)\n```\n\n#### Typescript\n\n```typescript\ndag.task({\n  name: 'task-with-parent-output',\n  parents: [toLower],\n  fn: async (input, ctx) => {\n    const lower = await ctx.parentOutput(toLower);\n    return {\n      Original: input.Message,\n      Transformed: lower.TransformedMessage.split('').reverse().join(''),\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n// Inside a task with parent dependencies\nvar parentOutput ParentOutputType\nerr := ctx.ParentOutput(parentTask, &parentOutput)\nif err != nil {\n    return nil, err\n}\n```\n\n#### Ruby\n\n```ruby\nDAG_WORKFLOW.task(:step3, parents: [STEP1, STEP2]) do |input, ctx|\n  one = ctx.task_output(STEP1)[\"random_number\"]\n  two = ctx.task_output(STEP2)[\"random_number\"]\n\n  { \"sum\" => one + two }\nend\n\nDAG_WORKFLOW.task(:step4, parents: [STEP1, :step3]) do |input, ctx|\n  puts(\n    \"executed step4\",\n    Time.now.strftime(\"%H:%M:%S\"),\n    input.inspect,\n    ctx.task_output(STEP1).inspect,\n    ctx.task_output(:step3).inspect\n  )\n\n  { \"step4\" => \"step4\" }\nend\n```"},"97":{"title":"Running a Workflow","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"You can run workflows directly or enqueue them for asynchronous execution. All the same methods for running a task are available for workflows!\n\n#### Python\n\n```python\ndag_workflow.run()\n```\n\n#### Typescript\n\n```typescript\nconst input = { Message: 'Hello, World!' 
};\n\n// Run workflow and wait for the result\nconst result = await simple.run(input);\n\n// Enqueue workflow to be executed asynchronously\nconst runReference = await simple.runNoWait(input);\n```\n\n#### Go\n\n```go\n// Run workflow and wait for the result\nresult, err := simple.Run(ctx, input)\n\n// Enqueue workflow to be executed asynchronously\nrunID, err := simple.RunNoWait(ctx, input)\n```\n\n#### Ruby\n\n```ruby\nresult = DAG_WORKFLOW.run\nputs result\n```"},"98":{"title":"Pre-Determined Pipelines","pageTitle":"DAGs","pageRoute":"hatchet://docs/v1/patterns/directed-acyclic-graphs","content":"DAGs naturally model fixed multi-stage pipelines where the sequence of tasks and their dependencies are known before execution. ETL workflows, document processing pipelines, and CI/CD workflows all follow this pattern: each stage depends on the previous, and the overall structure is visible and predictable in the dashboard."},"99":{"title":"Best Practices","pageTitle":"Best Practices","pageRoute":"hatchet://docs/v1/patterns/mixing-patterns","content":"# Best Practices"},"100":{"title":"Choosing a Pattern","pageTitle":"Best Practices","pageRoute":"hatchet://docs/v1/patterns/mixing-patterns","content":"Use a **DAG** for any portion of work whose shape you know upfront, and use a **durable task** to orchestrate the parts whose shape is dynamic. You can mix them freely within the same application and even within the same workflow.\n\nScenario, Pattern\n\nFixed pipeline, every step is known, DAG\nFixed pipeline, but one step needs a long wait, DAG with a durable task node\nDynamic orchestration of known pipelines, Durable task spawning DAGs\nFully dynamic, shape decided at runtime, Durable task spawning tasks/durable tasks\nAgent that reasons and acts in a loop, Durable task spawning children per iteration\n\n[DAGs](/v1/patterns/directed-acyclic-graphs) are inherently deterministic, since their shape is predefined and intermediate results are cached. 
If your workflow can be represented as a DAG, prefer that. Reach for a durable task only when you need capabilities a static graph can't express.\n\n> **Info:** You don't have to choose one pattern for your entire application. Different\n>   workflows can use different patterns, and a single workflow can mix them.\n>   Start with the simplest pattern that fits and add complexity only when needed."},"101":{"title":"Mixing Patterns","pageTitle":"Best Practices","pageRoute":"hatchet://docs/v1/patterns/mixing-patterns","content":"### A durable task inside a DAG\n\nA DAG workflow can include a durable task as one of its nodes. The durable task checkpoints and waits like any other, while the rest of the DAG proceeds according to its declared dependencies.\n\nThis is useful when most of your pipeline is a fixed graph but one step needs dynamic behavior, for example a pipeline where one stage runs an agentic loop that decides what to do at runtime.\n\n```mermaid\ngraph LR\n    A[Prepare Data] --> B[Durable: Agentic Loop]\n    B --> C[Publish Results]\n    style B stroke:#3392FF,stroke-dasharray: 5 5\n```\n\nThe durable task (`Agentic Loop`) can spawn children, sleep, wait for events, or loop until a condition is met. When it completes, the downstream `Publish Results` task runs automatically.\n\n### Spawning a DAG from a durable task\n\nA durable task can spawn an entire DAG workflow as a child, wait for its result, and then continue. 
This lets you use procedural control flow to decide _which_ pipeline to run and _how many times_ to run it, while the pipeline itself is a well-defined graph.\n\n```mermaid\ngraph TD\n    DT[Durable Task] -->|spawns| DAG1[DAG: Process Batch 1]\n    DT -->|spawns| DAG2[DAG: Process Batch 2]\n    DT -->|spawns| DAG3[DAG: Process Batch N]\n    DAG1 -->|result| DT\n    DAG2 -->|result| DT\n    DAG3 -->|result| DT\n    style DT stroke:#3392FF\n    style DAG1 stroke:#22C55E\n    style DAG2 stroke:#22C55E\n    style DAG3 stroke:#22C55E\n```\n\nThe durable task decides at runtime how many batches to process, spawns a DAG workflow for each one, and collects the results. The DAG workflows run in parallel across your worker fleet while the durable task's slot is freed.\n\n### Durable tasks spawning durable tasks\n\nA durable task can spawn other durable tasks as children, each with their own checkpoints and event waits. This creates a tree of durable work that's entirely driven by runtime logic.\n\n```mermaid\ngraph TD\n    Root[Durable: Orchestrator] -->|spawns| A[Durable: Agent A]\n    Root -->|spawns| B[Durable: Agent B]\n    A -->|spawns| A1[Task: Subtask]\n    A -->|spawns| A2[Task: Subtask]\n    B -->|spawns| B1[Durable: Sub-Agent]\n    B1 -->|spawns| B1a[Task: Subtask]\n    style Root stroke:#3392FF,stroke-dasharray: 5 5\n    style A stroke:#3392FF,stroke-dasharray: 5 5\n    style B stroke:#3392FF,stroke-dasharray: 5 5\n    style B1 stroke:#3392FF,stroke-dasharray: 5 5\n```\n\nThis pattern is ideal for agent-based systems where each level of the tree decides what to do next. Each durable task in the tree can sleep, wait for events, or spawn more children, and none of them hold a worker slot while waiting."},"102":{"title":"Determinism in Durable Tasks","pageTitle":"Best Practices","pageRoute":"hatchet://docs/v1/patterns/mixing-patterns","content":"Durable tasks must be **deterministic** between checkpoints. 
The task should always perform the same sequence of operations in between retries. This is what allows Hatchet to replay the task from the last checkpoint. If a task is not deterministic, it may produce different results on each retry, which can lead to unexpected behavior.\n\n### Rules for determinism\n\n1. **Only call methods available on the `DurableContext`**: a common way to introduce non-determinism is to call methods that produce side effects. If you need to fetch data from a database, call an API, or otherwise interact with external systems, spawn those operations as a **child task** using `RunChild`. Durable tasks are [evicted](/v1/task-eviction) at every wait point and replayed from checkpoint on resume. Any side effect not behind a checkpoint will re-execute.\n\n2. **When updating durable tasks, always guarantee backwards compatibility**: if you change the order of checkpoint operations in a durable task, you may break determinism. For example, if you call `SleepFor` followed by `WaitFor`, and then change the order of those calls, Hatchet will not be able to replay the task correctly. The task may have already been checkpointed at the first call to `SleepFor`, and changing the order makes that checkpoint meaningless."},"103":{"title":"Child Spawning","pageTitle":"Child Spawning","pageRoute":"hatchet://docs/v1/child-spawning","content":"# Child Spawning\n\nA task can spawn child tasks at runtime, including other durable tasks or entire DAG workflows. Children run independently on any available worker, and the parent can wait for their results.\n\nBoth durable tasks and DAG tasks support child spawning with the same core API. 
The key difference is that durable tasks free the parent's worker slot while waiting (via [eviction](/v1/task-eviction)), while DAG tasks hold their slot for the duration of execution.\n\n#### Durable Tasks"},"104":{"title":"Spawning from Durable Tasks","pageTitle":"Child Spawning","pageRoute":"hatchet://docs/v1/child-spawning","content":"A durable task can spawn child tasks at runtime. This is one of the core reasons to choose durable tasks over DAGs: the shape of work is decided as the task runs, not declared upfront.\n\n> **Info:** Waiting for child results puts the parent task into an [evictable\n>   state](/v1/task-eviction), the worker slot is freed and the parent is\n>   re-queued when results are available.\n\nBecause the parent is evicted while children execute:\n\n- **No slot waste** — the parent doesn't hold a worker slot while N children run across your fleet.\n- **No deadlocks** — because the parent is evicted, it can't starve its own children for slots.\n- **Dynamic N** — you decide how many children to spawn based on runtime data (input size, API responses, agent reasoning).\n\n### Spawning child tasks\n\nUse the context object to spawn a child task from within a durable task. 
The child runs independently on any available worker.\n\n#### Python\n\n```python\nfrom examples.fanout.worker import ChildInput, child_wf\n\n# 👀 example: run this inside of a parent task to spawn a child\nchild_wf.run(\n    ChildInput(a=\"b\"),\n)\n```\n\n#### Typescript\n\n```typescript\nexport const parentSingleChild = hatchet.task({\n  name: 'parent-single-child',\n  fn: async () => {\n    const childRes = await child.run({ N: 1 });\n\n    return {\n      Result: childRes.Value,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n// Inside a parent task\nchildResult, err := childWorkflow.Run(hCtx, ChildInput{\n\tValue: 1,\n})\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nFANOUT_CHILD_WF.run({ \"a\" => \"b\" })\n```\n\n### Parallel fan-out\n\nSpawn many children at once and wait for all results. The parent is evicted during the wait, so it consumes no resources while children run.\n\n#### Python\n\n```python\nasync def run_child_workflows(n: int) -> list[dict[str, Any]]:\n    return await child_wf.aio_run_many(\n        [\n            child_wf.create_bulk_run_item(\n                input=ChildInput(a=str(i)),\n            )\n            for i in range(n)\n        ]\n    )\n```\n\n#### Typescript\n\n```typescript\ntype ParentInput = {\n  N: number;\n};\n\nexport const parent = hatchet.task({\n  name: 'parent',\n  fn: async (input: ParentInput, ctx) => {\n    const n = input.N;\n    const promises = [];\n\n    for (let i = 0; i < n; i++) {\n      promises.push(child.run({ N: i }));\n    }\n\n    const childRes = await Promise.all(promises);\n    const sum = childRes.reduce((acc, curr) => acc + curr.Value, 0);\n\n    return {\n      Result: sum,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n// Run multiple child tasks in parallel using goroutines\nvar wg sync.WaitGroup\nvar mu sync.Mutex\nresults := make([]*ChildOutput, 0, n)\n\nwg.Add(n)\nfor i := 0; i < n; i++ {\n\tgo func(index int) {\n\t\tdefer wg.Done()\n\t\tresult, err := childWorkflow.Run(hCtx, 
ChildInput{Value: index})\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar childOutput ChildOutput\n\t\terr = result.Into(&childOutput)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmu.Lock()\n\t\tresults = append(results, &childOutput)\n\t\tmu.Unlock()\n\t}(i)\n}\nwg.Wait()\n```\n\n#### Ruby\n\n```ruby\ndef run_child_workflows(n)\n  FANOUT_CHILD_WF.run_many(\n    n.times.map do |i|\n      FANOUT_CHILD_WF.create_bulk_run_item(\n        input: { \"a\" => i.to_s }\n      )\n    end\n  )\nend\n```\n\n### What children can be\n\nA durable task can spawn any runnable:\n\nChild type, Example\n\n**Regular task**, Spawn a stateless task for a quick computation or API call.\n**Durable task**, Spawn another durable task that has its own checkpoints, sleeps, and event waits.\n**DAG workflow**, Spawn an entire multi-task workflow and wait for its final output.\n\n### Error handling\n\n#### Python\n\n```python\ntry:\n    child_wf.run(\n        ChildInput(a=\"b\"),\n    )\nexcept Exception as e:\n    print(f\"Child workflow failed: {e}\")\n```\n\n#### Typescript\n\n```typescript\nexport const withErrorHandling = hatchet.task({\n  name: 'parent-error-handling',\n  fn: async () => {\n    try {\n      const childRes = await child.run({ N: 1 });\n\n      return {\n        Result: childRes.Value,\n      };\n    } catch (error) {\n      // decide how to proceed here\n      return {\n        Result: -1,\n      };\n    }\n  },\n});\n```\n\n#### Go\n\n```go\nresult, err := childWorkflow.Run(hCtx, ChildInput{Value: 1})\nif err != nil {\n\t// Handle error from child workflow\n\tfmt.Printf(\"Child workflow failed: %v\\n\", err)\n\t// Decide how to proceed - retry, skip, or fail the parent\n}\n```\n\n#### Ruby\n\n```ruby\nbegin\n  FANOUT_CHILD_WF.run({ \"a\" => \"b\" })\nrescue StandardError => e\n  puts \"Child workflow failed: #{e.message}\"\nend\n```\n\n#### DAGs"},"105":{"title":"Spawning from DAG Tasks","pageTitle":"Child 
Spawning","pageRoute":"hatchet://docs/v1/child-spawning","content":"DAG tasks can also spawn child tasks procedurally during execution. This lets you combine a fixed pipeline structure with dynamic child work inside individual tasks.\n\n### Creating parent and child tasks\n\nTo implement child task spawning, you first need to create both parent and child task definitions.\n\n#### Python\n\nFirst, we'll declare a couple of tasks for the parent and child:\n\n```python\nclass ParentInput(BaseModel):\n    n: int = 100\n\n\nclass ChildInput(BaseModel):\n    a: str\n\n\nparent_wf = hatchet.workflow(name=\"FanoutParent\", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name=\"FanoutChild\", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n    print(\"spawning child\")\n\n    result = await child_wf.aio_run_many(\n        [\n            child_wf.create_bulk_run_item(\n                input=ChildInput(a=str(i)),\n                additional_metadata={\"hello\": \"earth\"},\n                key=f\"child{i}\",\n            )\n            for i in range(input.n)\n        ],\n    )\n\n    print(f\"results {result}\")\n\n    return {\"results\": result}\n```\n\nWe also created a step on the parent task that spawns the child tasks. Now, we'll add a couple of steps to the child task:\n\n```python\n@child_wf.task()\nasync def process(input: ChildInput, ctx: Context) -> dict[str, str]:\n    print(f\"child process {input.a}\")\n    return {\"status\": input.a}\n\n\n@child_wf.task(parents=[process])\nasync def process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n    process_output = ctx.task_output(process)\n    a = process_output[\"status\"]\n\n    return {\"status2\": a + \"2\"}\n```\n\nAnd that's it! 
The fanout parent will run and spawn the child, and then will collect the results from its steps.\n\n#### Typescript\n\n```typescript\nimport sleep from '@hatchet-dev/typescript-sdk/util/sleep';\nimport { hatchet } from '../hatchet-client';\n\n// (optional) Define the input type for the workflow\nexport type ChildInput = {\n  Message: string;\n};\n\nexport type ParentInput = {\n  Message: string;\n};\n\nexport const child = hatchet.workflow({\n  name: 'child',\n});\n\nexport const child1 = child.task({\n  name: 'child1',\n  fn: async (input: ChildInput, ctx) => {\n    await sleep(30 * 1000);\n\n    ctx.logger.info('hello from the child1', { hello: 'moon' });\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n\nexport const child2 = child.task({\n  name: 'child2',\n  fn: (input: ChildInput, ctx) => {\n    ctx.logger.info('hello from the child2');\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n\nexport const child3 = child.task({\n  name: 'child3',\n  parents: [child1, child2],\n  fn: async (input: ChildInput, ctx) => {\n    ctx.logger.info('hello from the child3');\n    return {\n      TransformedMessage: input.Message.toLowerCase(),\n    };\n  },\n});\n\nexport const parent = hatchet.task({\n  name: 'parent',\n  executionTimeout: '5m',\n  fn: async (input: ParentInput, ctx) => {\n    const c = await ctx.runChild(child, {\n      Message: input.Message,\n    });\n\n    return {\n      TransformedMessage: 'not implemented',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\ntype ParentInput struct {\n\tCount int `json:\"count\"`\n}\n\ntype ParentOutput struct {\n\tSum int `json:\"sum\"`\n}\n\nfunc Parent(client *hatchet.Client) *hatchet.StandaloneTask {\n\treturn client.NewStandaloneTask(\"parent-task\",\n\t\tfunc(ctx hatchet.Context, input ParentInput) (ParentOutput, error) {\n\t\t\tlog.Printf(\"Parent workflow spawning %d child workflows\", input.Count)\n\n\t\t\t// Spawn multiple child 
workflows and collect results\n\t\t\tsum := 0\n\t\t\tfor i := 0; i < input.Count; i++ {\n\t\t\t\tlog.Printf(\"Spawning child workflow %d/%d\", i+1, input.Count)\n\n\t\t\t\t// Spawn child workflow and wait for result\n\t\t\t\tchildResult, err := Child(client).Run(ctx, ChildInput{\n\t\t\t\t\tValue: i + 1,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ParentOutput{}, fmt.Errorf(\"failed to spawn child workflow %d: %w\", i, err)\n\t\t\t\t}\n\n\t\t\t\tvar childOutput ChildOutput\n\t\t\t\terr = childResult.Into(&childOutput)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ParentOutput{}, fmt.Errorf(\"failed to get child workflow result: %w\", err)\n\t\t\t\t}\n\n\t\t\t\tsum += childOutput.Result\n\n\t\t\t\tlog.Printf(\"Child workflow %d completed with result: %d\", i+1, childOutput.Result)\n\t\t\t}\n\n\t\t\tlog.Printf(\"All child workflows completed. Total sum: %d\", sum)\n\t\t\treturn ParentOutput{\n\t\t\t\tSum: sum,\n\t\t\t}, nil\n\t\t},\n\t)\n}\n\ntype ChildInput struct {\n\tValue int `json:\"value\"`\n}\n\ntype ChildOutput struct {\n\tResult int `json:\"result\"`\n}\n\nfunc Child(client *hatchet.Client) *hatchet.StandaloneTask {\n\treturn client.NewStandaloneTask(\"child-task\",\n\t\tfunc(ctx hatchet.Context, input ChildInput) (ChildOutput, error) {\n\t\t\treturn ChildOutput{\n\t\t\t\tResult: input.Value * 2,\n\t\t\t}, nil\n\t\t},\n\t)\n}\n```\n\n#### Ruby\n\n```ruby\nFANOUT_PARENT_WF = HATCHET.workflow(name: \"FanoutParent\")\nFANOUT_CHILD_WF = HATCHET.workflow(name: \"FanoutChild\")\n\nFANOUT_PARENT_WF.task(:spawn, execution_timeout: 300) do |input, ctx|\n  puts \"spawning child\"\n  n = input[\"n\"] || 100\n\n  result = FANOUT_CHILD_WF.run_many(\n    n.times.map do |i|\n      FANOUT_CHILD_WF.create_bulk_run_item(\n        input: { \"a\" => i.to_s },\n        options: Hatchet::TriggerWorkflowOptions.new(\n          additional_metadata: { \"hello\" => \"earth\" },\n          key: \"child#{i}\"\n        )\n      )\n    end\n  )\n\n  puts \"results #{result}\"\n  { 
\"results\" => result }\nend\n```\n```ruby\nFANOUT_CHILD_PROCESS = FANOUT_CHILD_WF.task(:process) do |input, ctx|\n  puts \"child process #{input['a']}\"\n  { \"status\" => input[\"a\"] }\nend\n\nFANOUT_CHILD_WF.task(:process2, parents: [FANOUT_CHILD_PROCESS]) do |input, ctx|\n  process_output = ctx.task_output(FANOUT_CHILD_PROCESS)\n  a = process_output[\"status\"]\n  { \"status2\" => \"#{a}2\" }\nend\n```\n\n### Running child tasks\n\nTo spawn and run a child task from a parent task, use the appropriate method for your language:\n\n#### Python\n\n```python\nfrom examples.fanout.worker import ChildInput, child_wf\n\n# 👀 example: run this inside of a parent task to spawn a child\nchild_wf.run(\n    ChildInput(a=\"b\"),\n)\n```\n\n#### Typescript\n\n```typescript\nexport const parentSingleChild = hatchet.task({\n  name: 'parent-single-child',\n  fn: async () => {\n    const childRes = await child.run({ N: 1 });\n\n    return {\n      Result: childRes.Value,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n// Inside a parent task\nchildResult, err := childWorkflow.Run(hCtx, ChildInput{\n\tValue: 1,\n})\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nFANOUT_CHILD_WF.run({ \"a\" => \"b\" })\n```\n\n### Parallel child task execution\n\nSpawn multiple child tasks in parallel:\n\n#### Python\n\n```python\nasync def run_child_workflows(n: int) -> list[dict[str, Any]]:\n    return await child_wf.aio_run_many(\n        [\n            child_wf.create_bulk_run_item(\n                input=ChildInput(a=str(i)),\n            )\n            for i in range(n)\n        ]\n    )\n```\n\n#### Typescript\n\n```typescript\ntype ParentInput = {\n  N: number;\n};\n\nexport const parent = hatchet.task({\n  name: 'parent',\n  fn: async (input: ParentInput, ctx) => {\n    const n = input.N;\n    const promises = [];\n\n    for (let i = 0; i < n; i++) {\n      promises.push(child.run({ N: i }));\n    }\n\n    const childRes = await Promise.all(promises);\n    const sum = 
childRes.reduce((acc, curr) => acc + curr.Value, 0);\n\n    return {\n      Result: sum,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n// Run multiple child tasks in parallel using goroutines\nvar wg sync.WaitGroup\nvar mu sync.Mutex\nresults := make([]*ChildOutput, 0, n)\n\nwg.Add(n)\nfor i := 0; i < n; i++ {\n\tgo func(index int) {\n\t\tdefer wg.Done()\n\t\tresult, err := childWorkflow.Run(hCtx, ChildInput{Value: index})\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar childOutput ChildOutput\n\t\terr = result.Into(&childOutput)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmu.Lock()\n\t\tresults = append(results, &childOutput)\n\t\tmu.Unlock()\n\t}(i)\n}\nwg.Wait()\n```\n\n#### Ruby\n\n```ruby\ndef run_child_workflows(n)\n  FANOUT_CHILD_WF.run_many(\n    n.times.map do |i|\n      FANOUT_CHILD_WF.create_bulk_run_item(\n        input: { \"a\" => i.to_s }\n      )\n    end\n  )\nend\n```\n\n### Error handling\n\n#### Python\n\n```python\ntry:\n    child_wf.run(\n        ChildInput(a=\"b\"),\n    )\nexcept Exception as e:\n    print(f\"Child workflow failed: {e}\")\n```\n\n#### Typescript\n\n```typescript\nexport const withErrorHandling = hatchet.task({\n  name: 'parent-error-handling',\n  fn: async () => {\n    try {\n      const childRes = await child.run({ N: 1 });\n\n      return {\n        Result: childRes.Value,\n      };\n    } catch (error) {\n      // decide how to proceed here\n      return {\n        Result: -1,\n      };\n    }\n  },\n});\n```\n\n#### Go\n\n```go\nresult, err := childWorkflow.Run(hCtx, ChildInput{Value: 1})\nif err != nil {\n\t// Handle error from child workflow\n\tfmt.Printf(\"Child workflow failed: %v\\n\", err)\n\t// Decide how to proceed - retry, skip, or fail the parent\n}\n```\n\n#### Ruby\n\n```ruby\nbegin\n  FANOUT_CHILD_WF.run({ \"a\" => \"b\" })\nrescue StandardError => e\n  puts \"Child workflow failed: #{e.message}\"\nend\n```"},"106":{"title":"Common Patterns","pageTitle":"Child 
Spawning","pageRoute":"hatchet://docs/v1/child-spawning","content":"### Dynamic fan-out / fan-in\n\nProcess a list of items whose length is only known at runtime. Spawn one child per item, collect all results, then continue. Document processing and batch processing are canonical examples: when a batch of files arrives, a parent fans out to one child per document; each child parses, extracts, and validates its document in parallel across your worker fleet.\n\n\n[Concurrency](/v1/concurrency) controls how many children run simultaneously. Hatchet distributes child tasks across available workers, so adding workers increases throughput without code changes. For rate-limited external services (OCR, LLM APIs), combine with [Rate Limits](/v1/rate-limits) to throttle child execution across all workers.\n\n### Agent loops\n\nAn **agent loop** is implemented by having a durable task spawn a new child run of itself with updated input until a termination condition is met. Each iteration is a separate child task, giving full observability in the dashboard. AI agents use this pattern when they reason about what to do, spawn a subtask (or a sub-workflow), inspect the result, and decide whether to continue, branch, or stop.\n\n\n### Recursive workflows\n\nA durable task spawns child durable tasks, each of which may spawn their own children. This creates a tree of work that's entirely driven by runtime logic, useful for crawlers, recursive search, and tree-structured computations."},"107":{"title":"Use cases","pageTitle":"Child Spawning","pageRoute":"hatchet://docs/v1/child-spawning","content":"1. **Dynamic fan-out processing** — When the number of parallel tasks is determined at runtime.\n2. **Reusable workflow components** — Create modular workflows that can be reused across different parent workflows.\n3. **Resource-intensive operations** — Spread computation across multiple workers.\n4. 
**Agent-based systems** — Allow AI agents to spawn new workflows based on their reasoning.\n5. **Long-running operations** — Break down long operations into smaller, trackable units of work."},"108":{"title":"Sleep & Delays","pageTitle":"Sleep & Delays","pageRoute":"hatchet://docs/v1/sleep","content":"# Sleep & Delays\n\nSleep pauses a task for a specified duration while freeing the worker slot. No resources are consumed during the wait, whether the pause lasts seconds or weeks.\n\n\nBoth durable tasks and DAGs support sleeping, but the API differs: durable tasks call `SleepFor` dynamically at runtime, while DAGs declare a sleep condition upfront on the task definition.\n\n#### Durable Tasks"},"109":{"title":"Durable Sleep","pageTitle":"Sleep & Delays","pageRoute":"hatchet://docs/v1/sleep","content":"Durable sleep pauses execution for a specified amount of time and frees the worker slot until the sleep expires.\n\n> **Info:** Sleeping puts the task into an [evictable state](/v1/task-eviction), the\n>   worker slot is freed and the task is re-queued when the sleep expires.\n\nUnlike a language-level sleep (e.g. `time.sleep` in Python or `setTimeout` in Node), durable sleep is guaranteed to respect the original duration across interruptions. A language-level sleep ties the wait to the local process, so if the process restarts, the sleep starts over from zero.\n\nFor example, say you'd like to send a notification to a user after 24 hours. With `time.sleep`, if the task is interrupted after 23 hours, it will restart and sleep for 24 hours again (47 hours total). With durable sleep, Hatchet tracks the original deadline server-side, so the task will only sleep for 1 more hour on restart.\n\n### Using durable sleep\n\nDurable sleep can be used by calling the `SleepFor` method on the `DurableContext` object. 
This method takes a duration as an argument and will sleep for that duration.\n\n#### Python\n\n```python\n@hatchet.durable_task(name=\"DurableSleepTask\")\nasync def durable_sleep_task(input: EmptyModel, ctx: DurableContext) -> None:\n    res = await ctx.aio_sleep_for(timedelta(seconds=5))\n\n    print(\"got result\", res)\n```\n\n#### Typescript\n\n```typescript\ndurableSleep.durableTask({\n  name: 'durable-sleep',\n  executionTimeout: '10m',\n  fn: async (_, ctx) => {\n    console.log('sleeping for 5s');\n    const sleepRes = await ctx.sleepFor('5s');\n    console.log('done sleeping for 5s', sleepRes);\n\n    return {\n      Value: 'done',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\ntask := client.NewStandaloneDurableTask(\"long-running-task\", func(ctx hatchet.DurableContext, input DurableInput) (DurableOutput, error) {\n\tlog.Printf(\"Starting task, will sleep for %d seconds\", input.Delay)\n\n\tif _, err := ctx.SleepFor(time.Duration(input.Delay) * time.Second); err != nil {\n\t\treturn DurableOutput{}, err\n\t}\n\n\tlog.Printf(\"Finished sleeping, processing message: %s\", input.Message)\n\n\treturn DurableOutput{\n\t\tProcessedAt: time.Now().Format(time.RFC3339),\n\t\tMessage:     \"Processed: \" + input.Message,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nDURABLE_SLEEP_TASK = HATCHET.durable_task(name: \"DurableSleepTask\") do |input, ctx|\n  res = ctx.sleep_for(duration: 5)\n\n  puts \"got result #{res}\"\nend\n```\n\n#### DAGs"},"110":{"title":"Sleep Conditions","pageTitle":"Sleep & Delays","pageRoute":"hatchet://docs/v1/sleep","content":"Sleep conditions pause a DAG task for a specified duration before it runs. Use them when a task should wait for a fixed amount of time after its parent tasks complete.\n\nUnlike durable sleep (which is called dynamically at runtime), DAG sleep conditions are declared upfront on the task definition. 
Both free the worker slot during the wait.\n\n### Using sleep conditions\n\nDeclare a task with a `wait_for` sleep condition. The task will wait for its parent tasks to complete, then sleep for the specified duration before executing.\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[start], wait_for=[SleepCondition(timedelta(seconds=10))]\n)\ndef wait_for_sleep(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n#### Typescript\n\n```typescript\nconst waitForSleep = taskConditionWorkflow.task({\n  name: 'waitForSleep',\n  parents: [start],\n  waitFor: [new SleepCondition('10s')],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nwaitForSleep := workflow.NewTask(\"wait-for-sleep\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(start),\n\thatchet.WithWaitFor(hatchet.SleepCondition(10*time.Second)),\n)\n```\n\n#### Ruby\n\n```ruby\nWAIT_FOR_SLEEP = TASK_CONDITION_WORKFLOW.task(\n  :wait_for_sleep,\n  parents: [COND_START],\n  wait_for: [Hatchet::SleepCondition.new(10)]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\nThis task will first wait for its parent to complete, then sleep for the specified duration before executing.\n\n### Combining with other conditions\n\nSleep conditions can be combined with other conditions using or groups. For example, you can wait for _either_ a sleep duration or an event (whichever comes first). See [Combining Conditions](/v1/conditions#or-groups) for details."},"111":{"title":"Wait For Events","pageTitle":"Wait For Events","pageRoute":"hatchet://docs/v1/events","content":"# Events\n\nTasks can pause until an external event arrives before continuing. 
This is the foundation for human-in-the-loop workflows, webhook-driven pipelines, and any flow that depends on signals from outside the task.\n\n\nBoth durable tasks and DAGs support waiting for events. Durable tasks call `WaitForEvent` dynamically at runtime, while DAGs declare event conditions upfront on the task definition.\n\nEvents are delivered by [pushing events](/v1/external-events/pushing-events) into Hatchet using the event client. The event key you push must match the key your task is waiting for.\n\n#### Durable Tasks"},"112":{"title":"Wait For Events","pageTitle":"Wait For Events","pageRoute":"hatchet://docs/v1/events","content":"Wait For Events lets a durable task pause until an external event arrives. Even if the task is interrupted and requeued while waiting, the event will still be processed. When it resumes, it reads the event from the durable event log and continues.\n\n> **Info:** Waiting for an event puts the task into an [evictable\n>   state](/v1/task-eviction), the worker slot is freed and the task is re-queued\n>   when the event arrives.\n\n### Declaring a wait for event\n\nWait For Event is declared using the context method `WaitFor` (or utility method `WaitForEvent`) on the `DurableContext` object.\n\n#### Python\n\n```python\n@hatchet.durable_task(name=\"DurableEventTask\")\nasync def durable_event_task(input: EmptyModel, ctx: DurableContext) -> None:\n    res = await ctx.aio_wait_for_event(\n        \"user:update\",\n    )\n\n    print(\"got event\", res)\n```\n\n#### Typescript\n\n```typescript\nexport const durableEvent = hatchet.durableTask({\n  name: 'durable-event',\n  executionTimeout: '10m',\n  fn: async (_, ctx) => {\n    const res = await ctx.waitForEvent(EVENT_KEY);\n\n    console.log('res', res);\n\n    return {\n      Value: 'done',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\ntask := client.NewStandaloneDurableTask(\"long-running-task\", func(ctx hatchet.DurableContext, input DurableInput) (DurableOutput, error) 
{\n\tlog.Printf(\"Starting task, will sleep for %d seconds\", input.Delay)\n\n\tif _, err := ctx.WaitForEvent(\"user:updated\", \"\"); err != nil {\n\t\treturn DurableOutput{}, err\n\t}\n\n\tlog.Printf(\"Finished waiting for event, processing message: %s\", input.Message)\n\n\treturn DurableOutput{\n\t\tProcessedAt: time.Now().Format(time.RFC3339),\n\t\tMessage:     \"Processed: \" + input.Message,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nDURABLE_EVENT_TASK = HATCHET.durable_task(name: \"DurableEventTask\") do |input, ctx|\n  res = ctx.wait_for(\n    \"event\",\n    Hatchet::UserEventCondition.new(event_key: \"user:update\")\n  )\n\n  puts \"got event #{res}\"\nend\n\nDURABLE_EVENT_TASK_WITH_FILTER = HATCHET.durable_task(name: \"DurableEventWithFilterTask\") do |input, ctx|\n```\n\n### Event filters\n\nEvents can be filtered using [CEL](https://github.com/google/cel-spec) expressions. For example, to only receive `user:update` events for a specific user:\n\n#### Python\n\n```python\nres = await ctx.aio_wait_for_event(\"user:update\", \"input.user_id == '1234'\")\n```\n\n#### Typescript\n\n```typescript\nconst res = await ctx.waitForEvent(EVENT_KEY, \"input.userId == '1234'\");\n```\n\n#### Go\n\n```go\nif _, err := ctx.WaitForEvent(\"user:updated\", \"input.status_code == 200\"); err != nil {\n\treturn DurableOutput{}, err\n}\n```\n\n#### Ruby\n\n```ruby\nres = ctx.wait_for(\n    \"event\",\n    Hatchet::UserEventCondition.new(\n      event_key: \"user:update\",\n      expression: \"input.user_id == '1234'\"\n    )\n  )\n\n  puts \"got event #{res}\"\nend\n```\n\n### Pushing events\n\nFor a waiting task to resume, something must [push an event](/v1/external-events/pushing-events) into Hatchet with a matching key. 
You can do this from any service that has access to the Hatchet client.\n\n#### Python\n\n```python\nhatchet.event.push(\"user:create\", {\"should_skip\": False})\n```\n\n#### Typescript\n\n```typescript\nconst res = await hatchet.events.push('simple-event:create', {\n  Message: 'hello',\n  ShouldSkip: false,\n});\n```\n\n#### Go\n\n```go\nerr := client.Events().Push(\n\tcontext.Background(),\n\t\"simple-event:create\",\n\tEventInput{\n\t\tMessage: \"Hello, World!\",\n\t},\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET.event.push(\"user:create\", { \"should_skip\" => false })\n```\n\nWhen the pushed event's key matches what a durable task is waiting for (and passes any CEL filter), the task is re-queued and resumes from its checkpoint.\n\n#### DAGs"},"113":{"title":"Event Conditions","pageTitle":"Wait For Events","pageRoute":"hatchet://docs/v1/events","content":"Event conditions let a DAG task react to external events. A task can wait for an event before running, be skipped when an event arrives, or be cancelled by an event.\n\nUnlike durable tasks (where `WaitForEvent` is called dynamically at runtime), DAG event conditions are declared upfront on the task definition.\n\n### Usage modes\n\nEvent conditions can be used with three operators:\n\n- **`wait_for`** — the task waits for the event before starting.\n- **`skip_if`** — the task is skipped if the event arrives.\n- **`cancel_if`** — the task is cancelled if the event arrives.\n\n> **Warning:** A task cancelled by `cancel_if` behaves like any other cancellation in Hatchet\n>   — downstream tasks will be cancelled as well.\n\n### Waiting for an event\n\nDeclare a task with a `wait_for` event condition. 
The task will not start until the specified event is pushed into Hatchet.\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[start],\n    wait_for=[\n        or_(\n            SleepCondition(duration=timedelta(minutes=1)),\n            UserEventCondition(event_key=\"wait_for_event:start\"),\n        )\n    ],\n)\ndef wait_for_event(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n#### Typescript\n\n```typescript\nconst waitForEvent = taskConditionWorkflow.task({\n  name: 'waitForEvent',\n  parents: [start],\n  waitFor: [Or(new SleepCondition('1m'), new UserEventCondition('wait_for_event:start', 'true'))],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nwaitForEvent := workflow.NewTask(\"wait-for-event\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(start),\n\thatchet.WithWaitFor(hatchet.OrCondition(\n\t\thatchet.SleepCondition(1*time.Minute),\n\t\thatchet.UserEventCondition(\"wait_for_event:start\", \"\"),\n\t)),\n)\n```\n\n#### Ruby\n\n```ruby\nWAIT_FOR_EVENT = TASK_CONDITION_WORKFLOW.task(\n  :wait_for_event,\n  parents: [COND_START],\n  wait_for: [\n    Hatchet.or_(\n      Hatchet::SleepCondition.new(60),\n      Hatchet::UserEventCondition.new(event_key: \"wait_for_event:start\")\n    )\n  ]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\n### Skipping on an event\n\nDeclare a task with a `skip_if` event condition. 
The task will be skipped if the event arrives before the task starts.\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[start],\n    wait_for=[SleepCondition(timedelta(seconds=30))],\n    skip_if=[UserEventCondition(event_key=\"skip_on_event:skip\")],\n)\ndef skip_on_event(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n#### Typescript\n\n```typescript\nconst skipOnEvent = taskConditionWorkflow.task({\n  name: 'skipOnEvent',\n  parents: [start],\n  waitFor: [new SleepCondition('10s')],\n  skipIf: [new UserEventCondition('skip_on_event:skip', 'true')],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nskipOnEvent := workflow.NewTask(\"skip-on-event\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(start),\n\thatchet.WithWaitFor(hatchet.SleepCondition(30*time.Second)),\n\thatchet.WithSkipIf(hatchet.UserEventCondition(\"skip_on_event:skip\", \"\")),\n)\n```\n\n#### Ruby\n\n```ruby\nSKIP_ON_EVENT = TASK_CONDITION_WORKFLOW.task(\n  :skip_on_event,\n  parents: [COND_START],\n  wait_for: [Hatchet::SleepCondition.new(30)],\n  skip_if: [Hatchet::UserEventCondition.new(event_key: \"skip_on_event:skip\")]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\n### Event filters\n\nEvents can be filtered using [CEL](https://github.com/google/cel-spec) expressions. The CEL expression is evaluated against the event payload, and the condition only matches if the expression returns `true`. This works identically to event filters in durable tasks.\n\n### Pushing events\n\nFor a waiting task to proceed, something must [push an event](/v1/external-events/pushing-events) into Hatchet with a matching key. 
You can do this from any service that has access to the Hatchet client.\n\n#### Python\n\n```python\nhatchet.event.push(\"user:create\", {\"should_skip\": False})\n```\n\n#### Typescript\n\n```typescript\nconst res = await hatchet.events.push('simple-event:create', {\n  Message: 'hello',\n  ShouldSkip: false,\n});\n```\n\n#### Go\n\n```go\nerr := client.Events().Push(\n\tcontext.Background(),\n\t\"simple-event:create\",\n\tEventInput{\n\t\tMessage: \"Hello, World!\",\n\t},\n)\nif err != nil {\n\treturn err\n}\n```\n\n#### Ruby\n\n```ruby\nHATCHET.event.push(\"user:create\", { \"should_skip\" => false })\n```\n\n### Combining with other conditions\n\nEvent conditions can be combined with parent and sleep conditions using or groups. For example, you can wait for _either_ an event or a timeout (whichever comes first). See [Conditions & Branching](/v1/conditions) for details."},"114":{"title":"Conditions & Branching","pageTitle":"Conditions & Branching","pageRoute":"hatchet://docs/v1/conditions","content":"# Conditions & Branching\n\nWorkflows often need to branch: run different paths depending on data, skip steps when conditions aren't met, or wait for a combination of signals before proceeding. Both durable tasks and DAGs support conditional logic, but the approach differs.\n\n\n#### Durable Tasks"},"115":{"title":"Procedural Branching","pageTitle":"Conditions & Branching","pageRoute":"hatchet://docs/v1/conditions","content":"Durable tasks use standard language control flow (`if`/`else`, `match`, loops) to branch at runtime. 
Because the task is a single long-running function, you can make decisions based on any data available during execution: inputs, intermediate results, API responses, or child task outputs.\n\n```python\n@workflow.durable_task()\nasync def process(input: ProcessInput, ctx: DurableContext):\n    result = await ctx.run_child(analyze_task, input)\n\n    if result[\"score\"] > 0.8:\n        await ctx.run_child(fast_path_task, result)\n    else:\n        await ctx.run_child(slow_path_task, result)\n        await ctx.run_child(review_task, result)\n```\n\nThis is one of the key advantages of durable tasks: branching logic is expressed directly in code, making it easy to handle complex, dynamic flows. Each branch can spawn different children, sleep for different durations, or wait for different events.\n\n> **Warning:** Branching logic must be **deterministic** between checkpoints. If the task is\n>   evicted and replayed, the same branches must execute in the same order. Base\n>   decisions on checkpoint outputs (child results, event payloads) rather than\n>   wall-clock time or external state that may change between replays. See [Best\n>   Practices](/v1/patterns/mixing-patterns#determinism-in-durable-tasks) for\n>   details."},"116":{"title":"Or Groups","pageTitle":"Conditions & Branching","pageRoute":"hatchet://docs/v1/conditions","content":"Durable tasks can combine multiple wait conditions using or groups. 
An or group evaluates to `True` if **at least one** of its conditions is satisfied, letting you express \"proceed on timeout or event, whichever comes first.\"\n\n#### Python\n\n```python\n@durable_workflow.durable_task()\nasync def wait_for_or_group_1(\n    _i: EmptyModel, ctx: DurableContext\n) -> dict[str, str | int | float]:\n    start = time.time()\n    wait_result = await ctx.aio_wait_for(\n        uuid4().hex,\n        or_(\n            SleepCondition(timedelta(seconds=SLEEP_TIME)),\n            UserEventCondition(event_key=EVENT_KEY),\n        ),\n    )\n\n    key = list(wait_result.keys())[0]\n    event_id = list(wait_result[key].keys())[0]\n\n    return {\n        \"runtime\": time.time() - start,\n        \"key\": key,\n        \"event_id\": event_id,\n    }\n```\n\n`or_()` wraps a `SleepCondition` and a `UserEventCondition` into a single or group. The task will resume as soon as either the sleep expires or the event arrives.\n\n#### Typescript\n\n```typescript\nexport const durableEvent = hatchet.durableTask({\n  name: 'durable-event',\n  executionTimeout: '10m',\n  fn: async (_, ctx) => {\n    const res = await ctx.waitForEvent(EVENT_KEY);\n\n    console.log('res', res);\n\n    return {\n      Value: 'done',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\ntask := client.NewStandaloneDurableTask(\"long-running-task\", func(ctx hatchet.DurableContext, input DurableInput) (DurableOutput, error) {\n\tlog.Printf(\"Starting task, will sleep for %d seconds\", input.Delay)\n\n\tif _, err := ctx.WaitForEvent(\"user:updated\", \"\"); err != nil {\n\t\treturn DurableOutput{}, err\n\t}\n\n\tlog.Printf(\"Finished waiting for event, processing message: %s\", input.Message)\n\n\treturn DurableOutput{\n\t\tProcessedAt: time.Now().Format(time.RFC3339),\n\t\tMessage:     \"Processed: \" + input.Message,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nDURABLE_EVENT_TASK = HATCHET.durable_task(name: \"DurableEventTask\") do |input, ctx|\n  res = ctx.wait_for(\n    \"event\",\n   
  Hatchet::UserEventCondition.new(event_key: \"user:update\")\n  )\n\n  puts \"got event #{res}\"\nend\n```\n\n#### DAGs"},"117":{"title":"Parent Conditions","pageTitle":"Conditions & Branching","pageRoute":"hatchet://docs/v1/conditions","content":"Parent conditions let a DAG task decide whether to run based on the output of a parent task. This enables branching logic within a DAG: different paths can execute depending on runtime data, while the overall graph structure remains fixed and visible in the dashboard.\n\nParent conditions can be used with two operators:\n\n- **`skip_if`** — skip the task if the parent output matches the condition.\n- **`cancel_if`** — cancel the task (and its downstream dependents) if the parent output matches the condition.\n\n> **Warning:** A task cancelled by `cancel_if` behaves like any other cancellation in Hatchet\n>   — downstream tasks will be cancelled as well.\n\n### Branching example\n\nA common pattern is to create two sibling tasks with complementary parent conditions. For example, one task runs when a value is greater than 50 and the other runs when it is less than or equal to 50. 
Only one branch executes per run.\n\nFirst, declare a base task that returns a value:\n\n#### Python\n\n```python\n@task_condition_workflow.task()\ndef start(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n#### Typescript\n\n```typescript\nconst start = taskConditionWorkflow.task({\n  name: 'start',\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nstart := workflow.NewTask(\"start\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n})\n```\n\n#### Ruby\n\n```ruby\nCOND_START = TASK_CONDITION_WORKFLOW.task(:start) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\nThen add two branches that use `ParentCondition` with `skip_if`:\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[wait_for_sleep],\n    skip_if=[\n        ParentCondition(\n            parent=wait_for_sleep,\n            expression=\"output.random_number > 50\",\n        )\n    ],\n)\ndef left_branch(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n\n\n@task_condition_workflow.task(\n    parents=[wait_for_sleep],\n    skip_if=[\n        ParentCondition(\n            parent=wait_for_sleep,\n            expression=\"output.random_number <= 50\",\n        )\n    ],\n)\ndef right_branch(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n#### Typescript\n\n```typescript\nconst leftBranch = taskConditionWorkflow.task({\n  name: 'leftBranch',\n  parents: [waitForSleep],\n  skipIf: [new ParentCondition(waitForSleep, 'output.randomNumber > 50')],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n\nconst rightBranch = taskConditionWorkflow.task({\n  name: 
'rightBranch',\n  parents: [waitForSleep],\n  skipIf: [new ParentCondition(waitForSleep, 'output.randomNumber <= 50')],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nleftBranch := workflow.NewTask(\"left-branch\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(waitForSleep),\n\thatchet.WithSkipIf(hatchet.ParentCondition(waitForSleep, \"output.random_number > 50\")),\n)\n\nrightBranch := workflow.NewTask(\"right-branch\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(waitForSleep),\n\thatchet.WithSkipIf(hatchet.ParentCondition(waitForSleep, \"output.random_number <= 50\")),\n)\n```\n\n#### Ruby\n\n```ruby\nLEFT_BRANCH = TASK_CONDITION_WORKFLOW.task(\n  :left_branch,\n  parents: [WAIT_FOR_SLEEP],\n  skip_if: [\n    Hatchet::ParentCondition.new(\n      parent: WAIT_FOR_SLEEP,\n      expression: \"output.random_number > 50\"\n    )\n  ]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n\nRIGHT_BRANCH = TASK_CONDITION_WORKFLOW.task(\n  :right_branch,\n  parents: [WAIT_FOR_SLEEP],\n  skip_if: [\n    Hatchet::ParentCondition.new(\n      parent: WAIT_FOR_SLEEP,\n      expression: \"output.random_number <= 50\"\n    )\n  ]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\nThese two tasks check whether the output of the base task was greater or less than `50`, respectively. 
Only one of the two will run per workflow execution.\n\n### Checking if a task was skipped\n\nDownstream tasks can check whether a parent was skipped using `ctx.was_skipped`:\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[\n        start,\n        wait_for_sleep,\n        wait_for_event,\n        skip_on_event,\n        left_branch,\n        right_branch,\n    ],\n)\ndef sum(input: EmptyModel, ctx: Context) -> RandomSum:\n    one = ctx.task_output(start).random_number\n    two = ctx.task_output(wait_for_event).random_number\n    three = ctx.task_output(wait_for_sleep).random_number\n    four = (\n        ctx.task_output(skip_on_event).random_number\n        if not ctx.was_skipped(skip_on_event)\n        else 0\n    )\n\n    five = (\n        ctx.task_output(left_branch).random_number\n        if not ctx.was_skipped(left_branch)\n        else 0\n    )\n    six = (\n        ctx.task_output(right_branch).random_number\n        if not ctx.was_skipped(right_branch)\n        else 0\n    )\n\n    return RandomSum(sum=one + two + three + four + five + six)\n```\n\n#### Typescript\n\n```typescript\ntaskConditionWorkflow.task({\n  name: 'sum',\n  parents: [start, waitForSleep, waitForEvent, skipOnEvent, leftBranch, rightBranch],\n  fn: async (_, ctx: Context<any, any>) => {\n    const one = (await ctx.parentOutput(start)).randomNumber;\n    const two = (await ctx.parentOutput(waitForEvent)).randomNumber;\n    const three = (await ctx.parentOutput(waitForSleep)).randomNumber;\n    const four = (await ctx.parentOutput(skipOnEvent))?.randomNumber || 0;\n    const five = (await ctx.parentOutput(leftBranch))?.randomNumber || 0;\n    const six = (await ctx.parentOutput(rightBranch))?.randomNumber || 0;\n\n    return {\n      sum: one + two + three + four + five + six,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\n_ = workflow.NewTask(\"sum\", func(ctx hatchet.Context, _ any) (RandomSum, error) {\n\tvar startOut StepOutput\n\terr := ctx.ParentOutput(start, 
&startOut)\n\tif err != nil {\n\t\treturn RandomSum{}, err\n\t}\n\n\tvar waitForEventOut StepOutput\n\terr = ctx.ParentOutput(waitForEvent, &waitForEventOut)\n\tif err != nil {\n\t\treturn RandomSum{}, err\n\t}\n\n\tvar waitForSleepOut StepOutput\n\terr = ctx.ParentOutput(waitForSleep, &waitForSleepOut)\n\tif err != nil {\n\t\treturn RandomSum{}, err\n\t}\n\n\ttotal := startOut.RandomNumber + waitForEventOut.RandomNumber + waitForSleepOut.RandomNumber\n\n\tif !ctx.WasSkipped(skipOnEvent) {\n\t\tvar out StepOutput\n\t\terr = ctx.ParentOutput(skipOnEvent, &out)\n\t\tif err == nil {\n\t\t\ttotal += out.RandomNumber\n\t\t}\n\t}\n\n\tif !ctx.WasSkipped(leftBranch) {\n\t\tvar out StepOutput\n\t\terr = ctx.ParentOutput(leftBranch, &out)\n\t\tif err == nil {\n\t\t\ttotal += out.RandomNumber\n\t\t}\n\t}\n\n\tif !ctx.WasSkipped(rightBranch) {\n\t\tvar out StepOutput\n\t\terr = ctx.ParentOutput(rightBranch, &out)\n\t\tif err == nil {\n\t\t\ttotal += out.RandomNumber\n\t\t}\n\t}\n\n\treturn RandomSum{Sum: total}, nil\n}, hatchet.WithParents(\n\tstart,\n\twaitForSleep,\n\twaitForEvent,\n\tskipOnEvent,\n\tleftBranch,\n\trightBranch,\n))\n```\n\n#### Ruby\n\n```ruby\nTASK_CONDITION_WORKFLOW.task(\n  :sum,\n  parents: [COND_START, WAIT_FOR_SLEEP, WAIT_FOR_EVENT, SKIP_ON_EVENT, LEFT_BRANCH, RIGHT_BRANCH]\n) do |input, ctx|\n  one = ctx.task_output(COND_START)[\"random_number\"]\n  two = ctx.task_output(WAIT_FOR_EVENT)[\"random_number\"]\n  three = ctx.task_output(WAIT_FOR_SLEEP)[\"random_number\"]\n  four = ctx.was_skipped?(SKIP_ON_EVENT) ? 0 : ctx.task_output(SKIP_ON_EVENT)[\"random_number\"]\n  five = ctx.was_skipped?(LEFT_BRANCH) ? 0 : ctx.task_output(LEFT_BRANCH)[\"random_number\"]\n  six = ctx.was_skipped?(RIGHT_BRANCH) ? 
0 : ctx.task_output(RIGHT_BRANCH)[\"random_number\"]\n\n  { \"sum\" => one + two + three + four + five + six }\nend\n```"},"118":{"title":"Or Groups","pageTitle":"Conditions & Branching","pageRoute":"hatchet://docs/v1/conditions","content":"DAG tasks can declare multiple conditions that work together to control when and whether a task runs. Conditions of different types (parent conditions, [event conditions](/v1/events), and [sleep conditions](/v1/sleep)) can be mixed on a single task using **or groups**.\n\nAn **or group** is a set of conditions combined with an `Or` operator. The group evaluates to `True` if **at least one** of its conditions is satisfied. Multiple or groups on the same task are combined with `AND`, so every group must have at least one satisfied condition for the task to proceed.\n\nThis lets you express arbitrarily complex sets of conditions in [conjunctive normal form](https://en.wikipedia.org/wiki/Conjunctive_normal_form) (CNF).\n\n### Sleep + Event example\n\nThe most common combination is a sleep condition with an event condition: proceed when an external signal arrives _or_ after a timeout (whichever comes first). This is ideal for human-in-the-loop workflows where you want a deadline.\n\n#### Python\n\n```python\n@task_condition_workflow.task(\n    parents=[start],\n    wait_for=[\n        or_(\n            SleepCondition(duration=timedelta(minutes=1)),\n            UserEventCondition(event_key=\"wait_for_event:start\"),\n        )\n    ],\n)\ndef wait_for_event(input: EmptyModel, ctx: Context) -> StepOutput:\n    return StepOutput(random_number=random.randint(1, 100))\n```\n\n`or_()` wraps a `SleepCondition` and a `UserEventCondition` into a single or group. 
 The task will start as soon as either the sleep expires or the event arrives.\n\n#### Typescript\n\n```typescript\nconst waitForEvent = taskConditionWorkflow.task({\n  name: 'waitForEvent',\n  parents: [start],\n  waitFor: [Or(new SleepCondition('1m'), new UserEventCondition('wait_for_event:start', 'true'))],\n  fn: () => {\n    return {\n      randomNumber: Math.floor(Math.random() * 100) + 1,\n    };\n  },\n});\n```\n\n`Or()` wraps a `SleepCondition` and a `UserEventCondition` into a single or group. The task will start as soon as either the sleep expires or the event arrives.\n\n#### Go\n\n```go\nwaitForEvent := workflow.NewTask(\"wait-for-event\", func(ctx hatchet.Context, _ any) (StepOutput, error) {\n\treturn StepOutput{RandomNumber: rand.Intn(100) + 1}, nil //nolint:gosec\n},\n\thatchet.WithParents(start),\n\thatchet.WithWaitFor(hatchet.OrCondition(\n\t\thatchet.SleepCondition(1*time.Minute),\n\t\thatchet.UserEventCondition(\"wait_for_event:start\", \"\"),\n\t)),\n)\n```\n\n`hatchet.OrCondition` wraps a `SleepCondition` and a `UserEventCondition` into a single or group, attached with `hatchet.WithWaitFor`. The task will start as soon as either the sleep expires or the event arrives.\n\n#### Ruby\n\n```ruby\nWAIT_FOR_EVENT = TASK_CONDITION_WORKFLOW.task(\n  :wait_for_event,\n  parents: [COND_START],\n  wait_for: [\n    Hatchet.or_(\n      Hatchet::SleepCondition.new(60),\n      Hatchet::UserEventCondition.new(event_key: \"wait_for_event:start\")\n    )\n  ]\n) do |input, ctx|\n  { \"random_number\" => rand(1..100) }\nend\n```\n\n`Hatchet.or_()` wraps a `SleepCondition` and a `UserEventCondition` into a single or group. The task will start as soon as either the sleep expires or the event arrives.\n\n### Multiple or groups\n\nFor more complex logic, you can declare multiple or groups on a single task. 
Consider three conditions:\n\n- **Condition A**: Parent output is greater than 50\n- **Condition B**: Sleep for 30 seconds\n- **Condition C**: Receive the `payment:processed` event\n\nTo proceed if (A _or_ B) **and** (A _or_ C), declare two or groups:\n\n1. Group 1: `A or B`\n2. Group 2: `A or C`\n\nThe task will run once both groups are satisfied. If A is true, both groups pass immediately. If A is false, the task needs both B (sleep expires) and C (event arrives).\n\n### Common combinations\n\nCombination, Use case\n\nSleep + Event, Proceed after a timeout _or_ when an external signal arrives (whichever comes first)\nParent + Event, Proceed if a parent output meets a threshold _or_ a manual override event arrives\nParent + Sleep, Proceed if a parent indicates readiness _or_ after a maximum wait time\nAll three, Complex gates combining data-driven, time-based, and event-driven conditions"},"119":{"title":"Error Handling","pageTitle":"Error Handling","pageRoute":"hatchet://docs/v1/on-failure","content":"# Error Handling\n\nWhen a task fails, you need a way to run cleanup logic, send notifications, or trigger compensating actions. Both durable tasks and DAGs support error handling, but the mechanism differs: durable tasks use standard try/catch blocks, while DAGs declare a special on-failure task.\n\n#### Durable Tasks"},"120":{"title":"Try/Catch in Durable Tasks","pageTitle":"Error Handling","pageRoute":"hatchet://docs/v1/on-failure","content":"Durable tasks are regular functions, so you handle errors with your language's native error handling (`try`/`except` in Python, `try`/`catch` in TypeScript/Go). 
This gives you full control over what happens when a child task or operation fails.\n\n### Handling child task errors\n\nWhen spawning child tasks, wrap the call in a try/catch block to handle failures gracefully:\n\n#### Python\n\n```python\ntry:\n    child_wf.run(\n        ChildInput(a=\"b\"),\n    )\nexcept Exception as e:\n    print(f\"Child workflow failed: {e}\")\n```\n\n#### Typescript\n\n```typescript\nexport const withErrorHandling = hatchet.task({\n  name: 'parent-error-handling',\n  fn: async () => {\n    try {\n      const childRes = await child.run({ N: 1 });\n\n      return {\n        Result: childRes.Value,\n      };\n    } catch (error) {\n      // decide how to proceed here\n      return {\n        Result: -1,\n      };\n    }\n  },\n});\n```\n\n#### Go\n\n```go\nresult, err := childWorkflow.Run(hCtx, ChildInput{Value: 1})\nif err != nil {\n\t// Handle error from child workflow\n\tfmt.Printf(\"Child workflow failed: %v\\n\", err)\n\t// Decide how to proceed - retry, skip, or fail the parent\n}\n```\n\n#### Ruby\n\n```ruby\nbegin\n  FANOUT_CHILD_WF.run({ \"a\" => \"b\" })\nrescue StandardError => e\n  puts \"Child workflow failed: #{e.message}\"\nend\n```\n\n### Common patterns\n\n- **Retry with backoff** — Catch the error, sleep, and retry the child task.\n- **Fallback logic** — If a primary path fails, spawn a different child task as a fallback.\n- **Partial failure handling** — In a fan-out, collect results from successful children and handle failures individually rather than failing the entire workflow.\n- **Cleanup** — Release resources, cancel in-progress work, or notify external systems.\n\n#### DAGs"},"121":{"title":"On-Failure Tasks","pageTitle":"Error Handling","pageRoute":"hatchet://docs/v1/on-failure","content":"The on-failure task is a special task that runs when any task in the workflow fails. 
It lets you handle errors, perform cleanup, or trigger notifications declaratively as part of the workflow definition.\n\n### Defining an on-failure task\n\nYou can define an on-failure task on your workflow the same as you'd define any other task:\n\n#### Python\n\n```python\n# This workflow will fail because the step will throw an error\n# we define an onFailure step to handle this case\n\non_failure_wf = hatchet.workflow(name=\"OnFailureWorkflow\")\n\n\n@on_failure_wf.task(execution_timeout=timedelta(seconds=1))\ndef step1(input: EmptyModel, ctx: Context) -> None:\n    # 👀 this step will always raise an exception\n    raise Exception(ERROR_TEXT)\n\n\n# 👀 After the workflow fails, this special step will run\n@on_failure_wf.on_failure_task()\ndef on_failure(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    # 👀 we can do things like perform cleanup logic\n    # or notify a user here\n\n    # 👀 Fetch the errors from upstream step runs from the context\n    print(ctx.task_run_errors)\n\n    return {\"status\": \"success\"}\n```\n\nNote: Only one on-failure task can be defined per workflow.\n\n\n#### Typescript\n\n```typescript\n// This workflow will fail because `step1` throws. 
We define an `onFailure` handler to run cleanup.\nexport const failureWorkflow = hatchet.workflow({\n  name: 'on-failure-workflow',\n});\n\nfailureWorkflow.task({\n  name: 'step1',\n  executionTimeout: '1s',\n  fn: async () => {\n    throw new Error(ERROR_TEXT);\n  },\n});\n\n// 👀 After the workflow fails, this special step will run\nfailureWorkflow.onFailure({\n  name: 'on_failure',\n  fn: async (_input, ctx) => {\n    console.log('onFailure for run:', ctx.workflowRunId());\n    console.log('upstream errors:', ctx.errors());\n\n    return {\n      status: 'success',\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nmultiStepWorkflow.OnFailure(func(ctx hatchet.Context, input FailureInput) (FailureHandlerOutput, error) {\n\tlog.Printf(\"Multi-step failure handler called for input: %s\", input.Message)\n\n\tstepErrors := ctx.StepRunErrors()\n\tvar errorDetails string\n\tfor stepName, errorMsg := range stepErrors {\n\t\tlog.Printf(\"Multi-step: Step '%s' failed with error: %s\", stepName, errorMsg)\n\t\terrorDetails += stepName + \": \" + errorMsg + \"; \"\n\t}\n\n\t// Access successful step outputs for cleanup\n\tvar step1Output TaskOutput\n\tif err := ctx.StepOutput(\"first-step\", &step1Output); err == nil {\n\t\tlog.Printf(\"First step completed successfully with: %s\", step1Output.Message)\n\t}\n\n\treturn FailureHandlerOutput{\n\t\tFailureHandled: true,\n\t\tErrorDetails:   \"Multi-step workflow failed: \" + errorDetails,\n\t\tOriginalInput:  input.Message,\n\t}, nil\n})\n```\n\n#### Ruby\n\n```ruby\n# This workflow will fail because the step will throw an error\n# we define an onFailure step to handle this case\n\nON_FAILURE_WF = HATCHET.workflow(name: \"OnFailureWorkflow\")\n\nON_FAILURE_WF.task(:step1, execution_timeout: 1) do |input, ctx|\n  # This step will always raise an exception\n  raise ERROR_TEXT\nend\n\n# After the workflow fails, this special step will run\nON_FAILURE_WF.on_failure_task do |input, ctx|\n  # We can do things like perform cleanup logic\n  
# or notify a user here\n\n  # Fetch the errors from upstream step runs from the context\n  puts ctx.task_run_errors.inspect\n\n  { \"status\" => \"success\" }\nend\n```\n\nThe on-failure task will be executed only if any of the main tasks in the workflow fail.\n\n### Use cases\n\n- Performing cleanup tasks after a task failure in a workflow\n- Sending notifications or alerts about the failure\n- Logging additional information for debugging purposes\n- Triggering a compensating action or a fallback task"},"122":{"title":"Resource Management","pageTitle":"Resource Management","pageRoute":"hatchet://docs/v1/task-eviction","content":"# Resource Management During Waits\n\nWhen a task needs to wait (for time, an event, or child results), how does Hatchet handle the worker slot? The answer depends on which pattern you're using.\n\n#### Durable Tasks"},"123":{"title":"Task Eviction","pageTitle":"Resource Management","pageRoute":"hatchet://docs/v1/task-eviction","content":"When a durable task enters a wait, whether from `SleepFor`, `WaitForEvent`, or `WaitFor`, Hatchet **evicts** the task from the worker. The worker slot is released, the task's progress is persisted in the durable event log, and the task does not consume slots or hold resources while it is idle.\n\nThis is what makes durable tasks fundamentally different from regular tasks: a regular task consumes a slot for the entire duration of execution, even if it's just sleeping. A durable task gives the slot back the moment it starts waiting.\n\n### How eviction works\n\n```mermaid\ngraph LR\n    QUEUED -->|Assigned to worker| RUNNING\n    RUNNING -->|Hits SleepFor / WaitForEvent| EVICTED\n    EVICTED -->|Wait completes or event arrives| QUEUED\n```\n\n1. **Task reaches a wait.** The durable task calls `SleepFor`, `WaitForEvent`, or `WaitFor`.\n2. **Checkpoint is written.** Hatchet records the current progress in the durable event log.\n3. **Worker slot is freed.** The task is evicted from the worker. 
The slot is immediately available for other tasks.\n4. **Wait completes.** When the sleep expires or the expected event arrives, Hatchet re-queues the task.\n5. **Task resumes on any available worker.** A worker picks up the task, replays the event log to the last checkpoint, and continues execution from where it left off.\n\nThe resumed task does not need to run on the same worker that originally started it. Any worker that has registered the task can pick it up.\n\n### Why eviction matters\n\nWithout eviction, a task that sleeps for 24 hours would consume a slot for the entire duration, wasting capacity that could be running other work. With eviction, the slot is freed immediately.\n\nThis is especially important for:\n\n- **Long waits** — Tasks that sleep for hours or days should not hold slots.\n- **Human-in-the-loop** — Waiting for a human to approve or respond could take minutes or weeks. Eviction ensures no resources are held in the meantime.\n- **Large fan-outs** — A parent task that spawns thousands of children and waits for results can release its slot while the children run, preventing deadlocks where the parent holds resources that the children need.\n\n### Separate slot pools\n\nDurable tasks consume slots from a **separate slot pool** than regular tasks. This prevents a common deadlock: if durable and regular tasks shared the same pool, a durable task waiting on child tasks could hold the very slot those children need to execute.\n\nBy isolating slot pools, Hatchet ensures that durable tasks waiting on children never starve the workers that need to run those children.\n\n### Eviction and determinism\n\nBecause a task may be evicted and resumed on a different worker at any time, the code between checkpoints must be [deterministic](/v1/patterns/mixing-patterns#determinism-in-durable-tasks). On resume, Hatchet replays the event log; it does not re-execute completed operations. 
If the code has changed between the original run and the replay, the checkpoint sequence may not match, leading to unexpected behavior.\n\n#### DAGs"},"124":{"title":"No Eviction Needed","pageTitle":"Resource Management","pageRoute":"hatchet://docs/v1/task-eviction","content":"DAG tasks do not require eviction because they are **never assigned to a worker until they can actually run**. A worker slot is only allocated when all of the task's conditions are met: parent tasks have completed, sleep durations have elapsed, and expected events have arrived.\n\nThis means resources are only consumed during active execution, never during waits.\n\n### How DAG scheduling works\n\n```mermaid\ngraph LR\n    PENDING -->|\"All conditions met (parents, sleep, events)\"| QUEUED\n    QUEUED -->|Assigned to worker| RUNNING\n    RUNNING -->|Completes| COMPLETED\n```\n\n1. **Task is pending.** The task exists in the workflow but is not queued. No worker slot is allocated. No resources are consumed.\n2. **Conditions are met.** All parent tasks have completed, any sleep duration has elapsed, and any required events have arrived.\n3. **Task is queued.** Only now does Hatchet place the task in the queue for worker assignment.\n4. **Task runs to completion.** A worker picks up the task, executes it, and the slot is freed.\n\n### Why this matters\n\nBecause DAG tasks are only scheduled when ready, there is no wasted capacity:\n\n- **Sleep conditions** — A task that waits 24 hours after its parent completes does not hold a slot. It sits in a pending state until the timer expires, then gets queued.\n- **Event conditions** — A task waiting for an external event consumes no resources. When the event arrives, the task is queued and assigned a slot.\n- **Parent dependencies** — Tasks waiting on upstream results are not queued until those results are available.\n\nThis is one of the advantages of DAGs: the scheduling model is simpler. 
You declare the conditions upfront, and Hatchet handles the timing. There is no eviction, no checkpointing, and no replay, because the task never starts until it's ready to run straight through.\n\n> **Info:** If you need a task to start running and then pause partway through (for\n>   example, to wait for an event based on intermediate results), use a [durable\n>   task](/v1/patterns/durable-task-execution) instead. DAG tasks run from start\n>   to finish once scheduled."},"125":{"title":"Running with Docker","pageTitle":"Running with Docker","pageRoute":"hatchet://docs/v1/docker","content":"# Dockerizing Hatchet Applications\n\nThis guide explains how to create Dockerfiles for Hatchet applications. There are examples for Python, TypeScript, Go, and Ruby applications here."},"126":{"title":"Entrypoint Configuration for Hatchet","pageTitle":"Running with Docker","pageRoute":"hatchet://docs/v1/docker","content":"Before creating your Dockerfile, understand that Hatchet workers require specific entry point configuration:\n\n1. The entry point must run code that runs the Hatchet worker. This can be done by calling the `worker.start()` method in your respective SDK.\n2. Proper environment variables must be set for Hatchet SDK\n3. 
The worker should be configured to handle your workflows using the `worker.register` method or by passing workflows into the worker constructor or factory."},"127":{"title":"Example Dockerfiles","pageTitle":"Running with Docker","pageRoute":"hatchet://docs/v1/docker","content":"#### Python - Poetry\n\n```dockerfile\nFROM python:3.13-slim\n\nENV PYTHONUNBUFFERED=1 \\\n POETRY_VERSION=1.4.2 \\\n HATCHET_ENV=production\n\n# Install system dependencies and Poetry\nRUN apt-get update && \\\n apt-get install -y curl && \\\n curl -sSL https://install.python-poetry.org | python3 - && \\\n ln -s /root/.local/bin/poetry /usr/local/bin/poetry && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/\\*\n\nWORKDIR /app\n\nCOPY pyproject.toml poetry.lock\\* /app/\n\nRUN poetry config virtualenvs.create false && \\\n poetry install --no-interaction --no-ansi\n\nCOPY . /app\n\nCMD [\"poetry\", \"run\", \"python\", \"worker.py\"]\n```\n\n> **Info:** If you're using a poetry script to run your worker, you can replace `poetry run python worker.py` with `poetry run <script-name>` in the CMD.\n\n#### Python - pip\n\n```dockerfile\nFROM python:3.13-slim\n\nENV PYTHONUNBUFFERED=1 \\\n HATCHET_ENV=production\n\nWORKDIR /app\n\nCOPY requirements.txt .\n\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY . /app\n\nCMD [\"python\", \"worker.py\"]\n```\n\n#### JavaScript - npm\n\n```dockerfile\n# Stage 1: Build\nFROM node:18 AS builder\n\nWORKDIR /app\n\nCOPY package\\*.json ./\n\nRUN npm ci\n\nCOPY . .\n\nRUN npm run build\n\n# Stage 2: Production\nFROM node:22-alpine\n\nWORKDIR /app\n\nCOPY package\\*.json ./\n\nRUN npm ci --omit=dev\n\nCOPY --from=builder /app/dist ./dist\n\nENV NODE_ENV=production\n\nCMD [\"node\", \"dist/worker.js\"]\n```\n\n> **Info:** Use `npm ci` instead of `npm install` for more reliable builds. 
 It's faster and ensures consistent installs across environments.\n\n#### JavaScript - pnpm\n\n```dockerfile\n# Stage 1: Build\nFROM node:18 AS builder\n\nWORKDIR /app\n\n# Install pnpm\nRUN npm install -g pnpm\n\nCOPY pnpm-lock.yaml package.json ./\n\nRUN pnpm install --frozen-lockfile\n\nCOPY . .\n\nRUN pnpm build\n\n# Stage 2: Production\nFROM node:22-alpine\n\nWORKDIR /app\n\nRUN npm install -g pnpm\n\nCOPY pnpm-lock.yaml package.json ./\n\nRUN pnpm install --frozen-lockfile --prod\n\nCOPY --from=builder /app/dist ./dist\n\nENV NODE_ENV=production\n\nCMD [\"node\", \"dist/worker.js\"]\n```\n\n> **Info:** PNPM's `--frozen-lockfile` flag ensures consistent installs and fails if an update is needed.\n\n#### JavaScript - yarn\n\n```dockerfile\n# Stage 1: Build\nFROM node:18 AS builder\n\nWORKDIR /app\n\nCOPY package.json yarn.lock ./\n\nRUN yarn install --frozen-lockfile\n\nCOPY . .\n\nRUN yarn build\n\n# Stage 2: Production\nFROM node:22-alpine\n\nWORKDIR /app\n\nCOPY package.json yarn.lock ./\n\nRUN yarn install --frozen-lockfile --production\n\nCOPY --from=builder /app/dist ./dist\n\nENV NODE_ENV=production\n\nCMD [\"node\", \"dist/worker.js\"]\n\n```\n\n> **Info:** Yarn's `--frozen-lockfile` ensures your dependencies match the lock file exactly.\n\n#### Go\n\n```dockerfile\n# Stage 1: Build\nFROM golang:1.25-alpine3.21 AS builder\n\nWORKDIR /app\n\nCOPY . .\n\nRUN go mod download\n\nRUN go build -o hatchet-worker .\n\n# Stage 2: Production\n\nFROM golang:1.25-alpine3.21\n\nWORKDIR /app\n\nCOPY --from=builder /app/hatchet-worker .\n\nCMD [\"/app/hatchet-worker\"]\n\n```\n\n#### Ruby\n\n```dockerfile\nFROM ruby:3.3-slim\n\nENV HATCHET_ENV=production\n\n# Install system dependencies for native gems\n\nRUN apt-get update && \\\n apt-get install -y build-essential && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/\\*\n\nWORKDIR /app\n\nCOPY Gemfile Gemfile.lock ./\n\nRUN bundle config set --local without 'development test' && \\\n bundle install\n\nCOPY . 
/app\n\nCMD [\"bundle\", \"exec\", \"ruby\", \"worker.rb\"]\n\n```\n\n> **Info:** If you're using a Rake task or binstub to start your worker, replace the CMD with the appropriate command, e.g. `CMD [\"bundle\", \"exec\", \"rake\", \"hatchet:worker\"]`."},"128":{"title":"Autoscaling Workers","pageTitle":"Autoscaling Workers","pageRoute":"hatchet://docs/v1/autoscaling-workers","content":"# Autoscaling Workers\n\nHatchet provides a Task Stats API that enables you to implement autoscaling for your worker pools. By querying real-time queue depths and task distribution, you can dynamically scale workers based on actual workload demand."},"129":{"title":"Task Stats API","pageTitle":"Autoscaling Workers","pageRoute":"hatchet://docs/v1/autoscaling-workers","content":"The Task Stats endpoint returns current statistics for queued and running tasks across your tenant, broken down by task name, queue, and concurrency group.\n\n### Endpoint\n\n```\nGET /api/v1/tenants/{tenantId}/task-stats\n```\n\n### Authentication\n\nThe endpoint requires Bearer token authentication using a valid API token:\n\n```\nAuthorization: Bearer <token>\n```\n\n### Response Format\n\nThe response is a JSON object keyed by task name, with each task containing statistics for queued and running states:\n\n```json\n{\n  \"my-task\": {\n    \"queued\": {\n      \"total\": 150,\n      \"queues\": {\n        \"my-task:default\": 100,\n        \"my-task:priority\": 50\n      },\n      \"concurrency\": [\n        {\n          \"expression\": \"input.user_id\",\n          \"type\": \"GROUP_ROUND_ROBIN\",\n          \"keys\": {\n            \"user-123\": 10,\n            \"user-456\": 15\n          }\n        }\n      ],\n      \"oldest\": \"2024-01-15T10:30:00Z\"\n    },\n    \"running\": {\n      \"total\": 25,\n      \"oldest\": \"2024-01-15T10:25:00Z\",\n      \"concurrency\": []\n    }\n  }\n}\n```\n\nEach task stat includes:\n\n- **total**: The total count of tasks in this state\n- **concurrency**: 
Distribution across concurrency groups (if concurrency limits are configured)\n- **oldest**: Timestamp of the oldest task in the specified state\n\nThese are available only for `queued` tasks:\n\n- **queues**: A breakdown of task counts by queue name\n\n### Example Usage\n\n```bash\ncurl -H \"Authorization: Bearer your-api-token-here\" \\\n  https://cloud.onhatchet.run/api/v1/tenants/707d0855-80ab-4e1f-a156-f1c4546cbf52/task-stats\n```"},"130":{"title":"Autoscaling with KEDA","pageTitle":"Autoscaling Workers","pageRoute":"hatchet://docs/v1/autoscaling-workers","content":"[KEDA](https://keda.sh) (Kubernetes Event-driven Autoscaling) can use the Task Stats API to automatically scale your worker deployments based on queue depth.\n\n### Setting Up a KEDA ScaledObject\n\nCreate a `ScaledObject` that queries the Hatchet Task Stats API and scales your worker deployment based on the number of queued tasks:\n\n```yaml\napiVersion: keda.sh/v1alpha1\nkind: ScaledObject\nmetadata:\n  name: hatchet-worker-scaler\nspec:\n  scaleTargetRef:\n    name: hatchet-worker\n  minReplicaCount: 1\n  maxReplicaCount: 10\n  triggers:\n    - type: metrics-api\n      metadata:\n        targetValue: \"100\"\n        url: \"https://cloud.onhatchet.run/api/v1/tenants/YOUR_TENANT_ID/task-stats\"\n        valueLocation: \"my-task.queued.total\"\n        authMode: \"bearer\"\n      authenticationRef:\n        name: hatchet-api-token\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: hatchet-api-token\ntype: Opaque\nstringData:\n  token: \"your-api-token-here\"\n---\napiVersion: keda.sh/v1alpha1\nkind: TriggerAuthentication\nmetadata:\n  name: hatchet-api-token\nspec:\n  secretTargetRef:\n    - parameter: token\n      name: hatchet-api-token\n      key: token\n```\n\n> **Info:** The `valueLocation` field uses JSONPath-style notation to extract a specific\n>   value from the response. 
Adjust `my-task` to match your actual task name.\n\n### Scaling Based on Multiple Tasks\n\nIf you have multiple task types handled by the same worker, you can create multiple triggers or use a custom metrics endpoint that aggregates the totals:\n\n```yaml\ntriggers:\n  - type: metrics-api\n    metadata:\n      targetValue: \"50\"\n      url: \"https://cloud.onhatchet.run/api/v1/tenants/YOUR_TENANT_ID/task-stats\"\n      valueLocation: \"task-a.queued.total\"\n      authMode: \"bearer\"\n    authenticationRef:\n      name: hatchet-api-token\n  - type: metrics-api\n    metadata:\n      targetValue: \"50\"\n      url: \"https://cloud.onhatchet.run/api/v1/tenants/YOUR_TENANT_ID/task-stats\"\n      valueLocation: \"task-b.queued.total\"\n      authMode: \"bearer\"\n    authenticationRef:\n      name: hatchet-api-token\n```"},"131":{"title":"Sticky Assignment","pageTitle":"Sticky Assignment","pageRoute":"hatchet://docs/v1/advanced-assignment/sticky-assignment","content":"# Sticky Worker Assignment (Beta)\n\n> **Info:** This feature is currently in beta and may be subject to change.\n\nSticky assignment is a task property that allows you to specify that all child tasks should be assigned to the same worker for the duration of its execution. This can be useful in situations like when you need to maintain expensive local memory state across multiple tasks in a workflow or ensure that certain tasks are processed by the same worker for consistency.\n\n> **Warning:** This feature is only compatible with long lived workers, and not webhook\n>   workers."},"132":{"title":"Setting Sticky Assignment","pageTitle":"Sticky Assignment","pageRoute":"hatchet://docs/v1/advanced-assignment/sticky-assignment","content":"Sticky assignment is set on the task level by adding the `sticky` property to the task definition. 
When a task is marked as sticky, all steps within that task will be assigned to the same worker for the duration of the task execution.\n\n> **Warning:** While sticky assignment can be useful in certain scenarios, it can also\n>   introduce potential bottlenecks if the assigned worker becomes unavailable, or\n>   if local state is not maintained when the job is picked up. Be sure to\n>   consider the implications of sticky assignment when designing your tasks and\n>   have a plan in place to handle local state issues.\n\nThere are two strategies for setting sticky assignment for [DAG](./dags.mdx) workflows:\n\n- `SOFT`: All tasks in the workflow will attempt to be assigned to the same worker, but if that worker is unavailable, it will be assigned to another worker.\n- `HARD`: All tasks in the workflow will only be assigned to the same worker. If that worker is unavailable, the workflow run will not be assigned to another worker and will remain in a pending state until the original worker becomes available or timeout is reached. 
(See [Scheduling Timeouts](./timeouts.mdx#task-level-timeouts))\n\n#### Ruby\n\n```python\nsticky_workflow = hatchet.workflow(\n    name=\"StickyWorkflow\",\n    # 👀 Specify a sticky strategy when declaring the workflow\n    sticky=StickyStrategy.SOFT,\n)\n\n\n@sticky_workflow.task()\ndef step1a(input: EmptyModel, ctx: Context) -> dict[str, str | None]:\n    return {\"worker\": ctx.worker_id}\n\n\n@sticky_workflow.task()\ndef step1b(input: EmptyModel, ctx: Context) -> dict[str, str | None]:\n    return {\"worker\": ctx.worker_id}\n```\n\n#### Tab 2\n\n```typescript\nexport const sticky = hatchet.task({\n  name: 'sticky',\n  retries: 3,\n  sticky: StickyStrategy.SOFT,\n  fn: async (_, ctx) => {\n    // specify a child workflow to run on the same worker\n    const result = await child.run(\n      {\n        N: 1,\n      },\n      { sticky: true }\n    );\n\n    return {\n      result,\n    };\n  },\n});\n```\n\n#### Tab 3\n\n```go\nfunc StickyDag(client *hatchet.Client) *hatchet.Workflow {\n\tstickyDag := client.NewWorkflow(\"sticky-dag\",\n\t\thatchet.WithWorkflowStickyStrategy(types.StickyStrategy_SOFT),\n\t)\n\n\t_ = stickyDag.NewTask(\"sticky-task\",\n\t\tfunc(ctx worker.HatchetContext, input StickyInput) (interface{}, error) {\n\t\t\tworkerId := ctx.Worker().ID()\n\n\t\t\treturn &StickyResult{\n\t\t\t\tResult: workerId,\n\t\t\t}, nil\n\t\t},\n\t)\n\n\t_ = stickyDag.NewTask(\"sticky-task-2\",\n\t\tfunc(ctx worker.HatchetContext, input StickyInput) (interface{}, error) {\n\t\t\tworkerId := ctx.Worker().ID()\n\n\t\t\treturn &StickyResult{\n\t\t\t\tResult: workerId,\n\t\t\t}, nil\n\t\t},\n\t)\n\n\treturn stickyDag\n}\n```\n\n#### Tab 4\n\n```ruby\nSTICKY_WORKFLOW = HATCHET.workflow(\n  name: \"StickyWorkflow\",\n  # Specify a sticky strategy when declaring the workflow\n  sticky: :soft\n)\n\nSTEP1A = STICKY_WORKFLOW.task(:step1a) do |input, ctx|\n  { \"worker\" => ctx.worker.id }\nend\n\nSTEP1B = STICKY_WORKFLOW.task(:step1b) do |input, ctx|\n  { \"worker\" => 
ctx.worker.id }\nend\n```\n\nIn this example, the `sticky` property is set to `SOFT`, which means that the task will attempt to be assigned to the same worker for the duration of its execution. If the original worker is unavailable, the task will be assigned to another worker."},"133":{"title":"Sticky Child Tasks","pageTitle":"Sticky Assignment","pageRoute":"hatchet://docs/v1/advanced-assignment/sticky-assignment","content":"It is possible to spawn child tasks on the same worker as the parent task by setting the `sticky` property to `true` in the `run` method options. This can be useful when you need to maintain local state across multiple tasks or ensure that child tasks are processed by the same worker for consistency.\n\nHowever, the child task must:\n\n1. Specify a `sticky` strategy in the child task's definition\n2. Be registered with the same worker as the parent task\n\nIf either condition is not met, an error will be thrown when the child task is spawned.\n\n#### Ruby\n\n```python\nsticky_child_workflow = hatchet.workflow(\n    name=\"StickyChildWorkflow\", sticky=StickyStrategy.SOFT\n)\n\n\n@sticky_workflow.task(parents=[step1a, step1b])\nasync def step2(input: EmptyModel, ctx: Context) -> dict[str, str | None]:\n    ref = await sticky_child_workflow.aio_run(\n        sticky=True,\n        wait_for_result=False,\n    )\n\n    await ref.aio_result()\n\n    return {\"worker\": ctx.worker_id}\n\n\n@sticky_child_workflow.task()\ndef child(input: EmptyModel, ctx: Context) -> dict[str, str | None]:\n    return {\"worker\": ctx.worker_id}\n```\n\n#### Tab 2\n\n```typescript\nexport const sticky = hatchet.task({\n  name: 'sticky',\n  retries: 3,\n  sticky: StickyStrategy.SOFT,\n  fn: async (_, ctx) => {\n    // specify a child workflow to run on the same worker\n    const result = await child.run(\n      {\n        N: 1,\n      },\n      { sticky: true }\n    );\n\n    return {\n      result,\n    };\n  },\n});\n```\n\n#### Tab 3\n\n```go\nfunc Sticky(client 
*hatchet.Client) *hatchet.StandaloneTask {\n\tsticky := client.NewStandaloneTask(\"sticky-task\",\n\t\tfunc(ctx worker.HatchetContext, input StickyInput) (*StickyResult, error) {\n\t\t\t// Run a child workflow on the same worker\n\t\t\tchildWorkflow := Child(client)\n\t\t\tchildResult, err := childWorkflow.Run(ctx, ChildInput{N: 1}, hatchet.WithRunSticky(true))\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvar childOutput ChildResult\n\t\t\terr = childResult.Into(&childOutput)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &StickyResult{\n\t\t\t\tResult: fmt.Sprintf(\"child-result-%s\", childOutput.Result),\n\t\t\t}, nil\n\t\t},\n\t)\n\n\treturn sticky\n}\n```\n\n#### Tab 4\n\n```ruby\nSTICKY_CHILD_WORKFLOW = HATCHET.workflow(\n  name: \"StickyChildWorkflow\",\n  sticky: :soft\n)\n\nSTICKY_WORKFLOW.task(:step2, parents: [STEP1A, STEP1B]) do |input, ctx|\n  ref = STICKY_CHILD_WORKFLOW.run_no_wait(\n    options: Hatchet::TriggerWorkflowOptions.new(sticky: true)\n  )\n\n  ref.result\n\n  { \"worker\" => ctx.worker.id }\nend\n\nSTICKY_CHILD_WORKFLOW.task(:child) do |input, ctx|\n  { \"worker\" => ctx.worker.id }\nend\n```"},"134":{"title":"Worker Affinity","pageTitle":"Worker Affinity","pageRoute":"hatchet://docs/v1/advanced-assignment/worker-affinity","content":"# Worker Affinity Assignment (Beta)\n\n> **Info:** This feature is currently in beta and may be subject to change.\n\nIt is often desirable to assign workflows to specific workers based on certain criteria, such as worker capabilities, resource availability, or location. Worker affinity allows you to specify that a workflow should be assigned to a specific worker based on worker label state. 
Labels can be set dynamically on workers to reflect their current state, such as a specific model loaded into memory or specific disk requirements.\n\nSpecific tasks can then specify desired label state to ensure that workflows are assigned to workers that meet specific criteria. If no worker meets the specified criteria, the task run will remain in a pending state until a suitable worker becomes available or the task is cancelled. (See [Scheduling Timeouts](./timeouts.mdx#task-level-timeouts))"},"135":{"title":"Specifying Worker Labels","pageTitle":"Worker Affinity","pageRoute":"hatchet://docs/v1/advanced-assignment/worker-affinity","content":"Labels can be set on workers when they are registered with Hatchet. Labels are key-value pairs that can be used to specify worker capabilities, resource availability, or other criteria that can be used to match workflows to workers. Values can be strings or numbers, and multiple labels can be set on a worker.\n\n#### Python\n\n```python\nworker = hatchet.worker(\n    \"affinity-worker\",\n    slots=10,\n    labels={\n        \"model\": \"fancy-ai-model-v2\",\n        \"memory\": 512,\n    },\n    workflows=[affinity_worker_workflow],\n)\nworker.start()\n```\n\n#### Typescript\n\n```typescript\nconst workflow = hatchet.workflow({\n  name: 'affinity-workflow',\n  description: 'test',\n});\n\nworkflow.task({\n  name: 'step1',\n  fn: async (_, ctx) => {\n    const results = [];\n\n    for (let i = 0; i < 50; i++) {\n      const result = await childWorkflow.run({});\n      results.push(result);\n    }\n    console.log('Spawned 50 child workflows');\n    console.log('Results:', await Promise.all(results));\n\n    return { step1: 'step1 results!' 
};\n  },\n});\n```\n\n#### Go\n\n```go\nworker, err := client.NewWorker(\"affinity-worker\",\n\thatchet.WithWorkflows(affinityWorkflow),\n\thatchet.WithSlots(10),\n\thatchet.WithLabels(map[string]any{\n\t\t\"model\":  \"fancy-ai-model-v2\",\n\t\t\"memory\": 512,\n\t}),\n)\n```\n\n#### Ruby\n\n```ruby\ndef main\n  worker = HATCHET.worker(\n    \"affinity-worker\",\n    slots: 10,\n    labels: {\n      \"model\" => \"fancy-ai-model-v2\",\n      \"memory\" => 512\n    },\n    workflows: [AFFINITY_WORKER_WORKFLOW]\n  )\n  worker.start\nend\n```"},"136":{"title":"Specifying Step Desired Labels","pageTitle":"Worker Affinity","pageRoute":"hatchet://docs/v1/advanced-assignment/worker-affinity","content":"You can specify desired worker label state for specific tasks in a workflow by setting the `desired_worker_labels` property on the task definition. This property is an object where the keys are the label keys and the values are objects with the following properties:\n\n- `value`: The desired value of the label\n- `comparator` (default: `EQUAL`): The comparison operator to use when matching the label value.\n  - `EQUAL`: The label value must be equal to the desired value\n  - `NOT_EQUAL`: The label value must not be equal to the desired value\n  - `GREATER_THAN`: The label value must be greater than the desired value\n  - `GREATER_THAN_OR_EQUAL`: The label value must be greater than or equal to the desired value\n  - `LESS_THAN`: The label value must be less than the desired value\n  - `LESS_THAN_OR_EQUAL`: The label value must be less than or equal to the desired value\n- `required` (default: `true`): Whether the label is required for the task to run. If `true`, the task will remain in a pending state until a worker with the desired label state becomes available. If `false`, the worker will be prioritized based on the sum of the highest matching weights.\n- `weight` (optional, default: `100`): The weight of the label. 
Higher weights are prioritized over lower weights when selecting a worker for the task. If multiple workers have the same highest weight, the worker with the highest sum of weights will be selected. Ignored if `required` is `true`.\n\n#### Ruby\n\n```python\naffinity_worker_workflow = hatchet.workflow(name=\"AffinityWorkflow\")\n\n\n@affinity_worker_workflow.task(\n    desired_worker_labels=[\n        DesiredWorkerLabel(key=\"model\", value=\"fancy-ai-model-v2\", weight=10),\n        DesiredWorkerLabel(\n            key=\"memory\",\n            value=256,\n            required=True,\n            comparator=WorkerLabelComparator.LESS_THAN,\n        ),\n    ],\n)\n```\n\n#### Tab 2\n\n```typescript\nconst workflow = hatchet.workflow({\n  name: 'affinity-workflow',\n  description: 'test',\n});\n\nworkflow.task({\n  name: 'step1',\n  fn: async (_, ctx) => {\n    const results = [];\n\n    for (let i = 0; i < 50; i++) {\n      const result = await childWorkflow.run({});\n      results.push(result);\n    }\n    console.log('Spawned 50 child workflows');\n    console.log('Results:', await Promise.all(results));\n\n    return { step1: 'step1 results!' 
};\n  },\n});\n```\n\n#### Tab 3\n\n```go\n\terr = w.RegisterWorkflow(\n\t\t&worker.WorkflowJob{\n\t\t\tOn:          worker.Events(\"user:create:affinity\"),\n\t\t\tName:        \"affinity\",\n\t\t\tDescription: \"affinity\",\n\t\t\tSteps: []*worker.WorkflowStep{\n\t\t\t\tworker.Fn(func(ctx worker.HatchetContext) (result *taskOneOutput, err error) {\n\t\t\t\t\treturn &taskOneOutput{\n\t\t\t\t\t\tMessage: ctx.Worker().ID(),\n\t\t\t\t\t}, nil\n\t\t\t\t}).\n\t\t\t\t\tSetName(\"task-one\").\n\t\t\t\t\tSetDesiredLabels(map[string]*types.DesiredWorkerLabel{\n\t\t\t\t\t\t\"model\": {\n\t\t\t\t\t\t\tValue:  \"fancy-ai-model-v2\",\n\t\t\t\t\t\t\tWeight: 10,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"memory\": {\n\t\t\t\t\t\t\tValue:      512,\n\t\t\t\t\t\t\tRequired:   true,\n\t\t\t\t\t\t\tComparator: types.ComparatorPtr(types.WorkerLabelComparator_GREATER_THAN),\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t)\n```\n\n#### Tab 4\n\n```ruby\nAFFINITY_WORKER_WORKFLOW = HATCHET.workflow(name: \"AffinityWorkflow\")\n```\n\n> **Warning:** Use extra care when using worker affinity with [sticky assignment `HARD`\n>   strategy](./sticky-assignment.mdx). In this case, it is recommended to set\n>   desired labels on the first task of the workflow to ensure that the workflow\n>   is assigned to a worker that meets the desired criteria and remains on that\n>   worker for the duration of the workflow.\n\n### Dynamic Worker Labels\n\nLabels can also be set dynamically on workers using the `upsertLabels` method. 
This can be useful when worker state changes over time, such as when a new model is loaded into memory or when a worker's resource availability changes.\n\n#### Ruby\n\n```python\nasync def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:\n    if ctx.worker_labels.get(\"model\") != \"fancy-ai-model-v2\":\n        ctx.worker.upsert_labels({\"model\": \"unset\"})\n        # DO WORK TO EVICT OLD MODEL / LOAD NEW MODEL\n        ctx.worker.upsert_labels({\"model\": \"fancy-ai-model-v2\"})\n\n    return {\"worker\": ctx.worker_id}\n```\n\n#### Tab 2\n\n```typescript\nconst childWorkflow = hatchet.workflow({\n  name: 'child-affinity-workflow',\n  description: 'test',\n});\n\nchildWorkflow.task({\n  name: 'child-step1',\n  desiredWorkerLabels: {\n    model: {\n      value: 'xyz',\n      required: true,\n    },\n  },\n  fn: async (ctx) => {\n    return { childStep1: 'childStep1 results!' };\n  },\n});\n```\n\n#### Tab 3\n\n```go\n\terr = w.RegisterWorkflow(\n\t\t&worker.WorkflowJob{\n\t\t\tOn:          worker.Events(\"user:create:affinity\"),\n\t\t\tName:        \"affinity\",\n\t\t\tDescription: \"affinity\",\n\t\t\tSteps: []*worker.WorkflowStep{\n\t\t\t\tworker.Fn(func(ctx worker.HatchetContext) (result *taskOneOutput, err error) {\n\n    \t\t\t\tmodel := ctx.Worker().GetLabels()[\"model\"]\n\n    \t\t\t\tif model != \"fancy-vision-model\" {\n    \t\t\t\t\tctx.Worker().UpsertLabels(map[string]interface{}{\n    \t\t\t\t\t\t\"model\": nil,\n    \t\t\t\t\t})\n    \t\t\t\t\t// Do something to load the model\n            evictModel();\n            loadNewModel(\"fancy-vision-model\");\n    \t\t\t\t\tctx.Worker().UpsertLabels(map[string]interface{}{\n    \t\t\t\t\t\t\"model\": \"fancy-vision-model\",\n    \t\t\t\t\t})\n    \t\t\t\t}\n\n    \t\t\t\treturn &taskOneOutput{\n    \t\t\t\t\tMessage: ctx.Worker().ID(),\n    \t\t\t\t}, nil\n    \t\t\t}).\n    \t\t\t\tSetName(\"task-one\").\n    \t\t\t\tSetDesiredLabels(map[string]*types.DesiredWorkerLabel{\n    
\t\t\t\t\t\"model\": {\n    \t\t\t\t\t\tValue:  \"fancy-vision-model\",\n    \t\t\t\t\t\tWeight: 10,\n    \t\t\t\t\t},\n    \t\t\t\t\t\"memory\": {\n    \t\t\t\t\t\tValue:      512,\n    \t\t\t\t\t\tRequired:   true,\n    \t\t\t\t\t\tComparator: types.WorkerLabelComparator_GREATER_THAN,\n    \t\t\t\t\t},\n    \t\t\t\t}),\n    \t\t},\n    \t},\n    )\n\n```\n\n#### Tab 4\n\n```ruby\nAFFINITY_WORKER_WORKFLOW.task(\n  :step,\n  desired_worker_labels: {\n    \"model\" => Hatchet::DesiredWorkerLabel.new(value: \"fancy-ai-model-v2\", weight: 10),\n    \"memory\" => Hatchet::DesiredWorkerLabel.new(\n      value: 256,\n      required: true,\n      comparator: :less_than\n    )\n  }\n) do |input, ctx|\n  if ctx.worker.labels[\"model\"] != \"fancy-ai-model-v2\"\n    ctx.worker.upsert_labels(\"model\" => \"unset\")\n    # DO WORK TO EVICT OLD MODEL / LOAD NEW MODEL\n    ctx.worker.upsert_labels(\"model\" => \"fancy-ai-model-v2\")\n  end\n\n  { \"worker\" => ctx.worker.id }\nend\n```"},"137":{"title":"Manual Slot Release","pageTitle":"Manual Slot Release","pageRoute":"hatchet://docs/v1/advanced-assignment/manual-slot-release","content":"# Manual Slot Release\n\nThe Hatchet execution model sets a number of available slots for running tasks in a workflow. When a task is running, it occupies a slot, and if a worker has no available slots, it will not be able to run any more tasks concurrently.\n\nIn some cases, you may have a task in your workflow that is resource-intensive and requires exclusive access to a shared resource, such as a database connection or a GPU compute instance. To ensure that other tasks in the workflow can run concurrently, you can manually release the slot after the resource-intensive task has completed, but the task still has non-resource-intensive work to do (i.e. upload or cleanup).\n\n> **Warning:** This is an advanced feature and should be used with caution. 
Manually\n>   releasing the slot can have unintended side effects on system performance and\n>   concurrency. For example, if the worker running the task dies, the task will\n>   not be reassigned and will remain in a running state until manually\n>   terminated."},"138":{"title":"Using Manual Slot Release","pageTitle":"Manual Slot Release","pageRoute":"hatchet://docs/v1/advanced-assignment/manual-slot-release","content":"You can manually release a slot from within a running task in your workflow using the Hatchet context method `release_slot`:\n\n#### Python\n\n```python\nslot_release_workflow = hatchet.workflow(name=\"SlotReleaseWorkflow\")\n\n\n@slot_release_workflow.task()\ndef step1(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    print(\"RESOURCE INTENSIVE PROCESS\")\n    time.sleep(10)\n\n    # 👀 Release the slot after the resource-intensive process, so that other steps can run\n    ctx.release_slot()\n\n    print(\"NON RESOURCE INTENSIVE PROCESS\")\n    return {\"status\": \"success\"}\n```\n\n#### Go\n\n```go\n_ = workflow.NewTask(\"step1\", func(ctx hatchet.Context, _ any) (*StepOutput, error) {\n\tfmt.Println(\"RESOURCE INTENSIVE PROCESS\")\n\ttime.Sleep(10 * time.Second)\n\n\t// Release the slot after the resource-intensive process,\n\t// so that other steps can run on this worker.\n\tif releaseErr := ctx.ReleaseSlot(); releaseErr != nil {\n\t\treturn nil, fmt.Errorf(\"failed to release slot: %w\", releaseErr)\n\t}\n\n\tfmt.Println(\"NON RESOURCE INTENSIVE PROCESS\")\n\n\treturn &StepOutput{Status: \"success\"}, nil\n})\n```\n\n#### Ruby\n\n```ruby\nSLOT_RELEASE_WORKFLOW = HATCHET.workflow(name: \"SlotReleaseWorkflow\")\n\nSLOT_RELEASE_WORKFLOW.task(:step1) do |input, ctx|\n  puts \"RESOURCE INTENSIVE PROCESS\"\n  sleep 10\n\n  # Release the slot after the resource-intensive process, so that other steps can run\n  ctx.release_slot\n\n  puts \"NON RESOURCE INTENSIVE PROCESS\"\n  { \"status\" => \"success\" }\nend\n```\n\nIn the above examples, 
the `release_slot()` method is called after the resource-intensive process has completed. This allows other tasks in the workflow to start executing while the current task continues with non-resource-intensive tasks.\n\n> **Info:** Manually releasing the slot does not terminate the current task. The task will\n>   continue executing until it completes or encounters an error."},"139":{"title":"Use Cases","pageTitle":"Manual Slot Release","pageRoute":"hatchet://docs/v1/advanced-assignment/manual-slot-release","content":"Some common use cases for Manual Slot Release include:\n\n- Performing data processing or analysis that requires significant CPU, GPU, or memory resources\n- Acquiring locks or semaphores to access shared resources\n- Executing long-running tasks that don't need to block other tasks after some initial work is done\n\nBy utilizing Manual Slot Release, you can optimize the concurrency and resource utilization of your workflows, allowing multiple tasks to run in parallel when possible."},"140":{"title":"Logging","pageTitle":"Logging","pageRoute":"hatchet://docs/v1/logging","content":"# Logging\n\nHatchet comes with a built-in logging view where you can push logs from your workflows. This is useful for debugging and monitoring your workflows.\n\n#### Python\n\nYou can use either Python's built-in `logging` package, or the `context.log` method for more control over the logs that are sent."},"141":{"title":"Using the built-in `logging` package","pageTitle":"Logging","pageRoute":"hatchet://docs/v1/logging","content":"You can pass a custom logger to the `Hatchet` class when initializing it. 
For example:\n\n```python\nimport logging\n\nfrom hatchet_sdk import ClientConfig, Hatchet\n\nlogging.basicConfig(level=logging.INFO)\n\nroot_logger = logging.getLogger()\n\nhatchet = Hatchet(\n    config=ClientConfig(\n        logger=root_logger,\n    ),\n)\n```\n\nIt's recommended that you pass the root logger to the `Hatchet` class, as this will ensure that all logs are captured by the Hatchet logger. If you have workflows defined in multiple files, they should be children of the root logger. For example, with the following file structure:\n\n```\nworkflows/\n  workflow.py\nclient.py\nworker.py\nworkflow.py\n```\n\nYou should pass the root logger to the `Hatchet` class in `client.py`:\n\n```python\nimport logging\n\nfrom hatchet_sdk import ClientConfig, Hatchet\n\nlogging.basicConfig(level=logging.INFO)\n\nroot_logger = logging.getLogger()\n\nhatchet = Hatchet(\n    config=ClientConfig(\n        logger=root_logger,\n    ),\n)\n```\n\nAnd then in `workflows/workflow.py`, you should create a child logger:\n\n```python\nimport logging\nimport time\n\nfrom examples.logger.client import hatchet\nfrom hatchet_sdk import Context, EmptyModel\n\nlogger = logging.getLogger(__name__)\n\nlogging_workflow = hatchet.workflow(\n    name=\"LoggingWorkflow\",\n)\n\n\n@logging_workflow.task()\ndef root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    for i in range(12):\n        logger.info(f\"executed step1 - {i}\")\n        logger.info({\"step1\": \"step1\"})\n\n        time.sleep(0.1)\n\n    return {\"status\": \"success\"}\n```"},"142":{"title":"Using the `context.log` method","pageTitle":"Logging","pageRoute":"hatchet://docs/v1/logging","content":"You can also use the `context.log` method to log messages from your workflows. This method is available on the `Context` object that is passed to each task in your workflow. 
For example:\n\n```python\n@logging_workflow.task()\ndef context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    for i in range(12):\n        ctx.log(f\"executed step1 - {i}\")\n        ctx.log({\"step1\": \"step1\"})\n\n        time.sleep(0.1)\n\n    return {\"status\": \"success\"}\n```\n\nEach task is currently limited to 1000 log lines.\n\n#### Tab 2\n\nIn TypeScript, there are two options for logging from your tasks. The first is to use the `ctx.log()` method (from the `Context`) to send logs:\n\n```typescript\nconst workflow = hatchet.workflow({\n  name: 'logger-example',\n  description: 'test',\n  on: {\n    event: 'user:create',\n  },\n});\n\nworkflow.task({\n  name: 'logger-step1',\n  fn: async (_, ctx) => {\n    // log in a for loop\n\n    for (let i = 0; i < 10; i++) {\n      ctx.logger.info(`log message ${i}`);\n      await sleep(200);\n    }\n\n    return { step1: 'completed step run' };\n  },\n});\n```\n\nThis has the benefit of being easy to use out of the box (no setup required!), but it's limited in its flexibiliy and how pluggable it is with your existing logging setup.\n\nHatchet also allows you to \"bring your own\" logger when you define a workflow:\n\n```typescript\nconst logger = pino();\n\nclass PinoLogger implements Logger {\n  logLevel: LogLevel;\n  context: string;\n\n  constructor(context: string, logLevel: LogLevel = 'DEBUG') {\n    this.logLevel = logLevel;\n    this.context = context;\n  }\n\n  debug(message: string, extra?: JsonObject): void {\n    logger.debug(extra, message);\n  }\n\n  info(message: string, extra?: JsonObject): void {\n    logger.info(extra, message);\n  }\n\n  green(message: string, extra?: JsonObject): void {\n    logger.info(extra, `%c${message}`);\n  }\n\n  warn(message: string, error?: Error, extra?: JsonObject): void {\n    logger.warn(extra, `${message} ${error}`);\n  }\n\n  error(message: string, error?: Error, extra?: JsonObject): void {\n    logger.error(extra, `${message} ${error}`);\n  
}\n\n  // optional util method\n  util(key: string, message: string, extra?: JsonObject): void {\n    // for example you may want to expose a trace method\n    if (key === 'trace') {\n      logger.info(extra, 'trace');\n    }\n  }\n}\n\nconst hatchet = Hatchet.init({\n  log_level: 'DEBUG',\n  logger: (ctx, level) => new PinoLogger(ctx, level),\n});\n```\n\nIn this example, we create a Pino logger that implements Hatchet's `Logger` interface and pass it to the Hatchet client constructor. We can then use that logger in our steps:\n\n```typescript\nconst workflow = hatchet.workflow({\n  name: 'logger-example',\n  description: 'test',\n  on: {\n    event: 'user:create',\n  },\n});\n\nworkflow.task({\n  name: 'logger-step1',\n  fn: async (_, ctx) => {\n    // log in a for loop\n\n    for (let i = 0; i < 10; i++) {\n      ctx.logger.info(`log message ${i}`);\n      await sleep(200);\n    }\n\n    return { step1: 'completed step run' };\n  },\n});\n```\n\n#### Ruby\n\n```ruby\nrequire \"hatchet-sdk\"\nrequire \"logger\"\n\nlogger = Logger.new($stdout)\nlogger.level = Logger::INFO\n\nHATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)\n\nLOGGING_WORKFLOW = HATCHET.workflow(name: \"LoggingWorkflow\")\n\nLOGGING_WORKFLOW.task(:root_logger) do |input, ctx|\n  12.times do |i|\n    logger.info(\"executed step1 - #{i}\")\n    logger.info({ \"step1\" => \"step1\" }.inspect)\n\n    sleep 0.1\n  end\n\n  { \"status\" => \"success\" }\nend\n```\n```ruby\nLOGGING_WORKFLOW.task(:context_logger) do |input, ctx|\n  12.times do |i|\n    ctx.log(\"executed step1 - #{i}\")\n    ctx.log({ \"step1\" => \"step1\" }.inspect)\n\n    sleep 0.1\n  end\n\n  { \"status\" => \"success\" }\nend\n```"},"143":{"title":"OpenTelemetry","pageTitle":"OpenTelemetry","pageRoute":"hatchet://docs/v1/opentelemetry","content":"# OpenTelemetry\n\nHatchet supports exporting traces from your tasks to an [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) to improve visibility into 
your Hatchet tasks."},"144":{"title":"Setup","pageTitle":"OpenTelemetry","pageRoute":"hatchet://docs/v1/opentelemetry","content":"#### Python\n\nInstall the `otel` extra:\n\n```bash\npip install hatchet-sdk[otel]\n```\n\nThen create the instrumentor and call `instrument()`:\n\n```python\nfrom hatchet_sdk.opentelemetry.instrumentor import HatchetInstrumentor\n\nHatchetInstrumentor().instrument()\n```\n\nBy default, `HatchetInstrumentor` creates a `TracerProvider` that sends spans to the Hatchet engine's OTLP collector. You can also pass your own `TracerProvider`:\n\n```python\nHatchetInstrumentor(\n    tracer_provider=your_tracer_provider,\n).instrument()\n```\n\n### Options\n\nOption, Type, Default, Description\n\n`tracer_provider`, `TracerProvider`, —, Custom TracerProvider. If not set, one is created automatically.\n`enable_hatchet_otel_collector`, `bool`, `True`, Send traces to the Hatchet engine's OTLP collector.\n`schedule_delay_millis`, `int`, —, Delay between consecutive exports of the BatchSpanProcessor.\n`max_export_batch_size`, `int`, —, Maximum batch size for the BatchSpanProcessor.\n`max_queue_size`, `int`, —, Maximum queue size for the BatchSpanProcessor.\n\n#### TypeScript\n\nInstall the required OpenTelemetry packages:\n\n```bash\nnpm install @opentelemetry/api @opentelemetry/instrumentation @opentelemetry/sdk-trace-base @opentelemetry/exporter-trace-otlp-grpc\n```\n\nRegister the instrumentor before creating any Hatchet clients or workers:\n\n```typescript\nconst { registerInstrumentations } = require(\"@opentelemetry/instrumentation\");\nimport { HatchetInstrumentor } from \"@hatchet-dev/typescript-sdk/opentelemetry\";\n\nregisterInstrumentations({\n  instrumentations: [new HatchetInstrumentor()],\n});\n```\n\nBy default, `HatchetInstrumentor` sends spans to the Hatchet engine's OTLP collector. 
You can disable this with `enableHatchetCollector: false`.\n\n### Options\n\nOption, Type, Default, Description\n\n`enableHatchetCollector`, `boolean`, `true`, Send traces to the Hatchet engine's OTLP collector.\n`clientConfig`, `object`, —, Override Hatchet client config for the collector connection.\n`includeTaskNameInSpanName`, `boolean`, `false`, Append the task action ID to the `hatchet.start_step_run` span name.\n`excludedAttributes`, `array`, `[]`, List of `hatchet.*` attribute keys to exclude from spans.\n\n#### Go\n\nImport the `opentelemetry` package (commonly aliased as `hatchetotel`):\n\n```go\nimport hatchetotel \"github.com/hatchet-dev/hatchet/sdks/go/opentelemetry\"\n```\n\nCreate the instrumentor, then register its middleware on the worker:\n\n```go\ninstrumentor, err := hatchetotel.NewInstrumentor()\nif err != nil {\n    log.Fatalf(\"failed to create instrumentor: %v\", err)\n}\n\nworker.Use(instrumentor.Middleware())\n```\n\nBy default, `NewInstrumentor` creates a `TracerProvider` that sends spans to the Hatchet engine's OTLP collector.\n\nRemember to shut down the instrumentor on exit to flush remaining spans:\n\n```go\ndefer instrumentor.Shutdown(context.Background())\n```\n\n### Options\n\nOption, Description\n\n`hatchetotel.WithTracerProvider(tp)`, Use a custom `*sdktrace.TracerProvider` instead of creating a new one.\n`hatchetotel.DisableHatchetCollector()`, Disable sending traces to the Hatchet engine's OTLP collector.\n`hatchetotel.WithBatchSpanProcessorOptions(...)`, Configure the `BatchSpanProcessor` for the Hatchet collector.\n\n#### Ruby\n\n> **Info:** OpenTelemetry support for the Ruby SDK is coming soon."},"145":{"title":"Spans","pageTitle":"OpenTelemetry","pageRoute":"hatchet://docs/v1/opentelemetry","content":"By default, Hatchet creates spans at the following points in the lifecycle of a task run:\n\n1. **Producer spans** — when a trigger is called on the client side (e.g. `run()`, `runNoWait()`, `push()`, `schedule()`).\n2. 
**Consumer spans** — when a worker handles a task run, such as starting to run the task (`hatchet.start_step_run`) or cancelling a task (`hatchet.cancel_step_run`).\n\n### Span Names\n\nSpan Name, Kind, Description\n\n`hatchet.start_step_run`, `CONSUMER`, Worker begins executing a task.\n`hatchet.cancel_step_run`, `CONSUMER`, Worker cancels a running task.\n`hatchet.run_workflow`, `PRODUCER`, Client triggers a workflow run.\n`hatchet.run_workflows`, `PRODUCER`, Client triggers multiple workflow runs.\n`hatchet.schedule_workflow`, `PRODUCER`, Client creates a scheduled workflow run.\n`hatchet.push_event`, `PRODUCER`, Client pushes an event.\n`hatchet.bulk_push_event`, `PRODUCER`, Client pushes events in bulk.\n`hatchet.durable.wait_for`, `INTERNAL`, Durable task waits for a signal/condition.\n\n### Span Attributes\n\nAll spans include an `instrumentor` attribute set to `\"hatchet\"`. Consumer spans (`hatchet.start_step_run`) include the following `hatchet.*` attributes:\n\nAttribute, Type, Description\n\n`hatchet.tenant_id`, `string`, The tenant ID for the task run.\n`hatchet.worker_id`, `string`, The worker handling the task.\n`hatchet.workflow_run_id`, `string`, The workflow run ID.\n`hatchet.step_run_id`, `string`, The task run ID.\n`hatchet.step_id`, `string`, The task ID.\n`hatchet.workflow_name`, `string`, The workflow name.\n`hatchet.action_name`, `string`, The action ID (format: `workflowName:taskName`).\n`hatchet.step_name`, `string`, The human-readable task name.\n`hatchet.retry_count`, `int`, Current retry attempt (0-indexed).\n`hatchet.workflow_id`, `string`, The workflow definition ID (if available).\n`hatchet.workflow_version_id`, `string`, The workflow version ID (if available).\n`hatchet.parent_workflow_run_id`, `string`, Parent workflow run ID (for child workflows).\n`hatchet.child_workflow_index`, `int`, Child workflow index (for child workflows).\n`hatchet.child_workflow_key`, `string`, Child workflow key (for child workflows).\n\nProducer spans 
Provide a `HatchetAttributeSpanProcessor` that propagates `hatchet.*` attributes from the parent task run span to all child spans created within the task.
`HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT=8002`.\n\n### Event loop blocked threshold\n\nIf the worker listener process event loop becomes blocked for longer than a threshold, `/health` will return **503**.\n\nYou can configure this threshold (in seconds) with:\n\n- `HATCHET_CLIENT_WORKER_HEALTHCHECK_EVENT_LOOP_BLOCK_THRESHOLD_SECONDS` (default: `5.0`)\n\n#### Example request to `/health`:\n\n```bash\ncurl localhost:8001/health\n\n{\"status\":\"HEALTHY\"}\n```\n\n#### Example request to `/metrics`:\n\n```bash\ncurl localhost:8001/metrics\n\n# HELP python_gc_objects_collected_total Objects collected during gc\n# TYPE python_gc_objects_collected_total counter\npython_gc_objects_collected_total{generation=\"0\"} 18782.0\npython_gc_objects_collected_total{generation=\"1\"} 4907.0\npython_gc_objects_collected_total{generation=\"2\"} 244.0\n# HELP python_gc_objects_uncollectable_total Uncollectable objects found during GC\n# TYPE python_gc_objects_uncollectable_total counter\npython_gc_objects_uncollectable_total{generation=\"0\"} 0.0\npython_gc_objects_uncollectable_total{generation=\"1\"} 0.0\npython_gc_objects_uncollectable_total{generation=\"2\"} 0.0\n# HELP python_gc_collections_total Number of times this generation was collected\n# TYPE python_gc_collections_total counter\npython_gc_collections_total{generation=\"0\"} 308.0\npython_gc_collections_total{generation=\"1\"} 27.0\npython_gc_collections_total{generation=\"2\"} 2.0\n# HELP python_info Python platform information\n# TYPE python_info gauge\npython_info{implementation=\"CPython\",major=\"3\",minor=\"10\",patchlevel=\"15\",version=\"3.10.15\"} 1.0\n# HELP hatchet_worker_listener_health_my_worker Listener health (1 healthy, 0 unhealthy)\n# TYPE hatchet_worker_listener_health_my_worker gauge\nhatchet_worker_listener_health_my_worker 1.0\n# HELP hatchet_worker_event_loop_lag_seconds_my_worker Event loop lag in seconds (listener process)\n# TYPE hatchet_worker_event_loop_lag_seconds_my_worker 
Authorization: Bearer <token>\n```
#### Event Push\n\n#### Python\n\n```python
\"hello\"},\n\thatchet.WithRunMetadata(\n\t\tmap[string]string{\"version\": \"1.0.0\"},\n\t),\n)\nif err != nil {\n\tlog.Fatalf(\"failed to run workflow: %v\", err)\n}\n```\n\n#### Tab 4\n\n```ruby\nSIMPLE.run(\n  {},\n  options: Hatchet::TriggerWorkflowOptions.new(\n    additional_metadata: { \"source\" => \"api\" }\n  )\n)\n```"},"150":{"title":"Filtering in the Dashboard","pageTitle":"Additional Metadata","pageRoute":"hatchet://docs/v1/additional-metadata","content":"Once you've attached additional metadata to events or task runs, this data will be available in the Event and Task Run list views in the Hatchet dashboard. You can use the filter input field to search for events or task runs based on the additional metadata key-value pairs you've attached.\n\nFor example, you can filter events by the `source` metadata keys to quickly find events originating from a specific source or environment.\n\n![Blocks](/addl-meta.gif)"},"151":{"title":"Use Cases","pageTitle":"Additional Metadata","pageRoute":"hatchet://docs/v1/additional-metadata","content":"Some common use cases for additional metadata include:\n\n- Tagging events or task runs with environment information (e.g., `production`, `staging`, `development`)\n- Specifying the source or origin of events (e.g., `api`, `webhook`, `manual`)\n- Categorizing events or task runs based on business-specific criteria (e.g., `priority`, `region`, `product`)\n\nBy leveraging additional metadata, you can enhance the organization, searchability, and discoverability of your events and task runs within Hatchet."},"152":{"title":"Middleware","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"# Middleware & Dependency Injection\n\nMiddleware lets you run logic **before** and **after** every task on a client, without touching individual task definitions. 
Common uses include injecting request IDs, enriching inputs with shared data, encrypting/decrypting payloads, and normalizing or augmenting outputs.\n\n\n  This feature is experimental, and middleware hook signatures may change in\n  future releases.\n\n\n#### Python\n\nHatchet's Python SDK uses FastAPI-style dependency injection to run logic\nbefore tasks and inject the results as parameters. Dependencies are declared\nas functions and wired into tasks with `Depends`.\n\n#### Typescript\n\nMiddleware hooks are registered on the client with `withMiddleware` and are\nfully type-safe — TypeScript sees the union of fields from the task input\ntype and any values returned by `before` hooks, and similarly for task\noutputs and `after` hooks.\n\n#### Go\n\n> **Info:** Middleware support for the Go SDK is coming soon. Join our\n>       [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\nIn Ruby, this pattern uses callable objects (lambdas/procs) passed as `deps`\nwhen defining tasks. 
Dependencies are evaluated before each task run and\nmade available via `ctx.deps`."},"153":{"title":"Defining Middleware","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"#### Python\n\nDefine your dependency functions — they receive the workflow input and context, and their return values are injected into the task as parameters.\n\n```python\nasync def async_dep(input: EmptyModel, ctx: Context) -> str:\n    return ASYNC_DEPENDENCY_VALUE\n\n\ndef sync_dep(input: EmptyModel, ctx: Context) -> str:\n    return SYNC_DEPENDENCY_VALUE\n\n\n@asynccontextmanager\nasync def async_cm_dep(\n    input: EmptyModel, ctx: Context, async_dep: Annotated[str, Depends(async_dep)]\n) -> AsyncGenerator[str, None]:\n    try:\n        yield ASYNC_CM_DEPENDENCY_VALUE + \"_\" + async_dep\n    finally:\n        pass\n\n\n@contextmanager\ndef sync_cm_dep(\n    input: EmptyModel, ctx: Context, sync_dep: Annotated[str, Depends(sync_dep)]\n) -> Generator[str, None, None]:\n    try:\n        yield SYNC_CM_DEPENDENCY_VALUE + \"_\" + sync_dep\n    finally:\n        pass\n\n\n@contextmanager\ndef base_cm_dep(input: EmptyModel, ctx: Context) -> Generator[str, None, None]:\n    try:\n        yield CHAINED_CM_VALUE\n    finally:\n        pass\n\n\ndef chained_dep(\n    input: EmptyModel, ctx: Context, base_cm: Annotated[str, Depends(base_cm_dep)]\n) -> str:\n    return \"chained_\" + base_cm\n\n\n@asynccontextmanager\nasync def base_async_cm_dep(\n    input: EmptyModel, ctx: Context\n) -> AsyncGenerator[str, None]:\n    try:\n        yield CHAINED_ASYNC_CM_VALUE\n    finally:\n        pass\n\n\nasync def chained_async_dep(\n    input: EmptyModel,\n    ctx: Context,\n    base_async_cm: Annotated[str, Depends(base_async_cm_dep)],\n) -> str:\n    return \"chained_\" + base_async_cm\n```\n\n#### Typescript\n\nCreate a client and attach middleware with `before` and `after` hooks.\n\n- **`before(input, ctx)`** runs before the task. 
Its return value **replaces** the task input.\n- **`after(output, ctx, input)`** runs after the task. Its return value **replaces** the task output.\n\n```typescript\nimport { HatchetClient, HatchetMiddleware } from '@hatchet/v1';\n\nexport type GlobalInputType = {\n  first: number;\n  second: number;\n};\n\nexport type GlobalOutputType = {\n  extra: number;\n};\n\nconst myMiddleware = {\n  before: (input, ctx) => {\n    console.log('before', input.first);\n    return { ...input, dependency: 'abc-123' };\n  },\n  after: (output, ctx, input) => {\n    return { ...output, additionalData: 2 };\n  },\n} satisfies HatchetMiddleware;\n\nexport const hatchetWithMiddleware = HatchetClient.init<\n  GlobalInputType,\n  GlobalOutputType\n>().withMiddleware(myMiddleware);\n```\n\n\n  **Spread the original value if you want to keep it.** The return value of each hook **replaces** the input (or output) entirely — it does not shallow-merge. If you omit `...input` in a `before` hook, the original fields are lost. The same applies to `...output` in an `after` hook.\n\n  ```typescript\n      // ✅ Keeps original fields and adds `requestId`\n      before: (input) => ({ ...input, requestId: crypto.randomUUID() })\n\n      // ❌ Replaces input entirely — task only receives { requestId }\n      before: (input) => ({ requestId: crypto.randomUUID() })\n  ```\n\n\n### Chaining Middleware\n\nYou can chain multiple `.withMiddleware()` calls to run hooks in sequence. 
Each `before` hook receives the return value of the previous `before` hook (or the original input for the first hook), and each `after` hook receives the return value of the previous `after` hook.\n\n```typescript\nconst firstMiddleware = {\n  before: (input, ctx) => {\n    console.log('before', input.first);\n    return { ...input, dependency: 'abc-123' };\n  },\n  after: (output, ctx, input) => {\n    return { ...output, firstExtra: 3 };\n  },\n} satisfies HatchetMiddleware;\n\nconst secondMiddleware = {\n  before: (input, ctx) => {\n    console.log('before', input.dependency); // available from previous middleware\n    return { ...input, anotherDep: true };\n  },\n  after: (output, ctx, input) => {\n    return { ...output, secondExtra: 4 };\n  },\n} satisfies HatchetMiddleware;\n\nexport const hatchetWithMiddlewareChaining = HatchetClient.init()\n  .withMiddleware(firstMiddleware)\n  .withMiddleware(secondMiddleware);\n```\n\n#### Go\n\n> **Info:** Middleware support for the Go SDK is coming soon. Join our [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\nDefine your dependencies as callable objects (lambdas). They receive the input, context, and optionally a hash of previously resolved dependencies for chaining.\n\n```ruby\nsync_dep = ->(_input, _ctx) { SYNC_DEPENDENCY_VALUE }\nasync_dep = ->(_input, _ctx) { ASYNC_DEPENDENCY_VALUE }\n\nsync_cm_dep = lambda { |_input, _ctx, deps|\n  \"#{SYNC_CM_DEPENDENCY_VALUE}_#{deps[:sync_dep]}\"\n}\n\nasync_cm_dep = lambda { |_input, _ctx, deps|\n  \"#{ASYNC_CM_DEPENDENCY_VALUE}_#{deps[:async_dep]}\"\n}\n\nchained_dep = ->(_input, _ctx, deps) { \"chained_#{CHAINED_CM_VALUE}\" }\nchained_async_dep = ->(_input, _ctx, deps) { \"chained_#{CHAINED_ASYNC_CM_VALUE}\" }\n```"},"154":{"title":"How Middleware Executes","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"#### Python\n\nDependencies are resolved before each task execution. 
Each dependency function receives the original workflow input and the task context, and its return value is injected as a named parameter to the task function.\n\n#### Typescript\n\nWhen a task runs, the worker applies middleware hooks in this order:\n\n\n### Before hooks run in registration order\n\nEach `before` hook receives the current input and the task `Context`. Its return value **replaces** the input for the next hook (or the task itself). Returning `undefined` (or `void`) skips replacement and passes the input through unchanged.\n\n### The task function executes\n\nThe task receives the final input after all `before` hooks have run.\n\n### After hooks run in registration order\n\nEach `after` hook receives the current output, the task `Context`, and the final input. Its return value **replaces** the output for the next hook (or the final result). Returning `undefined` skips replacement.\n\n\nBoth `before` and `after` hooks can be **async** — return a `Promise` and it will be awaited before proceeding.\n\n> **Info:** If a middleware hook throws an error, the task run fails with that error. There is no built-in error recovery within middleware — use try/catch inside your hooks if you need graceful fallback.\n\n### The `ctx` Parameter\n\nThe second parameter of both `before` and `after` hooks is the task `Context` object. 
This gives middleware access to:\n- `ctx.workflowRunId` — the ID of the current workflow run\n- `ctx.stepRunId` — the ID of the current step run\n- `ctx.log()` — emit structured logs visible in the Hatchet dashboard\n- `ctx.cancel()` — cancel the current run from within middleware\n\n### Global Types vs Middleware Types\n\nThere are two ways extra fields end up on a task's input:\n\nMechanism, Set via, Required at call site?, Available at runtime?\n\n**Global input type**, `HatchetClient.init()`, Yes — callers must provide these fields, Yes\n**Middleware before hook**, `.withMiddleware({ before })`, No — injected automatically by the worker, Yes\n\nGlobal input types (`T` in `init()`) represent fields that **callers must supply** when triggering a task. This is useful when you know every task must always receive certain parameters — for example, a `userId` for authentication or a `tenantId` for multi-tenant routing. By declaring these as the global type, TypeScript enforces that every caller provides them.\n\nMiddleware `before` hooks, on the other hand, inject fields that are **computed at runtime** (e.g. request IDs, decrypted secrets, fetched config) and are **not** required from callers.\n\n```typescript\n    type RequiredContext = { userId: string; orgId: string };\n\n    const client = HatchetClient.init()\n      .withMiddleware({\n        before: (input) => ({\n          ...input,\n          resolvedAt: Date.now(),        // injected, not required from caller\n          permissions: lookupPerms(input.userId), // derived from global type\n        }),\n      });\n\n    // Callers MUST provide userId and orgId — TypeScript enforces this\n    await myTask.run({ userId: 'usr_123', orgId: 'org_456', /* ...task fields */ });\n```\n\n#### Go\n\n> **Info:** Middleware support for the Go SDK is coming soon. Join our [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\nDependencies are resolved in the order they are declared in the `deps` hash. 
Each dependency function can optionally receive already-resolved dependencies as its third argument, enabling chaining."},"155":{"title":"Using Middleware in Tasks","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"#### Python\n\nInject dependencies into your tasks using `Depends` and type annotations. The dependency results are passed directly as function parameters.\n\n```python\n@hatchet.task()\nasync def async_task_with_dependencies(\n    _i: EmptyModel,\n    ctx: Context,\n    async_dep: Annotated[str, Depends(async_dep)],\n    sync_dep: Annotated[str, Depends(sync_dep)],\n    async_cm_dep: Annotated[str, Depends(async_cm_dep)],\n    sync_cm_dep: Annotated[str, Depends(sync_cm_dep)],\n    chained_dep: Annotated[str, Depends(chained_dep)],\n    chained_async_dep: Annotated[str, Depends(chained_async_dep)],\n) -> Output:\n    return Output(\n        sync_dep=sync_dep,\n        async_dep=async_dep,\n        async_cm_dep=async_cm_dep,\n        sync_cm_dep=sync_cm_dep,\n        chained_dep=chained_dep,\n        chained_async_dep=chained_async_dep,\n    )\n```\n\n\n  Your dependency functions must take two positional arguments: the workflow input and the `Context` (the same as any other task).\n\n\n#### Typescript\n\nTasks created from a middleware-enabled client automatically receive the merged input and output types. 
There is no extra configuration needed on the task itself.\n\n```typescript\nimport { hatchetWithMiddleware } from './client';\n\ntype TaskInput = {\n  message: string;\n};\n\ntype TaskOutput = {\n  message: string;\n};\n\nexport const taskWithMiddleware = hatchetWithMiddleware.task({\n  name: 'task-with-middleware',\n  fn: (input, _ctx) => {\n    console.log('task', input.message); // string  (from TaskInput)\n    console.log('task', input.first); // number  (from GlobalInputType)\n    console.log('task', input.second); // number  (from GlobalInputType)\n    console.log('task', input.dependency); // string  (from Pre Middleware)\n    return {\n      message: input.message,\n      extra: 1,\n    };\n  },\n});\n// !!\n```\n\nThe task's `input` type is the intersection of `TaskInput`, `GlobalInputType`, and the return type of the `before` middleware hook. The task's return type must satisfy `TaskOutput` and `GlobalOutputType`, while the caller receives the intersection of those with the `after` middleware return type.\n\n#### Go\n\n> **Info:** Middleware support for the Go SDK is coming soon. Join our [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\nPass a `deps` hash when defining a task. 
The resolved dependency values are available inside the task block via `ctx.deps`.\n\n```ruby\nASYNC_TASK_WITH_DEPS = HATCHET.task(\n  name: \"async_task_with_dependencies\",\n  deps: {\n    sync_dep: sync_dep,\n    async_dep: async_dep,\n    sync_cm_dep: sync_cm_dep,\n    async_cm_dep: async_cm_dep,\n    chained_dep: chained_dep,\n    chained_async_dep: chained_async_dep\n  }\n) do |input, ctx|\n  {\n    \"sync_dep\" => ctx.deps[:sync_dep],\n    \"async_dep\" => ctx.deps[:async_dep],\n    \"async_cm_dep\" => ctx.deps[:async_cm_dep],\n    \"sync_cm_dep\" => ctx.deps[:sync_cm_dep],\n    \"chained_dep\" => ctx.deps[:chained_dep],\n    \"chained_async_dep\" => ctx.deps[:chained_async_dep]\n  }\nend\n\nSYNC_TASK_WITH_DEPS = HATCHET.task(\n  name: \"sync_task_with_dependencies\",\n  deps: {\n    sync_dep: sync_dep,\n    async_dep: async_dep,\n    sync_cm_dep: sync_cm_dep,\n    async_cm_dep: async_cm_dep,\n    chained_dep: chained_dep,\n    chained_async_dep: chained_async_dep\n  }\n) do |input, ctx|\n  {\n    \"sync_dep\" => ctx.deps[:sync_dep],\n    \"async_dep\" => ctx.deps[:async_dep],\n    \"async_cm_dep\" => ctx.deps[:async_cm_dep],\n    \"sync_cm_dep\" => ctx.deps[:sync_cm_dep],\n    \"chained_dep\" => ctx.deps[:chained_dep],\n    \"chained_async_dep\" => ctx.deps[:chained_async_dep]\n  }\nend\n\nDURABLE_ASYNC_TASK_WITH_DEPS = HATCHET.durable_task(\n  name: \"durable_async_task_with_dependencies\",\n  deps: {\n    sync_dep: sync_dep,\n    async_dep: async_dep,\n    sync_cm_dep: sync_cm_dep,\n    async_cm_dep: async_cm_dep,\n    chained_dep: chained_dep,\n    chained_async_dep: chained_async_dep\n  }\n) do |input, ctx|\n  {\n    \"sync_dep\" => ctx.deps[:sync_dep],\n    \"async_dep\" => ctx.deps[:async_dep],\n    \"async_cm_dep\" => ctx.deps[:async_cm_dep],\n    \"sync_cm_dep\" => ctx.deps[:sync_cm_dep],\n    \"chained_dep\" => ctx.deps[:chained_dep],\n    \"chained_async_dep\" => ctx.deps[:chained_async_dep]\n  }\nend\n\nDURABLE_SYNC_TASK_WITH_DEPS = 
HATCHET.durable_task(\n  name: \"durable_sync_task_with_dependencies\",\n  deps: {\n    sync_dep: sync_dep,\n    async_dep: async_dep,\n    sync_cm_dep: sync_cm_dep,\n    async_cm_dep: async_cm_dep,\n    chained_dep: chained_dep,\n    chained_async_dep: chained_async_dep\n  }\n) do |input, ctx|\n  {\n    \"sync_dep\" => ctx.deps[:sync_dep],\n    \"async_dep\" => ctx.deps[:async_dep],\n    \"async_cm_dep\" => ctx.deps[:async_cm_dep],\n    \"sync_cm_dep\" => ctx.deps[:sync_cm_dep],\n    \"chained_dep\" => ctx.deps[:chained_dep],\n    \"chained_async_dep\" => ctx.deps[:chained_async_dep]\n  }\nend\n\nDI_WORKFLOW = HATCHET.workflow(name: \"dependency-injection-workflow\")\n\n# Workflow tasks with dependencies follow the same pattern\nDI_WORKFLOW.task(:wf_task_with_dependencies) do |input, ctx|\n  {\n    \"sync_dep\" => SYNC_DEPENDENCY_VALUE,\n    \"async_dep\" => ASYNC_DEPENDENCY_VALUE\n  }\nend\n```"},"156":{"title":"Running a Worker","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"#### Python\n\nNo special worker configuration is needed — dependencies are evaluated automatically each time a task runs.\n\n#### Typescript\n\nWorkers are created from the same middleware-enabled client. No special setup is required — the middleware hooks are applied automatically when tasks execute.\n\n```typescript\nimport { hatchetWithMiddleware } from './client';\nimport { taskWithMiddleware } from './workflow';\n\nasync function main() {\n  const worker = await hatchetWithMiddleware.worker('task-with-middleware', {\n    workflows: [taskWithMiddleware],\n  });\n\n  await worker.start();\n}\n\nif (require.main === module) {\n  main();\n}\n```\n\n#### Go\n\n> **Info:** Middleware support for the Go SDK is coming soon. 
Join our [Discord](https://hatchet.run/discord) to stay up to date.\n\n#### Ruby\n\nNo special worker configuration is needed — dependencies are resolved automatically before each task execution."},"157":{"title":"Practical Examples","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"The examples below show TypeScript middleware for common production patterns. Each can be adapted to the Python dependency injection model by extracting the same logic into a dependency function.\n\n### End-to-End Encryption\n\nEncrypt sensitive input fields before they reach the Hatchet server, and decrypt the output on the way back. This ensures plaintext data never leaves your worker process.\n\n```typescript\nimport { HatchetClient, HatchetMiddleware } from '@hatchet/v1';\nimport { randomUUID, createCipheriv, createDecipheriv, randomBytes } from 'crypto';\n```\n\n> **Info:** The `before` hook decrypts incoming data so your task function works with\n>   plaintext. The `after` hook encrypts the output before it is stored. 
The\n>   encryption key never leaves the worker environment.\n\n### Offloading Large Payloads to S3\n\nWhen task inputs or outputs exceed Hatchet's payload size limit (or you simply want to keep large blobs out of the control plane), upload them to S3 and pass a signed URL instead.\n\n```typescript\nimport { S3Client, PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';\nimport { getSignedUrl } from '@aws-sdk/s3-request-presigner';\n\nconst ALGORITHM = 'aes-256-gcm';\nconst KEY = Buffer.from(process.env.ENCRYPTION_KEY!, 'hex');\n\ntype EncryptedEnvelope = { ciphertext: string; iv: string; tag: string };\n\nfunction encrypt(plaintext: string): EncryptedEnvelope {\n  const iv = randomBytes(16);\n  const cipher = createCipheriv(ALGORITHM, KEY, iv);\n  const encrypted = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);\n  return {\n    ciphertext: encrypted.toString('base64'),\n    iv: iv.toString('base64'),\n    tag: cipher.getAuthTag().toString('base64'),\n  };\n}\n\nfunction decrypt(ciphertext: string, iv: string, tag: string): string {\n  const decipher = createDecipheriv(ALGORITHM, KEY, Buffer.from(iv, 'base64'));\n  decipher.setAuthTag(Buffer.from(tag, 'base64'));\n  return decipher.update(ciphertext, 'base64', 'utf8') + decipher.final('utf8');\n}\n\ntype EncryptedInput = { encrypted?: EncryptedEnvelope };\n\nconst e2eEncryption: HatchetMiddleware = {\n  before: (input) => {\n    if (!input.encrypted) {\n      return input;\n    }\n    const { ciphertext, iv, tag } = input.encrypted;\n    const decrypted = JSON.parse(decrypt(ciphertext, iv, tag));\n    return { ...input, ...decrypted, encrypted: undefined };\n  },\n  after: (output) => {\n    const payload = JSON.stringify(output);\n    return { encrypted: encrypt(payload) };\n  },\n};\n\nconst encryptionClient = HatchetClient.init().withMiddleware(e2eEncryption);\n\nconst s3 = new S3Client({ region: process.env.AWS_REGION });\nconst BUCKET = process.env.S3_BUCKET!;\nconst 
PAYLOAD_THRESHOLD = 256 * 1024; // 256 KB\n\nasync function uploadToS3(data: unknown): Promise<string> {\n  const key = `hatchet-payloads/${randomUUID()}.json`;\n  await s3.send(\n    new PutObjectCommand({\n      Bucket: BUCKET,\n      Key: key,\n      Body: JSON.stringify(data),\n      ContentType: 'application/json',\n    })\n  );\n  return getSignedUrl(s3, new GetObjectCommand({ Bucket: BUCKET, Key: key }), {\n    expiresIn: 3600,\n  });\n}\n\nasync function downloadFromS3(url: string): Promise<unknown> {\n  const res = await fetch(url);\n  return res.json();\n}\n\ntype S3Input = { s3Url?: string };\n\nconst s3Offload: HatchetMiddleware = {\n  before: async (input) => {\n    if (input.s3Url) {\n      const restored = (await downloadFromS3(input.s3Url)) as Record<string, any>;\n      return { ...restored, s3Url: undefined };\n    }\n    return input;\n  },\n  after: async (output) => {\n    const serialized = JSON.stringify(output);\n    if (serialized.length > PAYLOAD_THRESHOLD) {\n      const url = await uploadToS3(output);\n      return { s3Url: url };\n    }\n    return output;\n  },\n};\n\nconst s3Client = HatchetClient.init().withMiddleware(s3Offload);\n```\n\n\n  The caller is responsible for uploading oversized inputs to S3 before triggering the task. The `before` hook only handles the download side. You can use the same `uploadToS3` helper on the caller side to upload the input and pass `{ s3Url: url }` as the task input."},"158":{"title":"FAQ","pageTitle":"Middleware","pageRoute":"hatchet://docs/v1/middleware","content":"### What is Hatchet middleware and how does it differ from Express middleware?\n\nHatchet middleware runs **inside the worker process** around each task invocation — not on an HTTP request path. A `before` hook transforms input before the task runs, and an `after` hook transforms output after. 
Unlike Express middleware, there is no `next()` function; hooks return their result directly and the runner chains them automatically.\n\n### Can I use middleware with both tasks and workflows?\n\nYes. Middleware is registered on the `HatchetClient` instance, so it applies to every task created from that client — whether the task is a standalone `client.task()` or part of a multi-step `client.workflow()`. Each step in a workflow will have middleware applied independently.\n\n### Does middleware run on the server or on the worker?\n\nMiddleware runs entirely **on the worker**. The Hatchet server never sees or executes your middleware code. This is what makes patterns like end-to-end encryption possible — plaintext data stays within your infrastructure.\n\n### What happens if my middleware throws an error?\n\nIf a `before` or `after` hook throws (or returns a rejected `Promise`), the task run fails with that error. There is no automatic retry of middleware itself, but the task's configured retry policy will still apply, re-running the task (and its middleware) from scratch.\n\n### Can I use async/await in middleware hooks?\n\nYes. Both `before` and `after` hooks can be synchronous or asynchronous. If a hook returns a `Promise`, the worker will `await` it before proceeding to the next hook or the task function.\n\n### How do I share state between `before` and `after` hooks?\n\nThe `after` hook receives the task input (after `before` hooks have run) as its third argument. Add fields in `before` (e.g. `startedAt`, `traceId`) and read them from `input` in `after`. There is no separate shared context object — the input itself is the carrier.\n\n### Does middleware apply to child tasks spawned via fanout?\n\nMiddleware is scoped to the **client instance**. If a child task is defined on the same middleware-enabled client, its middleware will run when that child task executes. 
If the child task uses a different client instance, only that client's middleware (if any) applies.\n\n### Can I selectively skip middleware for certain tasks?\n\nMiddleware applies to **all** tasks on a given client. To skip middleware for specific tasks, create a second client without middleware and define those tasks on it. This is a deliberate design choice — middleware is a cross-cutting concern, and selective opt-out is handled at the client boundary.\n\n### Is there a performance overhead to using middleware?\n\nMiddleware hooks are plain JavaScript functions that run in-process on the worker. The overhead is the execution time of your hook code. For lightweight operations (adding a field, logging), the overhead is negligible. For heavier operations (network calls like S3 uploads or decryption), the task's total duration will include that time, so keep hooks as efficient as possible.\n\n### What is the difference between global types and middleware types in TypeScript?\n\nGlobal types (`HatchetClient.init()`) define fields that **callers must provide** when triggering a task. Middleware types (inferred from `withMiddleware` return values) define fields that are **injected at runtime** by the worker. Both end up on the task's `input` type, but only global types appear in the caller-facing `run()` signature.\n\n### Can I use middleware for rate limiting or authentication?\n\nYes. A `before` hook can check rate limits, validate API keys, or verify JWTs before the task runs. If the check fails, throw an error to abort the task. However, for rate limiting specifically, consider using Hatchet's built-in [rate limiting](/home/rate-limits) feature, which operates at the scheduling layer and is more efficient than in-worker checks.\n\n### How do I test middleware in isolation?\n\nMiddleware hooks are plain functions — you can unit-test them directly by calling them with mock input and a mock context object. 
For integration tests, the e2e test pattern of creating a client, attaching middleware, defining a task, starting a worker, and asserting on the result works well. See the [middleware example on GitHub](https://github.com/hatchet-dev/hatchet/tree/main/examples/typescript/middleware) for a complete test setup."},"159":{"title":"Streaming","pageTitle":"Streaming","pageRoute":"hatchet://docs/v1/streaming","content":"# Streaming in Hatchet\n\nHatchet tasks can stream data back to a consumer in real-time. This has a number of valuable uses, such as streaming the results of an LLM call back from a Hatchet worker to a frontend or sending progress updates as a task chugs along."},"160":{"title":"Publishing Stream Events","pageTitle":"Streaming","pageRoute":"hatchet://docs/v1/streaming","content":"You can stream data out of a task run by using the `put_stream` (or equivalent) method on the `Context`.\n\n#### Python\n\n```python\nanna_karenina = \"\"\"\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.\n\"\"\"\n\n\ndef create_chunks(content: str, n: int) -> Generator[str, None, None]:\n    for i in range(0, len(content), n):\n        yield content[i : i + n]\n\n\nchunks = list(create_chunks(anna_karenina, 10))\n\n\n@hatchet.task()\nasync def stream_task(input: EmptyModel, ctx: Context) -> None:\n    # 👀 Sleeping to avoid race conditions\n    await asyncio.sleep(2)\n\n    for chunk in chunks:\n        await ctx.aio_put_stream(chunk)\n        await asyncio.sleep(0.20)\n```\n\n#### Typescript\n\n```typescript\nconst annaKarenina = `\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys' house. 
The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.\n`;\n\nfunction* createChunks(content: string, n: number): Generator<string, void, unknown> {\n  for (let i = 0; i < content.length; i += n) {\n    yield content.slice(i, i + n);\n  }\n}\n\nexport const streamingTask = hatchet.task({\n  name: 'stream-example',\n  fn: async (_, ctx) => {\n    await sleep(2000);\n\n    for (const chunk of createChunks(annaKarenina, 10)) {\n      ctx.putStream(chunk);\n      await sleep(200);\n    }\n  },\n});\n```\n\n#### Go\n\n```go\nconst annaKarenina = `\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.\n`\n\nfunc createChunks(content string, n int) []string {\n\tvar chunks []string\n\tfor i := 0; i < len(content); i += n {\n\t\tend := i + n\n\t\tif end > len(content) {\n\t\t\tend = len(content)\n\t\t}\n\t\tchunks = append(chunks, content[i:end])\n\t}\n\treturn chunks\n}\n\nfunc StreamTask(ctx hatchet.Context, input StreamTaskInput) (*StreamTaskOutput, error) {\n\ttime.Sleep(2 * time.Second)\n\n\tchunks := createChunks(annaKarenina, 10)\n\n\tfor _, chunk := range chunks {\n\t\tctx.PutStream(chunk)\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\n\treturn &StreamTaskOutput{\n\t\tMessage: \"Streaming completed\",\n\t}, nil\n}\n```\n\n#### Ruby\n\n```ruby\nANNA_KARENINA = <<~TEXT\n  Happy families are all alike; every unhappy family is unhappy in its own way.\n\n  Everything was in confusion in the Oblonskys' house. 
The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.\nTEXT\n\nSTREAM_CHUNKS = ANNA_KARENINA.scan(/.{1,10}/)\n\nSTREAM_TASK = HATCHET.task(name: \"stream_task\") do |input, ctx|\n  # Sleeping to avoid race conditions\n  sleep 2\n\n  STREAM_CHUNKS.each do |chunk|\n    ctx.put_stream(chunk)\n    sleep 0.20\n  end\nend\n```\n\nThis task will stream small chunks of content through Hatchet, which can then be consumed elsewhere. Here we use some text as an example, but this is intended to replicate streaming the results of an LLM call back to a consumer."},"161":{"title":"Consuming Streams","pageTitle":"Streaming","pageRoute":"hatchet://docs/v1/streaming","content":"You can easily consume stream events by using the stream method on the workflow run ref that the various [fire-and-forget](/v1/running-your-task#fire-and-forget) methods return.\n\n#### Python\n\n```python\nref = await stream_task.aio_run(wait_for_result=False)\n\nasync for chunk in hatchet.runs.subscribe_to_stream(ref.workflow_run_id):\n    print(chunk, flush=True, end=\"\")\n```\n\n#### Typescript\n\n```typescript\nconst ref = await streamingTask.runNoWait({});\nconst id = await ref.getWorkflowRunId();\n\nfor await (const content of hatchet.runs.subscribeToStream(id)) {\n  process.stdout.write(content);\n}\n```\n\n#### Go\n\n```go\nfunc main() {\n\tclient, err := hatchet.NewClient()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create Hatchet client: %v\", err)\n\t}\n\n\tctx := context.Background()\n\n\tstreamingWorkflow := shared.StreamingWorkflow(client)\n\n\tworkflowRun, err := streamingWorkflow.RunNoWait(ctx, shared.StreamTaskInput{})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to run workflow: %v\", err)\n\t}\n\n\tid := workflowRun.RunId\n\tstream := client.Runs().SubscribeToStream(ctx, id)\n\n\tfor content := range stream 
{\n\t\tfmt.Print(content)\n\t}\n\n\tfmt.Println(\"\\nStreaming completed!\")\n}\n```\n\n#### Ruby\n\n```ruby\nref = STREAM_TASK.run_no_wait\n\nHATCHET.runs.subscribe_to_stream(ref.workflow_run_id) do |chunk|\n  print chunk\nend\n```\n\nIn the examples above, this will result in the famous text below being gradually printed to the console, bit by bit.\n\n```\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.\n```\n\n\n  You must begin consuming the stream before any events are published. Any\n  events published before a consumer is initialized will be dropped. In\n  practice, this will not be an issue in most cases, but adding a short sleep\n  before beginning streaming results back can help."},"162":{"title":"Streaming to a Web Application","pageTitle":"Streaming","pageRoute":"hatchet://docs/v1/streaming","content":"It's common to want to stream events out of a Hatchet task and back to the frontend of your application, for consumption by an end user. 
As mentioned before, some clear cases where this is useful would be for streaming back progress of some long-running task for a customer to monitor, or streaming back the results of an LLM call.\n\nIn both cases, we recommend using your application's backend as a proxy for the stream, where you would subscribe to the stream of events from Hatchet, and then stream events through to the frontend as they're received by the backend.\n\n#### Python\n\nFor example, with FastAPI, you'd do the following:\n\n```python\nhatchet = Hatchet()\napp = FastAPI()\n\n\n@app.get(\"/stream\")\nasync def stream() -> StreamingResponse:\n    ref = await stream_task.aio_run(wait_for_result=False)\n\n    return StreamingResponse(\n        hatchet.runs.subscribe_to_stream(ref.workflow_run_id), media_type=\"text/plain\"\n    )\n```\n\n#### Typescript\n\nFor example, with NextJS backend-as-frontend, you'd do the following:\n\n```typescript\nexport async function GET(): Promise<Response> {\n  try {\n    const ref = await streamingTask.runNoWait({});\n    const workflowRunId = await ref.getWorkflowRunId();\n\n    const stream = Readable.from(hatchet.runs.subscribeToStream(workflowRunId));\n\n    return new Response(Readable.toWeb(stream), {\n      headers: {\n        'Content-Type': 'text/plain',\n        'Cache-Control': 'no-cache',\n        Connection: 'keep-alive',\n      },\n    });\n  } catch (error) {\n    return new Response('Internal Server Error', { status: 500 });\n  }\n}\n```\n\n#### Go\n\nFor example, with Go's built-in HTTP server, you'd do the following:\n\n```go\nfunc main() {\n\tclient, err := hatchet.NewClient()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create Hatchet client: %v\", err)\n\t}\n\n\tstreamingWorkflow := shared.StreamingWorkflow(client)\n\n\thttp.HandleFunc(\"/stream\", func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := context.Background()\n\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.Header().Set(\"Cache-Control\", 
\"no-cache\")\n\t\tw.Header().Set(\"Connection\", \"keep-alive\")\n\n\t\tworkflowRun, err := streamingWorkflow.RunNoWait(ctx, shared.StreamTaskInput{})\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tstream := client.Runs().SubscribeToStream(ctx, workflowRun.RunId)\n\n\t\tflusher, _ := w.(http.Flusher)\n\t\tfor content := range stream {\n\t\t\tfmt.Fprint(w, content)\n\t\t\tif flusher != nil {\n\t\t\t\tflusher.Flush()\n\t\t\t}\n\t\t}\n\t})\n\n\tserver := &http.Server{\n\t\tAddr:         \":8000\",\n\t\tReadTimeout:  5 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t}\n\n\tif err := server.ListenAndServe(); err != nil {\n\t\tlog.Println(\"Failed to start server:\", err)\n\t}\n}\n```\n\n#### Ruby\n\n\nThen, assuming you run the server on port `8000`, running `curl -N http://localhost:8000/stream` would result in the text streaming back to your console from Hatchet through your backend proxy."},"163":{"title":"Environments","pageTitle":"Environments","pageRoute":"hatchet://docs/v1/environments","content":"# Managing Environments with Hatchet"},"164":{"title":"Multiple Developers, One Orchestrator","pageTitle":"Environments","pageRoute":"hatchet://docs/v1/environments","content":"When multiple developers share a single Hatchet orchestrator, conflicts can arise as workflow runs and events intermingle. Without proper isolation, one developer's workflows might interfere with another's testing or development work.\n\nHatchet provides three key solutions for managing this challenge: namespaces, multi-tenancy, and a local Hatchet instance.\n\n### Solution 1: Multi-Tenancy\n\nThe easiest way to isolate environments for different developers or teams is to use Hatchet's multi-tenancy feature. Each tenant represents a separate environment with its own set of workflows and workers. To add a new tenant for each developer, create an organization and follow these steps:\n\n1. 
Access the organization dropdown in the dashboard (top right)\n2. Select the `+` icon next to your organization's name\n3. Generate a new token for that tenant\n4. Each developer configures their environment with their designated tenant token\n\n### Solution 2: Local Hatchet Instance\n\nIf you are using Hatchet locally, you can create a local instance of Hatchet to manage your isolated local development environment. Follow instructions [here](/self-hosting/hatchet-lite) to get started."},"165":{"title":"Troubleshooting Workers","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"# Troubleshooting Hatchet Workers\n\nThis guide covers common issues when deploying and operating Hatchet workers."},"166":{"title":"Quick debugging checklist","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"Before diving into specific issues, run through these checks:\n\n1. **Verify your API token** — make sure `HATCHET_CLIENT_TOKEN` matches the token generated in the Hatchet dashboard for your tenant.\n2. **Check worker logs** — look for connection errors, heartbeat failures, or crash traces in your worker output.\n3. **Check the dashboard** — navigate to the Workers tab to see if your worker is registered and healthy.\n4. **Confirm network connectivity** — workers need to reach the Hatchet engine over gRPC. Firewalls, VPNs, or missing TLS configuration can block this.\n5. **Check SDK version** — ensure your SDK version is compatible with your engine version. Mismatches can cause subtle failures."},"167":{"title":"Could not send task to worker","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"If you see this error in the event history of a task, it could mean several things:\n\n1. The worker is closing its network connection while the task is being sent. This could be caused by the worker crashing or going offline.\n\n2. 
The payload is too large for the worker to accept or the Hatchet engine to send. The default maximum payload size is 4MB. Consider reducing the size of the input data or output data of your tasks.\n\n3. The worker has a large backlog of tasks in-flight on the network connection and is rejecting new tasks. This can occur if workers are geographically distant from the Hatchet engine or if there are network issues causing delays. Hatchet Cloud runs by default in `us-west-2` (Oregon, USA), so consider deploying your workers in a region close to that for the best performance.\n\n   If you are self-hosting, you can increase the maximum backlog size via the `SERVER_GRPC_WORKER_STREAM_MAX_BACKLOG_SIZE` environment variable in your Hatchet engine configuration. The default is 20."},"168":{"title":"No workers visible in dashboard","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"If you have deployed workers but they are not visible in the Hatchet dashboard, it is likely that:\n\n1. Your API token is invalid or incorrect. Ensure that the token you are using to start the worker matches the token generated in the Hatchet dashboard for your tenant.\n\n2. Worker heartbeats are not reaching the Hatchet engine. You will see noisy logs in the worker output if this is the case."},"169":{"title":"Tasks stuck in QUEUED state","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"If tasks remain in the `QUEUED` state and never move to `RUNNING`:\n\n1. **No workers registered for the task** — check the Workers tab in the dashboard and confirm a worker is registered that handles the task name. If you recently renamed a task, make sure the worker has been restarted with the updated code.\n\n2. **All worker slots are full** — if every slot is occupied by other tasks, new tasks will wait in the queue. 
Check worker utilization in the dashboard or increase the [slot count](/v1/workers#slots).\n\n3. **Concurrency or rate limit is blocking** — if you've configured [concurrency limits](/v1/concurrency) or [rate limits](/v1/rate-limits), tasks may be held back intentionally. Review your configuration."},"170":{"title":"Worker keeps disconnecting","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"If your worker repeatedly connects and then drops:\n\n1. **Resource exhaustion** — the worker process may be running out of memory or CPU and getting killed by the OS or orchestrator (OOM kill). Check system logs and increase resource limits.\n\n2. **Network instability** — intermittent connectivity between the worker and the Hatchet engine will cause reconnection cycles. Check for packet loss or high latency between the worker and the engine.\n\n3. **Graceful shutdown not configured** — if your deployment platform sends `SIGTERM` and the worker doesn't handle it, in-flight tasks may be interrupted. Ensure your worker handles shutdown signals and gives tasks time to complete."},"171":{"title":"Phantom workers active in dashboard","pageTitle":"Troubleshooting Workers","pageRoute":"hatchet://docs/v1/troubleshooting/index","content":"This is often due to workers still running in your deployed environment. We see this most often with very long termination periods for workers, or in local development environments where worker processes are leaking. 
If you are in a local development environment, you can usually view running Hatchet worker processes via `ps -a | grep worker` (or whatever your entrypoint binary is called) and kill them manually."},"172":{"title":"Architecture & Guarantees","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"# Architecture & Guarantees\n\nThis page explains how Hatchet is put together, what the main components do, and what reliability guarantees you should design your workers around."},"173":{"title":"Architecture overview","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"Hatchet has three main moving pieces:\n\n- **API server**: the HTTP surface area for triggering workflows, querying state, and powering the UI\n- **Engine**: schedules and dispatches work, enforces dependencies/policies, and records state transitions durably\n- **Workers**: your processes that run the actual task code\n\nState is stored durably (PostgreSQL is the source of truth). 
In many deployments that’s enough—no separate broker required—while self-hosted high-throughput setups can add additional components (for example, RabbitMQ) based on your needs.\n\nHatchet Cloud and self-hosted Hatchet share the same architecture; the difference is who runs and operates the Hatchet services.\n\n```mermaid\ngraph LR\n    subgraph \"External (Optional)\"\n        EXT[Webhooks<br/>Events]\n    end\n\n    subgraph \"Your Infrastructure\"\n        APP[Your API, App, Service, etc.]\n        W[Workers]\n    end\n\n    subgraph \"Hatchet\"\n        API[API Server]\n        ENG[Engine]\n        DB[(Database)]\n    end\n\n    EXT --> API\n    APP <--> API\n    API --> ENG\n    ENG <--> DB\n    API <--> DB\n    ENG <-.->|gRPC| W\n\n    classDef userInfra fill:#e3f2fd,stroke:#1976d2,stroke-width:2px,color:#0d47a1\n    classDef hatchet fill:#f1f8e9,stroke:#388e3c,stroke-width:2px,color:#1b5e20\n    classDef external fill:#fff8e1,stroke:#f57c00,stroke-width:2px,color:#e65100\n\n    class APP,W userInfra\n    class API,ENG,DB hatchet\n    class EXT external\n```"},"174":{"title":"Core components","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"### API server\n\nThe API server is the front door to Hatchet. 
It’s what your application and the Hatchet UI talk to in order to:\n\n- trigger workflows with input data\n- query workflow/task state (and, where supported, subscribe to updates)\n- manage resources like schedules and settings\n- ingest webhooks/events (optional)\n\n### Engine\n\nThe engine is responsible for turning “a workflow should run” into “these tasks are ready and should be executed.” In practice, it:\n\n- evaluates workflow dependencies\n- enforces policies like concurrency limits, rate limits, and priorities\n- schedules ready tasks and dispatches them to connected workers\n- records state transitions durably and applies retry/timeout behavior\n- runs scheduled/cron workflows\n\nWorkers connect to the engine over bidirectional gRPC, which allows low-latency dispatch and frequent status updates.\n\n### Workers\n\nWorkers are your processes. They connect to the engine, receive tasks, run your code, and report progress/results back to Hatchet.\n\nWorkers are intentionally flexible: you can run them locally, in containers, or on VMs, and you can scale workers independently from the Hatchet services. You can also run different “types” of workers (and even different languages) depending on what your system needs.\n\n### Storage (and optional messaging)\n\nPostgreSQL is the durable store for workflow definitions and execution state (queued/running/completed, inputs/outputs, retries, etc.). 
In self-hosted deployments, you can start with PostgreSQL-only and add components like RabbitMQ if you need higher throughput."},"175":{"title":"Guarantees & tradeoffs","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"Hatchet aims to sit in the middle: more structure than a simple queue, but simpler to operate than a full distributed workflow system.\n\n### Good fit for\n\n- **Workflow orchestration** with dependencies, retries, and timeouts\n- **Durable background jobs** where “don’t lose work” matters\n- **Moderate to high throughput** systems (and a path to higher scale with tuning/sharding). If you’re pushing the limits, [contact us](https://hatchet.run/contact).\n- **Multi-language / polyglot workers**\n- **Teams already on PostgreSQL** who want operational simplicity\n- **Cloud or air-gapped environments** ([Hatchet Cloud](https://cloud.onhatchet.run) or [self-hosting](/self-hosting))\n\n### Not a good fit for\n\n- **Extremely high throughput** without sharding/custom tuning (for example, sustaining 10,000+ tasks/sec)\n- **Sub-millisecond dispatch latency** requirements\n- **In-memory-only queuing** where durability is unnecessary\n- **Serverless-only runtimes** (e.g. AWS Lambda / Cloud Functions) as your primary worker model"},"176":{"title":"Core reliability guarantees","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"### At-least-once task execution\n\nHatchet is **at least once**: tasks are not silently dropped, and failures retry according to your configuration. This also means **a task can run more than once**, so your task code should be **idempotent** (or otherwise safe to retry).\n\n### Durable state transitions\n\nWorkflow state is persisted in PostgreSQL, and state transitions are performed transactionally. 
This helps keep dependency resolution consistent and makes the system resilient to restarts and transient failures.\n\n### Execution policies are explicit\n\nBy default, task assignment is FIFO, and you can change execution behavior using:\n\n- [Concurrency policies](/v1/concurrency)\n- [Rate limits](/v1/rate-limits)\n- [Priorities](/v1/priority)\n\n### Stateless services; resilient workers\n\nThe engine and API server are designed to restart without losing state, which also enables horizontal scaling by running multiple instances. Workers reconnect after network interruptions and can run close to your services (or close to Hatchet) depending on your latency goals."},"177":{"title":"Performance expectations","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"Real-world performance depends heavily on topology (worker ↔ engine network latency), database sizing, and workload shape.\n\n- **Dispatch latency**: often sub-50ms with PostgreSQL-backed storage; in optimized, “hot worker” setups it can be closer to ~25ms P95.\n- **Throughput**: varies by setup. PostgreSQL-only deployments often handle hundreds of tasks/sec per engine instance; higher throughput typically requires additional tuning and/or components like RabbitMQ. With tuning and sharding, Hatchet can scale into the high tens of thousands of tasks/sec—[contact us](https://hatchet.run/contact) if you want to design for that.\n- **Common bottlenecks**: DB connection limits, large payloads (e.g. 
> 1MB), complex dependency graphs, and cross-region latency.\n\n> **Warning:** **Not seeing expected performance?**\n>\n> If you're not seeing the performance you expect, please [reach out to us](https://hatchet.run/office-hours) or [join our community](https://hatchet.run/discord) to explore tuning options."},"178":{"title":"Next Steps","pageTitle":"Architecture & Guarantees","pageRoute":"hatchet://docs/v1/architecture-and-guarantees","content":"- **[Quick Start](/v1/quickstart)**: set up your first Hatchet worker\n- **[Self-Hosting](/self-hosting)**: deploy Hatchet on your own infrastructure"},"179":{"title":"Cloud vs OSS","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"# Cloud vs OSS\n\nHatchet is available as **Hatchet Cloud** (managed) and as **open source** (self-hosted). The programming model is the same: you write tasks/workflows in code and run workers that connect to Hatchet.\n\nThis page helps you decide which deployment model fits your team."},"180":{"title":"Quick decision guide","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"Choose **Hatchet Cloud** if you want:\n\n- the Hatchet control plane operated for you (upgrades, scaling, backups)\n- the fastest path to production\n- a status page and managed incident response\n\nChoose **self-hosted (OSS)** if you need:\n\n- full control over infrastructure and networking\n- strict data residency or air-gapped environments\n- a deployment you can customize and operate with your own tooling"},"181":{"title":"What’s the same in both","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"- **SDK + worker model**: your workers run your code and connect to Hatchet\n- **Durability + retries**: tasks are durably tracked and retry according to configuration\n- **Observability surfaces**: you can inspect runs, workers, and workflow history\n- **Core semantics**: the same workflows/tasks/concurrency patterns 
apply"},"182":{"title":"What changes (operational responsibilities)","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"### Hatchet Cloud (managed)\n\nHatchet runs and operates the Hatchet services. You bring:\n\n- your worker processes\n- your application code that triggers workflows\n- your operational policies (timeouts, retries, concurrency, rate limits)\n\nFor security and compliance documentation, see the **[Hatchet Trust Center](https://trust.hatchet.run/)**. For current incidents and historical uptime, see **[status.hatchet.run](https://status.hatchet.run/)**.\n\n### Self-hosted (OSS)\n\nYou run and operate the Hatchet services and their dependencies. Typical responsibilities include:\n\n- provisioning and scaling the Hatchet services\n- managing PostgreSQL (and any optional components you deploy)\n- backups, upgrades, and monitoring\n- network security and access control for the API/DB\n\nIf you’re planning production usage, start with:\n\n- [Self Hosting](/self-hosting)\n- [High Availability](/self-hosting/high-availability)\n- [Security](/v1/security)"},"183":{"title":"Migrating between Cloud and self-hosted","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"You can move between deployment models without rewriting worker code. 
In practice, migration usually means:\n\n- pointing workers and clients at a new endpoint\n- swapping credentials/tokens\n- validating environment-specific settings (TLS, networking, retention, etc.)"},"184":{"title":"Next steps","pageTitle":"Cloud vs OSS","pageRoute":"hatchet://docs/v1/cloud-vs-oss","content":"- **[Quickstart](/v1/quickstart)**: run a worker and trigger your first workflow\n- **[Architecture & Guarantees](/v1/architecture-and-guarantees)**: understand reliability semantics and tradeoffs\n- **[Self Hosting](/self-hosting)**: deploy Hatchet on your own infrastructure"},"185":{"title":"About","pageTitle":"About","pageRoute":"hatchet://docs/v1/security/index","content":"# Security\n\nThis page points you to Hatchet's security resources and highlights the most important security considerations for Hatchet Cloud and self-hosted deployments."},"186":{"title":"Trust center","pageTitle":"About","pageRoute":"hatchet://docs/v1/security/index","content":"Hatchet is SOC 2 Type II, HIPAA, and GDPR compliant. Company-level security practices, compliance reports, and security documentation are available at the **[Hatchet Trust Center](https://trust.hatchet.run/)**."},"187":{"title":"Same source, same security","pageTitle":"About","pageRoute":"hatchet://docs/v1/security/index","content":"Hatchet Cloud and self-hosted Hatchet run the same codebase. The open source project is 100% MIT licensed and undergoes regular third-party penetration testing. Findings are remediated across both deployment models, so security improvements benefit all users equally."},"188":{"title":"Hatchet Cloud","pageTitle":"About","pageRoute":"hatchet://docs/v1/security/index","content":"Hatchet Cloud is Hatchet's managed service:\n\n- **Encryption in transit**: all API and worker traffic is encrypted with TLS. 
gRPC connections between workers and the engine use TLS by default.\n- **Encryption at rest**: data stored in Hatchet Cloud is encrypted at rest.\n- **Tenant isolation**: each tenant's data is logically isolated. Requests are authenticated and scoped to a single tenant.\n- **Authentication**: API tokens are scoped per-tenant with configurable expiration. The dashboard supports SSO via Google, GitHub, and more coming soon.\n- **Penetration testing**: Hatchet Cloud is regularly tested by independent security firms. Findings are tracked and remediated on a defined timeline.\n- **Infrastructure**: Hatchet Cloud runs on AWS with private networking, automated patching, and centralized logging.\n\nFor the definitive controls, policies, and compliance reports, refer to the **[Hatchet Trust Center](https://trust.hatchet.run/)**."},"189":{"title":"Self-hosted","pageTitle":"About","pageRoute":"hatchet://docs/v1/security/index","content":"When you self-host Hatchet, your security posture depends on how you deploy and operate the Hatchet services and their dependencies. 
A practical baseline:\n\n- **Put TLS in front of the API**: terminate TLS at your ingress/load balancer (or directly on the API) and only expose it to the networks that need it.\n- **Treat tokens and DB credentials as secrets**: use a secrets manager and rotate credentials; avoid committing secrets into git or baking them into images.\n- **Limit network reachability**: restrict access to the Hatchet API and PostgreSQL to trusted networks (VPC, private subnets, or Kubernetes network policies).\n- **Use least privilege**: run Hatchet with the minimum DB permissions needed; don't reuse \"admin\" DB credentials.\n- **Stay current**: keep Hatchet and dependencies up to date to pick up security fixes.\n\nSee [Self Hosting](/self-hosting) for deployment and configuration guidance, or [contact us](https://hatchet.run/contact) for help."},"190":{"title":"Audit Logs","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"# Audit Logs\n\nHatchet records audit logs for key actions performed across your organization, giving you visibility into who did what, when, and from where.\n\n> **Info:** Audit logs are available on **Business** plans and above. If you're on the\n>   open-source edition and need audit logs, [contact\n>   us](mailto:support@hatchet.run) to learn more about upgrading."},"191":{"title":"What gets logged","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"Every audit log entry captures the following:\n\nField, Description\n\n**Actor**, The user or API key that performed the action\n**Action**, The operation performed (e.g. `ApiTokenCreate`, `TenantMemberDelete`)\n**Resource type**, The type of resource acted upon (e.g. 
`workflow-run`, `api-token`)\n**Resource ID**, The specific resource that was affected\n**IP address**, The IP address of the actor (HTTP requests only)\n**User agent**, The user agent string of the request (HTTP requests only)\n**Timestamp**, When the action occurred\n**Correlation ID**, An optional ID for grouping related actions together (gRPC requests)"},"192":{"title":"Audited actions","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"The following actions are currently recorded as audit log entries:\n\nAction, Resource Type, Description\n\n`TenantInviteAccept`, `tenant-invite`, A user accepts a tenant invitation\n`TenantMemberDelete`, `tenant-member`, A tenant member is removed\n`ApiTokenCreate`, `api-token`, A new API token is created\n`ApiTokenUpdateRevoke`, `api-token`, An API token is revoked\n`V1WorkflowRunCreate`, `workflow-run`, A workflow run is triggered via the API\n`ScheduledWorkflowRunCreate`, `scheduled-workflow`, A scheduled workflow run is created"},"193":{"title":"Actor types","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"Audit log entries distinguish between two types of actors:\n\n- **User** — actions performed by a logged-in user through the dashboard or API. These entries include the actor's IP address and user agent.\n- **API key** — actions performed programmatically via an API key (e.g. triggering workflow runs over gRPC). These entries may include a correlation ID for grouping related actions."},"194":{"title":"Retention","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"Audit log entries are retained for **30 days**. Entries older than 30 days are automatically removed."},"195":{"title":"Viewing audit logs","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"Organization admins can view audit logs in the Hatchet dashboard under the organization settings. 
Logs can be filtered by tenant and time range."},"196":{"title":"API access","pageTitle":"Audit Logs","pageRoute":"hatchet://docs/v1/security/audit-logs","content":"Audit logs can also be retrieved programmatically via the Management API:\n\n```\nGET /api/v1/management/organizations/{organization}/audit-logs\n```\n\nQuery parameters:\n\nParameter, Type, Default, Description\n\n`tenant`, UUID, all active tenants in the organization, Filter logs to a specific tenant\n`limit`, integer, `1000`, Maximum number of results to return\n`offset`, integer, `0`, Number of results to skip\n`since`, ISO 8601, 24 hours ago, Start of the time range\n`until`, ISO 8601, now, End of the time range\n\nResults are ordered by timestamp descending (most recent first)."},"197":{"title":"Region Availability","pageTitle":"Region Availability","pageRoute":"hatchet://docs/v1/region-availability","content":"# Region availability\n\nHatchet Cloud is available in multiple regions so you can run workloads close to your users and data."},"198":{"title":"Current regions","pageTitle":"Region Availability","pageRoute":"hatchet://docs/v1/region-availability","content":"**Hatchet Cloud** ([cloud.onhatchet.run](https://cloud.onhatchet.run)) is currently deployed in **us-west-2** (Oregon).\n\nWe are expanding availability. Planned or available regions include:\n\nRegion, Location, Status\n\nus-west-2, Oregon (US), **Live**\nus-east-1, N. Virginia (US), Private Beta\neu-west-1, Ireland, Private Beta\nap-southeast-2, Sydney, Private Beta"},"199":{"title":"Request a region","pageTitle":"Region Availability","pageRoute":"hatchet://docs/v1/region-availability","content":"We are always open to rolling out new regions based on demand. 
If you need a specific region for latency or compliance, [contact us](https://hatchet.run/contact) and we can discuss availability."},"200":{"title":"Uptime","pageTitle":"Uptime","pageRoute":"hatchet://docs/v1/uptime","content":"# Uptime and status\n\nFor Hatchet Cloud availability and incident updates, use the status page. For self-hosted deployments, availability depends on your own infrastructure."},"201":{"title":"Hatchet Cloud status page","pageTitle":"Uptime","pageRoute":"hatchet://docs/v1/uptime","content":"Use **[status.hatchet.run](https://status.hatchet.run/)** for real-time status and incident history for Hatchet Cloud and related services:\n\n- **API**: Hatchet API availability\n- **Hatchet Cloud**: `cloud.onhatchet.run`\n- **Website**: `hatchet.run` and documentation sites\n\nYou can also subscribe to updates (email/SMS/etc.) directly from the status page."},"202":{"title":"Self-hosted deployments","pageTitle":"Uptime","pageRoute":"hatchet://docs/v1/uptime","content":"If you self-host Hatchet, you’re responsible for uptime, monitoring, backups, and upgrade procedures.\n\n- **Deployment guidance**: [Self Hosting](/self-hosting)\n- **Redundancy & failover**: [High Availability](/self-hosting/high-availability)"},"203":{"title":"Developer Experience","pageTitle":"Developer Experience","pageRoute":"hatchet://docs/v1/developer-experience","content":"# Developer experience\n\nHatchet is designed to be practical day-to-day: write workflows in code, run workers locally with a tight feedback loop, and debug production runs with good visibility."},"204":{"title":"Workflows as code","pageTitle":"Developer Experience","pageRoute":"hatchet://docs/v1/developer-experience","content":"You define tasks and workflows in your application code, then trigger them with input data. 
Hatchet handles the operational pieces you’d otherwise build yourself:\n\n- **Durability** (work isn’t lost on restarts)\n- **Retries/timeouts**\n- **Concurrency and rate limiting**\n- **Visibility into what ran, where, and why**"},"205":{"title":"Dashboard (UI)","pageTitle":"Developer Experience","pageRoute":"hatchet://docs/v1/developer-experience","content":"The dashboard is where you go to understand “what is happening right now?”:\n\n- **Runs**: status, inputs/outputs, and execution history\n- **Workers**: connected workers and health\n- **Workflows**: definitions and recent activity\n- **Settings**: tenants, API tokens, configuration\n\nIt’s useful for debugging, operational checks, and ad-hoc triggers."},"206":{"title":"CLI","pageTitle":"Developer Experience","pageRoute":"hatchet://docs/v1/developer-experience","content":"The [Hatchet CLI](/reference/cli) is the fastest way to develop and operate Hatchet from your terminal:\n\n- **`hatchet worker dev`**: run a local worker with hot reload\n- **`hatchet trigger`**: trigger a workflow from the command line (handy for smoke tests)\n- **`hatchet tui`**: terminal UI for runs/workers/workflows\n- **`hatchet profile`**: switch between tenants and environments\n\nSee the [CLI reference](/reference/cli) for installation and the full command set."},"207":{"title":"Coding agents (MCP)","pageTitle":"Developer Experience","pageRoute":"hatchet://docs/v1/developer-experience","content":"If you use AI coding tools in your editor, Hatchet’s docs can be used via an [MCP (Model Context Protocol) server](/v1/using-coding-agents). 
We also publish “agent skills” (short, step-by-step playbooks) so coding agents can run common Hatchet workflows—like starting a worker, triggering a workflow, and debugging a run—without guessing at CLI usage.\n\nSee [Using Coding Agents](/v1/using-coding-agents) for setup."},"208":{"title":"Frequently Asked Questions","pageTitle":"Frequently Asked Questions","pageRoute":"hatchet://docs/v1/faq","content":"# Frequently Asked Questions\n\nThis page provides answers to a number of the most common questions we're asked, to help you keep making great use of Hatchet!"},"209":{"title":"How do I choose how many slots to set on my worker?","pageTitle":"Frequently Asked Questions","pageRoute":"hatchet://docs/v1/faq","content":"The default slot count for workers in Hatchet is 100. In many cases, leaving the default as-is will be perfectly fine, especially when first getting set up with Hatchet.\n\nOver time, you'll likely run into one of two issues: Resource starvation (meaning the worker is using up too much memory, CPU, etc.), or wanting to squeeze more juice out of your workers.\n\nIf your workers are resource starved, there are basically two options:\n\n1. Reduce the slot count, so the worker runs less work concurrently. This is a blunt instrument, in the sense that it doesn't let you _tune_ resources to the needs of the workload running on the worker. For instance, if you're using 100% of your memory but only 10% of your CPU, reducing the slot count will likely help the worker stay online, but you'll be significantly under-utilizing CPU. In this case, you can:\n2. Reconfigure the specs of the machine the worker is running on. For instance, in the example above, you might be able to migrate from a CPU-optimized machine to a memory-optimized one, which will give you more efficient resource utilization across the board.\n\nOn the other hand, if your workers are underutilizing resources, your options are:\n\n1. 
Increase the number of slots on them so they can pick up more work. This is especially helpful for heavily I/O bound tasks, which generally are spending most of their time waiting.\n2. Similar to the opposite case of resource starvation, you can scale down the resource requirements of the machine the worker is running on.\n\n> **Info:** In general, we recommend not pushing the number of slots on a single worker\n>   much past 250-300. At this point, it likely makes sense to scale more\n>   horizontally."},"210":{"title":"Why am I seeing missed heartbeats and task reassignments?","pageTitle":"Frequently Asked Questions","pageRoute":"hatchet://docs/v1/faq","content":"Hatchet uses heartbeats to monitor worker health. Workers send a heartbeat every **4 seconds**. If the engine does not receive a heartbeat for **30 seconds**, the engine considers the worker to be inactive, and re-queues its in-flight tasks for other workers to pick up.\n\nThere are a number of common reasons a worker might miss heartbeats:\n\n- **Process crash** - the worker process exits unexpectedly (OOM kill, unhandled exception, SIGKILL).\n- **Network disruption** - the connection between the worker and the Hatchet engine is interrupted (DNS failure, firewall change, cloud network blip).\n- **Resource pressure** - High CPU or memory usage can starve the worker for resources"},"211":{"title":"Overview","pageTitle":"Overview","pageRoute":"hatchet://docs/cookbooks/index","content":"---\nasIndexPage: true\n---\n\nimport { Cards } from \"nextra/components\";\n\n# Cookbooks\n\nOur cookbooks are guides to help you solve common problems you'll find easy to tackle with Hatchet."},"212":{"title":"Webhooks","pageTitle":"Overview","pageRoute":"hatchet://docs/cookbooks/index","content":"Receive webhooks from external services and use them to trigger tasks in Hatchet. 
Each guide walks through setup end-to-end — creating the webhook in Hatchet, wiring it up to the source, and writing a task that handles the incoming event.\n\n\n  \n    Handle payment events, subscription changes, and other Stripe webhooks.\n  \n  \n    React to pushes, pull requests, issues, and other GitHub events.\n  \n  \n    Respond to Slack events like messages, reactions, and slash commands."},"213":{"title":"Stripe","pageTitle":"Stripe","pageRoute":"hatchet://docs/cookbooks/webhooks-stripe","content":"# Stripe Webhooks\n\nStripe sends webhooks for all sorts of events, such as payments succeeding, subscription cancellations, invoice creation, and so on. This guide walks through setting up webhooks from Stripe to trigger events directly in Hatchet."},"214":{"title":"Setup","pageTitle":"Stripe","pageRoute":"hatchet://docs/cookbooks/webhooks-stripe","content":"### Get your Stripe webhook signing secret\n\nIn the [Stripe Dashboard](https://dashboard.stripe.com/webhooks), you'll create a new webhook endpoint. Don't fill in the URL yet — you'll get that from Hatchet in the next step. See [Stripe's webhooks guide](https://docs.stripe.com/webhooks) for more details on setting this up.\n\nFor now, note the **signing secret** that Stripe generates for you (it starts with `whsec_`). You'll need this to tell Hatchet how to verify incoming requests.\n\n### Create the webhook in Hatchet\n\nIn the Hatchet dashboard, go to **Webhooks** and create a new webhook with the following settings:\n\nField, Value\n\n**Name**, `stripe` (or whatever you'd like)\n**Source**, Stripe\n**Event Key Expression**, `'stripe:' + input.type`\n**Secret**, Your `whsec_...` signing secret\n\nThe event key expression here takes the `type` field from Stripe's payload (something like `payment_intent.created`) and prefixes it with `stripe:` so your event keys are namespaced. 
When a webhook from Stripe is ingested with an `input.type` of `payment_intent.created`, a corresponding Hatchet event with a key of `stripe:payment_intent.created` will be created.\n\nOnce you've created the webhook, copy the URL that Hatchet generates.\n\n### Add the URL to Stripe\n\nGo back to the Stripe Dashboard webhook you created in step 1 and paste in the Hatchet webhook URL. Select the events you want to listen for (or just select all of them — Hatchet will only trigger workflows that match the event key).\n\n### Write a task that listens for Stripe events\n\nNow you just need a task with a matching `on_events` trigger. For example, to handle successful payments:\n\n#### Python\n\n```python\nclass StripeObject(BaseModel):\n    customer: str\n    amount: int\n\n\nclass StripeData(BaseModel):\n    object: StripeObject\n\n\nclass StripePaymentInput(BaseModel):\n    type: str\n    data: StripeData\n\n\nclass StripePaymentOutput(BaseModel):\n    customer: str\n    amount: int\n\n\n@hatchet.task(\n    input_validator=StripePaymentInput,\n    on_events=[\"stripe:payment_intent.succeeded\"],\n)\ndef handle_stripe_payment(\n    input: StripePaymentInput, ctx: Context\n) -> StripePaymentOutput:\n    customer = input.data.object.customer\n    amount = input.data.object.amount\n    print(f\"Payment of {amount} from {customer}\")\n    return StripePaymentOutput(customer=customer, amount=amount)\n```\n\n#### Typescript\n\n```typescript\ntype StripePaymentInput = {\n  type: string;\n  data: {\n    object: {\n      customer: string;\n      amount: number;\n    };\n  };\n};\n\nexport const handleStripePayment = hatchet.task({\n  name: 'handle-stripe-payment',\n  on: {\n    event: 'stripe:payment_intent.succeeded',\n  },\n  fn: async (input: StripePaymentInput) => {\n    const { customer, amount } = input.data.object;\n    console.log(`Payment of ${amount} from ${customer}`);\n    return { customer, amount };\n  },\n});\n```\n\n#### Go\n\n```go\ntype 
StripePaymentInput struct {\n\tType string `json:\"type\"`\n\tData struct {\n\t\tObject struct {\n\t\t\tCustomer string `json:\"customer\"`\n\t\t\tAmount   int    `json:\"amount\"`\n\t\t} `json:\"object\"`\n\t} `json:\"data\"`\n}\n\nstripePayment := client.NewStandaloneTask(\n\t\"handle-stripe-payment\",\n\tfunc(ctx hatchet.Context, input StripePaymentInput) (*struct {\n\t\tCustomer string `json:\"customer\"`\n\t\tAmount   int    `json:\"amount\"`\n\t}, error) {\n\t\tfmt.Printf(\"Payment of %d from %s\\n\", input.Data.Object.Amount, input.Data.Object.Customer)\n\t\treturn &struct {\n\t\t\tCustomer string `json:\"customer\"`\n\t\t\tAmount   int    `json:\"amount\"`\n\t\t}{\n\t\t\tCustomer: input.Data.Object.Customer,\n\t\t\tAmount:   input.Data.Object.Amount,\n\t\t}, nil\n\t},\n\thatchet.WithWorkflowEvents(\"stripe:payment_intent.succeeded\"),\n)\n```\n\n#### Ruby\n\n```ruby\nHANDLE_STRIPE_PAYMENT = HATCHET.task(\n  name: \"handle-stripe-payment\",\n  on_events: [\"stripe:payment_intent.succeeded\"]\n) do |input, ctx|\n  customer = input[\"data\"][\"object\"][\"customer\"]\n  amount = input[\"data\"][\"object\"][\"amount\"]\n  puts \"Payment of #{amount} from #{customer}\"\n  { \"customer\" => customer, \"amount\" => amount }\nend\n```\n\n### Test it\n\nYou can use Stripe's \"Send test webhook\" feature in the dashboard, or trigger a real event in test mode. You should see the task run appear in the Hatchet dashboard."},"215":{"title":"GitHub","pageTitle":"GitHub","pageRoute":"hatchet://docs/cookbooks/webhooks-github","content":"# GitHub Webhooks\n\nGitHub can send webhooks for repository events — pushes, pull requests, issues, releases, and so on. 
This guide walks through connecting GitHub webhooks to Hatchet."},"216":{"title":"Setup","pageTitle":"GitHub","pageRoute":"hatchet://docs/cookbooks/webhooks-github","content":"### Create the webhook in Hatchet\n\nIn the Hatchet dashboard, go to **Webhooks** and create a new webhook with the following settings:\n\nField, Value\n\n**Name**, `github` (or whatever you'd like)\n**Source**, GitHub\n**Event Key Expression**, `'github:' + headers['x-github-event'] + ':' + input.action`\n**Secret**, A secret string of your choosing (you'll use the same one in GitHub)\n\nA quick note on the event key expression: GitHub sends the event type (like `pull_request` or `issues`) in the `x-github-event` header, and the specific action (like `opened` or `closed`) in the payload's `action` field. The expression above combines them to produce keys like `github:pull_request:opened`.\n\nNot all GitHub events have an `action` field, though. Push events, for instance, don't. If you want to handle events that might not have an `action`, you could use a simpler expression like `'github:' + headers['x-github-event']` and handle action-level routing in your task logic instead. Or you could create two separate webhooks — one for action-based events and one for action-less events.\n\nOnce you've created the webhook, copy the URL.\n\n### Configure the webhook in GitHub\n\nGo to your repository (or organization) settings, find **Webhooks**, and add a new webhook. See [GitHub's webhook docs](https://docs.github.com/en/webhooks/using-webhooks/creating-webhooks) for the full walkthrough.\n\n1. **Payload URL**: Paste the Hatchet webhook URL.\n2. **Content type**: Select `application/json`.\n3. **Secret**: Enter the same secret you used when creating the webhook in Hatchet.\n4. **Events**: Choose \"Let me select individual events\" and pick the ones you care about, or select \"Send me everything\" if you prefer.\n\n> **Warning:** Make sure you set the content type to `application/json`. 
GitHub defaults to\n>   `application/x-www-form-urlencoded`, which won't work with Hatchet's JSON\n>   payload parsing.\n\n### Write a task that listens for GitHub events\n\nHere's an example that triggers when a pull request is opened:\n\n#### Python\n\n```python\nclass GitHubPullRequest(BaseModel):\n    number: int\n    title: str\n\n\nclass GitHubRepository(BaseModel):\n    full_name: str\n\n\nclass GitHubPRInput(BaseModel):\n    action: str\n    pull_request: GitHubPullRequest\n    repository: GitHubRepository\n\n\nclass GitHubPROutput(BaseModel):\n    repo: str\n    pr: int\n\n\n@hatchet.task(\n    input_validator=GitHubPRInput,\n    on_events=[\"github:pull_request:opened\"],\n)\ndef handle_github_pr(input: GitHubPRInput, ctx: Context) -> GitHubPROutput:\n    repo = input.repository.full_name\n    pr_number = input.pull_request.number\n    title = input.pull_request.title\n    print(f\"PR #{pr_number} opened on {repo}: {title}\")\n    return GitHubPROutput(repo=repo, pr=pr_number)\n```\n\n#### Typescript\n\n```typescript\ntype GitHubPRInput = {\n  action: string;\n  pull_request: {\n    number: number;\n    title: string;\n  };\n  repository: {\n    full_name: string;\n  };\n};\n\nexport const handleGitHubPR = hatchet.task({\n  name: 'handle-github-pr',\n  on: {\n    event: 'github:pull_request:opened',\n  },\n  fn: async (input: GitHubPRInput) => {\n    const repo = input.repository.full_name;\n    const prNumber = input.pull_request.number;\n    const { title } = input.pull_request;\n    console.log(`PR #${prNumber} opened on ${repo}: ${title}`);\n    return { repo, pr: prNumber };\n  },\n});\n```\n\n#### Go\n\n```go\ntype GitHubPRInput struct {\n\tAction      string `json:\"action\"`\n\tPullRequest struct {\n\t\tNumber int    `json:\"number\"`\n\t\tTitle  string `json:\"title\"`\n\t} `json:\"pull_request\"`\n\tRepository struct {\n\t\tFullName string `json:\"full_name\"`\n\t} `json:\"repository\"`\n}\n\ngithubPR := 
client.NewStandaloneTask(\n\t\"handle-github-pr\",\n\tfunc(ctx hatchet.Context, input GitHubPRInput) (*struct {\n\t\tRepo string `json:\"repo\"`\n\t\tPR   int    `json:\"pr\"`\n\t}, error) {\n\t\tfmt.Printf(\"PR #%d opened on %s: %s\\n\", input.PullRequest.Number, input.Repository.FullName, input.PullRequest.Title)\n\t\treturn &struct {\n\t\t\tRepo string `json:\"repo\"`\n\t\t\tPR   int    `json:\"pr\"`\n\t\t}{\n\t\t\tRepo: input.Repository.FullName,\n\t\t\tPR:   input.PullRequest.Number,\n\t\t}, nil\n\t},\n\thatchet.WithWorkflowEvents(\"github:pull_request:opened\"),\n)\n```\n\n#### Ruby\n\n```ruby\nHANDLE_GITHUB_PR = HATCHET.task(\n  name: \"handle-github-pr\",\n  on_events: [\"github:pull_request:opened\"]\n) do |input, ctx|\n  repo = input[\"repository\"][\"full_name\"]\n  pr_number = input[\"pull_request\"][\"number\"]\n  title = input[\"pull_request\"][\"title\"]\n  puts \"PR ##{pr_number} opened on #{repo}: #{title}\"\n  { \"repo\" => repo, \"pr\" => pr_number }\nend\n```\n\n### Test it\n\nAfter saving the webhook in GitHub, GitHub will send a `ping` event to verify the connection. You can also use the \"Redeliver\" button in GitHub's webhook settings to replay past events, or just open a PR to trigger a real event."},"217":{"title":"Slack","pageTitle":"Slack","pageRoute":"hatchet://docs/cookbooks/webhooks-slack","content":"# Slack Webhooks\n\nSlack has several different ways to send data to your app — slash commands, interactive components (buttons, modals, etc.), and event subscriptions. They each have different payload formats and authentication mechanisms, which makes the setup a bit more involved than other webhook integrations. This guide walks through each one.\n\n> **Info:** Slack's different interaction modes use different content types. Event\n>   subscriptions send JSON, but slash commands and interactive components send\n>   form-encoded data. 
Hatchet handles both, but it's good to be aware of the\n>   difference when writing your CEL expressions and task logic."},"218":{"title":"Slack App Setup","pageTitle":"Slack","pageRoute":"hatchet://docs/cookbooks/webhooks-slack","content":"Before configuring anything in Hatchet, you'll need a Slack app. If you don't already have one:\n\n1. Go to [api.slack.com/apps](https://api.slack.com/apps) and click **Create New App**. See [Slack's getting started guide](https://api.slack.com/quickstart) if this is your first time.\n2. Choose **From scratch**, give it a name, and select your workspace.\n3. Once created, go to **Basic Information** and note the **Signing Secret** — you'll need this for Hatchet. See [Slack's signing secret docs](https://api.slack.com/authentication/verifying-requests-from-slack) for more on how request verification works."},"219":{"title":"Event Subscriptions","pageTitle":"Slack","pageRoute":"hatchet://docs/cookbooks/webhooks-slack","content":"Event subscriptions are what Slack uses to notify your app about things happening in the workspace — messages being posted, channels being created, users joining, and so on.\n\n\n### Create the webhook in Hatchet\n\nField, Value\n\n**Name**, `slack-events`\n**Source**, Slack\n**Event Key Expression**, `'slack:event:' + input.event.type`\n**Secret**, Your Slack app's signing secret\n\nCopy the generated URL.\n\n### Enable Event Subscriptions in Slack\n\nIn your Slack app settings, go to [**Event Subscriptions**](https://api.slack.com/events), toggle it on, and paste the Hatchet webhook URL into the **Request URL** field.\n\n> **Info:** Slack will send a challenge request to verify the URL. 
Hatchet handles this\n>   automatically — you should see a green checkmark confirming the URL is\n>   verified.\n\nThen, under **Subscribe to bot events**, add the events you want to listen for (e.g., `message.channels`, `app_mention`, `member_joined_channel`).\n\n### Write a task\n\n#### Python\n\n```python\nclass SlackEvent(BaseModel):\n    type: str\n    user: str\n    text: str\n    channel: str\n\n\nclass SlackEventInput(BaseModel):\n    event: SlackEvent\n\n\nclass SlackEventOutput(BaseModel):\n    handled: bool\n\n\n@hatchet.task(\n    input_validator=SlackEventInput,\n    on_events=[\"slack:event:app_mention\"],\n)\ndef handle_slack_mention(input: SlackEventInput, ctx: Context) -> SlackEventOutput:\n    print(\n        f\"Mentioned by {input.event.user} in {input.event.channel}: {input.event.text}\"\n    )\n    return SlackEventOutput(handled=True)\n```\n\n#### Typescript\n\n```typescript\ntype SlackEventInput = {\n  event: {\n    type: string;\n    user: string;\n    text: string;\n    channel: string;\n  };\n};\n\nexport const handleSlackMention = hatchet.task({\n  name: 'handle-slack-mention',\n  on: {\n    event: 'slack:event:app_mention',\n  },\n  fn: async (input: SlackEventInput) => {\n    const { user, text, channel } = input.event;\n    console.log(`Mentioned by ${user} in ${channel}: ${text}`);\n    return { handled: true };\n  },\n});\n```\n\n#### Go\n\n```go\ntype SlackEventInput struct {\n\tEvent struct {\n\t\tType    string `json:\"type\"`\n\t\tUser    string `json:\"user\"`\n\t\tText    string `json:\"text\"`\n\t\tChannel string `json:\"channel\"`\n\t} `json:\"event\"`\n}\n\nslackMention := client.NewStandaloneTask(\n\t\"handle-slack-mention\",\n\tfunc(ctx hatchet.Context, input SlackEventInput) (*struct {\n\t\tHandled bool `json:\"handled\"`\n\t}, error) {\n\t\tfmt.Printf(\"Mentioned by %s in %s: %s\\n\", input.Event.User, input.Event.Channel, input.Event.Text)\n\t\treturn &struct {\n\t\t\tHandled bool `json:\"handled\"`\n\t\t}{Handled: 
true}, nil\n\t},\n\thatchet.WithWorkflowEvents(\"slack:event:app_mention\"),\n)\n```\n\n#### Ruby\n\n```ruby\nHANDLE_SLACK_MENTION = HATCHET.task(\n  name: \"handle-slack-mention\",\n  on_events: [\"slack:event:app_mention\"]\n) do |input, ctx|\n  event = input[\"event\"]\n  puts \"Mentioned by #{event[\"user\"]} in #{event[\"channel\"]}: #{event[\"text\"]}\"\n  { \"handled\" => true }\nend\n```"},"220":{"title":"Slash Commands","pageTitle":"Slack","pageRoute":"hatchet://docs/cookbooks/webhooks-slack","content":"Slash commands work differently from event subscriptions. When a user types something like `/deploy production`, Slack sends a form-encoded POST to your configured URL. The payload includes the command, the text after it, the user, the channel, and a `response_url` you can use to send a response back.\n\n\n### Create the webhook in Hatchet\n\nField, Value\n\n**Name**, `slack-commands`\n**Source**, Slack\n**Event Key Expression**, `'slack:command:' + input.command`\n**Secret**, Your Slack app's signing secret\n\nCopy the generated URL.\n\n> **Info:** Even though slash commands send form-encoded payloads, Hatchet parses them\n>   into a JSON object so you can use the same `input.field` syntax in your CEL\n>   expressions.\n\n### Add the slash command in Slack\n\nIn your Slack app settings, go to [**Slash Commands**](https://api.slack.com/interactivity/slash-commands) and create a new command. 
Set the **Request URL** to the Hatchet webhook URL you just copied.\n\n### Write a task\n\nThe `input.command` field includes the leading slash (e.g., `/deploy`), so your event key will look like `slack:command:/deploy`.\n\n#### Python\n\n```python\nclass SlackCommandInput(BaseModel):\n    command: str\n    text: str\n    user_name: str\n    response_url: str\n\n\nclass SlackCommandOutput(BaseModel):\n    command: str\n    args: str\n\n\n@hatchet.task(\n    input_validator=SlackCommandInput,\n    on_events=[\"slack:command:/deploy\"],\n)\ndef handle_slack_command(input: SlackCommandInput, ctx: Context) -> SlackCommandOutput:\n    print(f\"{input.user_name} ran {input.command} {input.text}\")\n    return SlackCommandOutput(command=input.command, args=input.text)\n```\n\n#### Typescript\n\n```typescript\ntype SlackCommandInput = {\n  command: string;\n  text: string;\n  user_name: string;\n  response_url: string;\n};\n\nexport const handleSlackCommand = hatchet.task({\n  name: 'handle-slack-command',\n  on: {\n    event: 'slack:command:/deploy',\n  },\n  fn: async (input: SlackCommandInput) => {\n    console.log(`${input.user_name} ran ${input.command} ${input.text}`);\n    return { command: input.command, args: input.text };\n  },\n});\n```\n\n#### Go\n\n```go\ntype SlackCommandInput struct {\n\tCommand     string `json:\"command\"`\n\tText        string `json:\"text\"`\n\tUserName    string `json:\"user_name\"`\n\tResponseURL string `json:\"response_url\"`\n}\n\nslackCommand := client.NewStandaloneTask(\n\t\"handle-slack-command\",\n\tfunc(ctx hatchet.Context, input SlackCommandInput) (*struct {\n\t\tCommand string `json:\"command\"`\n\t\tArgs    string `json:\"args\"`\n\t}, error) {\n\t\tfmt.Printf(\"%s ran %s %s\\n\", input.UserName, input.Command, input.Text)\n\t\treturn &struct {\n\t\t\tCommand string `json:\"command\"`\n\t\t\tArgs    string `json:\"args\"`\n\t\t}{\n\t\t\tCommand: input.Command,\n\t\t\tArgs:    input.Text,\n\t\t}, 
nil\n\t},\n\thatchet.WithWorkflowEvents(\"slack:command:/deploy\"),\n)\n```\n\n#### Ruby\n\n```ruby\nHANDLE_SLACK_COMMAND = HATCHET.task(\n  name: \"handle-slack-command\",\n  on_events: [\"slack:command:/deploy\"]\n) do |input, ctx|\n  puts \"#{input[\"user_name\"]} ran #{input[\"command\"]} #{input[\"text\"]}\"\n  { \"command\" => input[\"command\"], \"args\" => input[\"text\"] }\nend\n```"},"221":{"title":"Interactive Components","pageTitle":"Slack","pageRoute":"hatchet://docs/cookbooks/webhooks-slack","content":"Interactive components — buttons, menus, modals — send payloads to an **Interactivity Request URL** when a user interacts with them. These are also form-encoded, with the actual payload nested inside a `payload` field as a JSON string.\n\n\n### Create the webhook in Hatchet\n\nField, Value\n\n**Name**, `slack-interactions`\n**Source**, Slack\n**Event Key Expression**, `'slack:interaction:' + input.type`\n**Secret**, Your Slack app's signing secret\n\n### Enable Interactivity in Slack\n\nIn your Slack app settings, go to [**Interactivity & Shortcuts**](https://api.slack.com/interactivity/handling), toggle it on, and paste the Hatchet webhook URL into the **Request URL** field.\n\n### Write a task\n\n#### Python\n\n```python\nclass SlackAction(BaseModel):\n    action_id: str\n\n\nclass SlackUser(BaseModel):\n    username: str\n\n\nclass SlackInteractionInput(BaseModel):\n    type: str\n    actions: list[SlackAction]\n    user: SlackUser\n\n\nclass SlackInteractionOutput(BaseModel):\n    action: str\n\n\n@hatchet.task(\n    input_validator=SlackInteractionInput,\n    on_events=[\"slack:interaction:block_actions\"],\n)\ndef handle_slack_interaction(\n    input: SlackInteractionInput, ctx: Context\n) -> SlackInteractionOutput:\n    action = input.actions[0]\n    print(f\"{input.user.username} clicked button: {action.action_id}\")\n    return SlackInteractionOutput(action=action.action_id)\n```\n\n#### Typescript\n\n```typescript\ntype SlackInteractionInput = 
{\n  type: string;\n  actions: Array<{ action_id: string }>;\n  user: { username: string };\n};\n\nexport const handleSlackInteraction = hatchet.task({\n  name: 'handle-slack-interaction',\n  on: {\n    event: 'slack:interaction:block_actions',\n  },\n  fn: async (input: SlackInteractionInput) => {\n    const [action] = input.actions;\n    console.log(`${input.user.username} clicked button: ${action.action_id}`);\n    return { action: action.action_id };\n  },\n});\n```\n\n#### Go\n\n```go\ntype SlackInteractionInput struct {\n\tType    string `json:\"type\"`\n\tActions []struct {\n\t\tActionID string `json:\"action_id\"`\n\t} `json:\"actions\"`\n\tUser struct {\n\t\tUsername string `json:\"username\"`\n\t} `json:\"user\"`\n}\n\nslackInteraction := client.NewStandaloneTask(\n\t\"handle-slack-interaction\",\n\tfunc(ctx hatchet.Context, input SlackInteractionInput) (*struct {\n\t\tAction string `json:\"action\"`\n\t}, error) {\n\t\taction := input.Actions[0]\n\t\tfmt.Printf(\"%s clicked button: %s\\n\", input.User.Username, action.ActionID)\n\t\treturn &struct {\n\t\t\tAction string `json:\"action\"`\n\t\t}{Action: action.ActionID}, nil\n\t},\n\thatchet.WithWorkflowEvents(\"slack:interaction:block_actions\"),\n)\n```\n\n#### Ruby\n\n```ruby\nHANDLE_SLACK_INTERACTION = HATCHET.task(\n  name: \"handle-slack-interaction\",\n  on_events: [\"slack:interaction:block_actions\"]\n) do |input, ctx|\n  action = input[\"actions\"][0]\n  puts \"#{input[\"user\"][\"username\"]} clicked button: #{action[\"action_id\"]}\"\n  { \"action\" => action[\"action_id\"] }\nend\n```"},"222":{"title":"Introduction","pageTitle":"Introduction","pageRoute":"hatchet://docs/self-hosting/index","content":"# Self-Hosting the Hatchet Control Plane\n\nSelf-hosting Hatchet means running your own instance of the **Hatchet Control Plane** - the central orchestration system that manages workflows, schedules tasks, and coordinates worker execution. 
This is different from running workers, which can connect to any Hatchet instance (self-hosted or Hatchet Cloud)."},"223":{"title":"What You're Self-Hosting","pageTitle":"Introduction","pageRoute":"hatchet://docs/self-hosting/index","content":"When you self-host Hatchet, you're deploying:\n\n- **API Server** - REST APIs for workflow management\n- **Engine** - gRPC API for core workflow orchestration and task scheduling\n- **Database** - PostgreSQL for storing workflow state and metadata\n- **Message Queue (optional)** - RabbitMQ for inter-service communication and high-throughput real-time updates\n- **Dashboard** - Web UI for monitoring workflows and debugging\n\nYour **workers** (the processes that execute your workflow steps) will connect to your self-hosted control plane and execute tasks."},"224":{"title":"Deployment Options","pageTitle":"Introduction","pageRoute":"hatchet://docs/self-hosting/index","content":"The fastest way to get a Hatchet instance running locally is with the [Hatchet CLI](/cli) (which wraps Hatchet Lite):\n\n```sh\nhatchet server start\n```\n\nThere are currently three supported ways to self-host the Hatchet Control Plane:\n\nDocker:\n\n1. [Hatchet Lite](./self-hosting/hatchet-lite.mdx) - Single docker image with bundled engine and API (development, testing, or low-throughput production)\n2. [Docker Compose](./self-hosting/docker-compose.mdx) - Multi-container setup with PostgreSQL and RabbitMQ (production)\n\nKubernetes:\n\n1. [Quickstart with Helm](./self-hosting/kubernetes-quickstart.mdx) - Production-ready Helm charts (production)"},"225":{"title":"Hatchet Lite","pageTitle":"Hatchet Lite","pageRoute":"hatchet://docs/self-hosting/hatchet-lite","content":"# Hatchet Lite Deployment\n\nTo get up and running quickly, you can deploy via the `hatchet-lite` image. 
This image is designed for development and low-volume use-cases.\n\n\n### Prerequisites\n\nThis deployment requires [Docker](https://docs.docker.com/engine/install/) installed locally to work.\n\n### Getting Hatchet Lite Running\n\n#### Hatchet CLI\n\nThe easiest way to get Hatchet Lite running is via the Hatchet CLI. Simply run the following command:\n\n```sh\nhatchet server start\n```\n\n#### With Postgres (Default)\n\nTo use Postgres as both your DB and message queue, copy the following `docker-compose.hatchet.yml` file to the root of your repository:\n\n> **Info:** If you have an existing Postgres instance already running, you can simply\n>   point `DATABASE_URL` to that instance and ignore the `postgres` service\n>   deployment in the following docker-compose file.\n\n```yaml filename=\"docker-compose.hatchet.yml\" copy\nversion: \"3.8\"\nname: hatchet-lite\nservices:\n  postgres:\n    image: postgres:15.6\n    command: postgres -c 'max_connections=200'\n    restart: always\n    environment:\n      - POSTGRES_USER=hatchet\n      - POSTGRES_PASSWORD=hatchet\n      - POSTGRES_DB=hatchet\n    volumes:\n      - hatchet_lite_postgres_data:/var/lib/postgresql/data\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -d hatchet -U hatchet\"]\n      interval: 10s\n      timeout: 10s\n      retries: 5\n      start_period: 10s\n  hatchet-lite:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest\n    ports:\n      - \"8888:8888\"\n      - \"7077:7077\"\n    depends_on:\n      postgres:\n        condition: service_healthy\n    environment:\n      # Refer to https://docs.hatchet.run/self-hosting/configuration-options\n      # for a list of all supported environment variables\n      DATABASE_URL: \"postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable\"\n      SERVER_AUTH_COOKIE_DOMAIN: localhost\n      SERVER_AUTH_COOKIE_INSECURE: \"t\"\n      SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n      SERVER_GRPC_INSECURE: \"t\"\n      
SERVER_GRPC_BROADCAST_ADDRESS: localhost:7077\n      SERVER_GRPC_PORT: \"7077\"\n      SERVER_URL: http://localhost:8888\n      SERVER_AUTH_SET_EMAIL_VERIFIED: \"t\"\n      SERVER_DEFAULT_ENGINE_VERSION: \"V1\"\n      SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: localhost:7077\n    volumes:\n      - \"hatchet_lite_config:/config\"\n\nvolumes:\n  hatchet_lite_postgres_data:\n  hatchet_lite_config:\n```\n\nThen run `docker-compose -f docker-compose.hatchet.yml up` to get the Hatchet Lite instance running.\n\n#### With Postgres + RabbitMQ\n\nTo use Postgres as your DB and RabbitMQ as the message queue, copy the following `docker-compose.hatchet.yml` file to the root of your repository:\n\n> **Info:** If you have an existing Postgres instance already running, you can simply\n>   point `DATABASE_URL` to that instance and ignore the `postgres` service\n>   deployment in the following docker-compose file.\n\n```yaml filename=\"docker-compose.hatchet.yml\" copy\nversion: \"3.8\"\nname: hatchet-lite\nservices:\n  postgres:\n    image: postgres:15.6\n    command: postgres -c 'max_connections=200'\n    restart: always\n    environment:\n      - POSTGRES_USER=hatchet\n      - POSTGRES_PASSWORD=hatchet\n      - POSTGRES_DB=hatchet\n    volumes:\n      - hatchet_lite_postgres_data:/var/lib/postgresql/data\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -d hatchet -U hatchet\"]\n      interval: 10s\n      timeout: 10s\n      retries: 5\n      start_period: 10s\n  rabbitmq:\n    image: \"rabbitmq:3-management\"\n    hostname: \"rabbitmq\"\n    ports:\n      - \"5672:5672\"\n      - \"15672:15672\"\n    environment:\n      RABBITMQ_DEFAULT_USER: \"user\"\n      RABBITMQ_DEFAULT_PASS: \"password\"\n    volumes:\n      - \"hatchet_rabbitmq_data:/var/lib/rabbitmq\"\n      - \"hatchet_rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf\"\n    healthcheck:\n      test: [\"CMD\", \"rabbitmqctl\", \"status\"]\n      interval: 30s\n      timeout: 10s\n      retries: 5\n  
hatchet-lite:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest\n    ports:\n      - \"8888:8888\"\n      - \"7077:7077\"\n    depends_on:\n      postgres:\n        condition: service_healthy\n    environment:\n      SERVER_MSGQUEUE_KIND: rabbitmq\n      SERVER_MSGQUEUE_RABBITMQ_URL: \"amqp://user:password@rabbitmq:5672/\"\n      # Refer to https://docs.hatchet.run/self-hosting/configuration-options\n      # for a list of all supported environment variables\n      DATABASE_URL: \"postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable\"\n      SERVER_AUTH_COOKIE_DOMAIN: localhost\n      SERVER_AUTH_COOKIE_INSECURE: \"t\"\n      SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n      SERVER_GRPC_INSECURE: \"t\"\n      SERVER_GRPC_BROADCAST_ADDRESS: localhost:7077\n      SERVER_GRPC_PORT: \"7077\"\n      SERVER_URL: http://localhost:8888\n      SERVER_AUTH_SET_EMAIL_VERIFIED: \"t\"\n      SERVER_DEFAULT_ENGINE_VERSION: \"V1\"\n    volumes:\n      - \"hatchet_lite_config:/config\"\n\nvolumes:\n  hatchet_lite_postgres_data:\n  hatchet_lite_config:\n  hatchet_rabbitmq_data:\n  hatchet_rabbitmq.conf:\n```\n\nThen run `docker-compose -f docker-compose.hatchet.yml up` to get the Hatchet Lite instance running.\n\n### Accessing Hatchet Lite\n\nOnce the Hatchet Lite instance is running, you can access the Hatchet Lite UI at [http://localhost:8888](http://localhost:8888).\n\nBy default, a user is created with the following credentials:\n\n```\nEmail: admin@example.com\nPassword: Admin123!!\n```\n\nAfter logging in, follow the steps in the UI to create your first tenant and run your first workflow!"},"226":{"title":"Docker Compose","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"# Docker Compose Deployment\n\nThis guide shows how to deploy Hatchet using Docker Compose for a production-ready deployment. 
If you'd like to get up and running quickly, you can also deploy Hatchet using the `hatchet-lite` image following the tutorial here: [Hatchet Lite Deployment](/self-hosting/hatchet-lite).\n\nThis guide uses RabbitMQ as a message broker for Hatchet. This is optional: if you'd like to use Postgres as a message broker, modify the `setup-config` service in the `docker-compose.yml` file with the following env var, and delete all RabbitMQ references:\n\n```sh\nSERVER_MSGQUEUE_KIND=postgres\n```"},"227":{"title":"Quickstart","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"### Prerequisites\n\nThis deployment requires [Docker](https://docs.docker.com/engine/install/) installed locally to work.\n\n### Create files\n\nWe will be creating a `docker-compose.yml` file in the root of your repository:\n\n```\nroot/\n  docker-compose.yml\ndocker-compose.yml\n```\n\n```yaml filename=\"docker-compose.yml\" copy\nversion: \"3.8\"\nservices:\n  postgres:\n    image: postgres:15.6\n    command: postgres -c 'max_connections=1000'\n    restart: always\n    hostname: \"postgres\"\n    environment:\n      - POSTGRES_USER=hatchet\n      - POSTGRES_PASSWORD=hatchet\n      - POSTGRES_DB=hatchet\n    ports:\n      - \"5435:5432\"\n    volumes:\n      - hatchet_postgres_data:/var/lib/postgresql/data\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -d hatchet -U hatchet\"]\n      interval: 10s\n      timeout: 10s\n      retries: 5\n      start_period: 10s\n  rabbitmq:\n    image: \"rabbitmq:3-management\"\n    hostname: \"rabbitmq\"\n    ports:\n      - \"5673:5672\" # RabbitMQ\n      - \"15673:15672\" # Management UI\n    environment:\n      RABBITMQ_DEFAULT_USER: \"user\"\n      RABBITMQ_DEFAULT_PASS: \"password\"\n    volumes:\n      - \"hatchet_rabbitmq_data:/var/lib/rabbitmq\"\n      - \"hatchet_rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf\" # Configuration file mount\n    healthcheck:\n      test: [\"CMD\", \"rabbitmqctl\", 
\"status\"]\n      interval: 10s\n      timeout: 10s\n      retries: 5\n  migration:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-migrate:latest\n    command: /hatchet/hatchet-migrate\n    environment:\n      DATABASE_URL: \"postgres://hatchet:hatchet@postgres:5432/hatchet\"\n    depends_on:\n      postgres:\n        condition: service_healthy\n  setup-config:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-admin:latest\n    command: /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/config --overwrite=false\n    environment:\n      DATABASE_URL: \"postgres://hatchet:hatchet@postgres:5432/hatchet\"\n      SERVER_MSGQUEUE_RABBITMQ_URL: amqp://user:password@rabbitmq:5672/\n      SERVER_AUTH_COOKIE_DOMAIN: localhost:8080\n      SERVER_AUTH_COOKIE_INSECURE: \"t\"\n      SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n      SERVER_GRPC_INSECURE: \"t\"\n      SERVER_GRPC_BROADCAST_ADDRESS: localhost:7077\n      SERVER_DEFAULT_ENGINE_VERSION: \"V1\"\n      SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: hatchet-engine:7070\n    volumes:\n      - hatchet_certs:/hatchet/certs\n      - hatchet_config:/hatchet/config\n    depends_on:\n      migration:\n        condition: service_completed_successfully\n      rabbitmq:\n        condition: service_healthy\n      postgres:\n        condition: service_healthy\n  hatchet-engine:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-engine:latest\n    command: /hatchet/hatchet-engine --config /hatchet/config\n    restart: on-failure\n    depends_on:\n      setup-config:\n        condition: service_completed_successfully\n      migration:\n        condition: service_completed_successfully\n    ports:\n      - \"7077:7070\"\n    environment:\n      DATABASE_URL: \"postgres://hatchet:hatchet@postgres:5432/hatchet\"\n      SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n      SERVER_GRPC_INSECURE: \"t\"\n    volumes:\n      - hatchet_certs:/hatchet/certs\n      - hatchet_config:/hatchet/config\n  hatchet-dashboard:\n    
image: ghcr.io/hatchet-dev/hatchet/hatchet-dashboard:latest\n    command: sh ./entrypoint.sh --config /hatchet/config\n    ports:\n      - 8080:80\n    restart: on-failure\n    depends_on:\n      setup-config:\n        condition: service_completed_successfully\n      migration:\n        condition: service_completed_successfully\n    environment:\n      DATABASE_URL: \"postgres://hatchet:hatchet@postgres:5432/hatchet\"\n    volumes:\n      - hatchet_certs:/hatchet/certs\n      - hatchet_config:/hatchet/config\n\nvolumes:\n  hatchet_postgres_data:\n  hatchet_rabbitmq_data:\n  hatchet_rabbitmq.conf:\n  hatchet_config:\n  hatchet_certs:\n```\n\n### Get Hatchet up and running\n\nTo start the services, run the following command in the root of your repository:\n\n```bash\ndocker compose up\n```\n\nWait for the `hatchet-engine` and `hatchet-dashboard` services to start.\n\n### Accessing Hatchet\n\nOnce the Hatchet instance is running, you can access the Hatchet UI at [http://localhost:8080](http://localhost:8080).\n\nBy default, a user is created with the following credentials:\n\n```\nEmail: admin@example.com\nPassword: Admin123!!\n```"},"228":{"title":"Run tasks against the Hatchet instance","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"To run tasks against this instance, you will first need to create an API token for your worker. There are two ways to do this:\n\n1. **Using a CLI command**:\n\n   You can run the following command to create a token:\n\n   ```sh\n   docker compose run --no-deps setup-config /hatchet/hatchet-admin token create --config /hatchet/config --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52\n   ```\n\n2. 
**Using the Hatchet dashboard**:\n   - Log in to the Hatchet dashboard.\n   - Navigate to the \"Settings\" page.\n   - Click on the \"API Tokens\" tab.\n   - Click on \"Create API Token\".\n\nNow that you have an API token, see the guide [here](https://docs.hatchet.run/home/setup) for how to run your first task."},"229":{"title":"Repulling images","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"The docker compose file above uses the `latest` tag for all images. This means that if you want to pull the latest version of the images, you can run the following command:\n\n```bash\ndocker compose pull\n```"},"230":{"title":"Connecting to the engine from within Docker","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"If you're also running your worker application inside of `docker-compose`, you should modify the `SERVER_GRPC_BROADCAST_ADDRESS` environment variable in the `setup-config` service to use `hatchet-engine` as the hostname. For example:\n\n```yaml\nSERVER_GRPC_BROADCAST_ADDRESS: \"hatchet-engine:7077\"\n```\n\nMake sure your worker depends on hatchet-engine:\n\n```yaml\nworker:\n  depends_on:\n    hatchet-engine:\n      condition: service_started\n```\n\n> **Info:** **Note:** modifying the GRPC broadcast address or server URL will require\n>   re-issuing an API token."},"231":{"title":"Additional Docker configuration","pageTitle":"Docker Compose","pageRoute":"hatchet://docs/self-hosting/docker-compose","content":"### Increase Postgres shared memory\n\nBy default, containers have a 64 MB shared memory segment (`/dev/shm`). For larger Hatchet deployments this can be too small and may lead to slow queries or an unresponsive dashboard. 
Increase the shared memory size for the `postgres` service:\n\n```yaml filename=\"docker-compose.yml\" copy\n# ...\nservices:\n  postgres:\n    image: postgres:15.6\n    shm_size: 1g # Increase shared memory (adjust as needed)\n    command: postgres -c 'max_connections=1000'\n    restart: always\n    hostname: \"postgres\"\n    environment:\n      - POSTGRES_USER=hatchet\n      - POSTGRES_PASSWORD=hatchet\n      - POSTGRES_DB=hatchet\n    ports:\n      - \"5435:5432\"\n    volumes:\n      - hatchet_postgres_data:/var/lib/postgresql/data\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -d hatchet -U hatchet\"]\n      interval: 10s\n      timeout: 10s\n      retries: 5\n      start_period: 10s\n# ...\n```"},"232":{"title":"Quickstart","pageTitle":"Quickstart","pageRoute":"hatchet://docs/self-hosting/kubernetes-quickstart","content":"# Kubernetes Quickstart"},"233":{"title":"Prerequisites","pageTitle":"Quickstart","pageRoute":"hatchet://docs/self-hosting/kubernetes-quickstart","content":"- A Kubernetes cluster currently set as the current context in `kubectl`\n- `kubectl` and `helm` installed"},"234":{"title":"Quickstart","pageTitle":"Quickstart","pageRoute":"hatchet://docs/self-hosting/kubernetes-quickstart","content":"### Get Hatchet Running\n\nTo deploy `hatchet-stack`, run the following commands:\n\n```sh\nhelm repo add hatchet https://hatchet-dev.github.io/hatchet-charts\nhelm install hatchet-stack hatchet/hatchet-stack --set caddy.enabled=true\n```\n\nThis default installation will run the Hatchet server as an internal service in the cluster and spins up a reverse proxy via `Caddy` to get local access. 
To view the Hatchet server, run the following command:\n\n```sh\nexport NAMESPACE=default # TODO: replace with your namespace\nexport POD_NAME=$(kubectl get pods --namespace $NAMESPACE -l \"app=caddy\" -o jsonpath=\"{.items[0].metadata.name}\")\nexport CONTAINER_PORT=$(kubectl get pod --namespace $NAMESPACE $POD_NAME -o jsonpath=\"{.spec.containers[0].ports[0].containerPort}\")\nkubectl --namespace $NAMESPACE port-forward $POD_NAME 8080:$CONTAINER_PORT\n```\n\nAnd then navigate to `http://localhost:8080` to see the Hatchet frontend running. You can log into Hatchet with the following credentials:\n\n```\nEmail: admin@example.com\nPassword: Admin123!!\n```\n\n### Port forward to the Hatchet engine\n\n```sh\nexport NAMESPACE=default # TODO: replace with your namespace\nexport POD_NAME=$(kubectl get pods --namespace $NAMESPACE -l \"app.kubernetes.io/name=engine\" -o jsonpath=\"{.items[0].metadata.name}\")\nexport CONTAINER_PORT=$(kubectl get pod --namespace $NAMESPACE $POD_NAME -o jsonpath=\"{.spec.containers[0].ports[0].containerPort}\")\nkubectl --namespace $NAMESPACE port-forward $POD_NAME 7070:$CONTAINER_PORT\n```\n\nThis will spin up the Hatchet engine service on `localhost:7070` which you can then connect to from the examples.\n\n### Generate an API token\n\nTo generate an API token, navigate to the `Settings` tab in the Hatchet frontend and click on the `API Tokens` tab. Click the `Generate API Token` button to create a new token. 
Store this token somewhere safe.\n\n### Run your first worker\n\nNow that you have an API token, see the guide [here](https://docs.hatchet.run/home/setup) for how to run your first task."},"235":{"title":"Installing with Glasskube","pageTitle":"Installing with Glasskube","pageRoute":"hatchet://docs/self-hosting/kubernetes-glasskube","content":"# Kubernetes Deployment via Glasskube"},"236":{"title":"Prerequisites","pageTitle":"Installing with Glasskube","pageRoute":"hatchet://docs/self-hosting/kubernetes-glasskube","content":"- A Kubernetes cluster currently set as the current context in `kubectl`\n- `docker`, `openssl`, `kubectl` and [`glasskube`](https://glasskube.dev) installed"},"237":{"title":"What is Glasskube?","pageTitle":"Installing with Glasskube","pageRoute":"hatchet://docs/self-hosting/kubernetes-glasskube","content":"[Glasskube](https://glasskube.dev) is an alternative package manager for Kubernetes and part of the CNCF landscape. Glasskube is designed as a Cloud Native application and every installed package is represented by a Custom Resource.\n\n\n[`glasskube/glasskube`](https://github.com/glasskube/glasskube/) is in active development, with _good first issues_\navailable for new contributors."},"238":{"title":"Quickstart","pageTitle":"Installing with Glasskube","pageRoute":"hatchet://docs/self-hosting/kubernetes-glasskube","content":"### Generate encryption keys\n\nThere are 4 encryption secrets required for Hatchet to run which can be generated via the following bash script (requires `docker` and `openssl`):\n\n```sh filename=generate.sh copy\n#!/bin/bash\n\n# Define an alias for generating random strings. 
This needs to be a function in a script.\nrandstring() {\n    openssl rand -base64 69 | tr -d \"\\n=+/\" | cut -c1-$1\n}\n\n# Create keys directory\nmkdir -p ./keys\n\n# Function to clean up the keys directory\ncleanup() {\n    rm -rf ./keys\n}\n\n# Register the cleanup function to be called on the EXIT signal\ntrap cleanup EXIT\n\n# Check if Docker is installed\nif ! command -v docker &> /dev/null\nthen\n    echo \"Docker could not be found. Please install Docker.\"\n    exit 1\nfi\n\n# Generate keysets using Docker\ndocker run --user $(id -u):$(id -g) -v $(pwd)/keys:/hatchet/keys ghcr.io/hatchet-dev/hatchet/hatchet-admin:latest /hatchet/hatchet-admin keyset create-local-keys --key-dir /hatchet/keys\n\n# Read keysets from files\nSERVER_ENCRYPTION_MASTER_KEYSET=$(<./keys/master.key)\nSERVER_ENCRYPTION_JWT_PRIVATE_KEYSET=$(<./keys/private_ec256.key)\nSERVER_ENCRYPTION_JWT_PUBLIC_KEYSET=$(<./keys/public_ec256.key)\n\n# Generate the random strings for SERVER_AUTH_COOKIE_SECRETS\nSERVER_AUTH_COOKIE_SECRET1=$(randstring 16)\nSERVER_AUTH_COOKIE_SECRET2=$(randstring 16)\n\n# Create the YAML file\ncat > hatchet-secret.yaml <\"\nHATCHET_CLIENT_TLS_STRATEGY=none\n```\n\nYou will need this in the following example.\n\n### Port forward to the Hatchet engine\n\n```sh\nexport NAMESPACE=hatchet # TODO: change if you modified the namespace\nexport POD_NAME=$(kubectl get pods --namespace $NAMESPACE -l \"app.kubernetes.io/name=hatchet-engine,app.kubernetes.io/instance=hatchet\" -o jsonpath=\"{.items[0].metadata.name}\")\nexport CONTAINER_PORT=$(kubectl get pod --namespace $NAMESPACE $POD_NAME -o jsonpath=\"{.spec.containers[0].ports[0].containerPort}\")\nkubectl --namespace $NAMESPACE port-forward $POD_NAME 7070:$CONTAINER_PORT\n```\n\nThis will spin up the Hatchet engine service on `localhost:7070` which you can then connect to from the examples.\n\n### Generate an API token\n\nTo generate an API token, navigate to the `Settings` tab in the Hatchet frontend and click on the `API 
Tokens` tab. Click the `Generate API Token` button to create a new token. Store this token somewhere safe.\n\n### Run your first worker\n\nNow that you have an API token, see the guide [here](https://docs.hatchet.run/home/setup) for how to run your first task."},"239":{"title":"Networking","pageTitle":"Networking","pageRoute":"hatchet://docs/self-hosting/networking","content":"# Kubernetes Networking"},"240":{"title":"Overview","pageTitle":"Networking","pageRoute":"hatchet://docs/self-hosting/networking","content":"By default, the Kubernetes Helm chart does not expose any of the Hatchet services over an ingress. There are three services which can possibly be exposed:\n\n1. `hatchet-engine`\n2. `hatchet-stack-api`\n3. `hatchet-stack-frontend`\n\nTo expose these services, you will need to do the following:\n\n1. Configure ingresses for `frontend` and `engine` services (and optionally the `api` service). We recommend configuring the ingress to reverse proxy `/api` endpoints to the `hatchet-stack-api` service, and configuring a separate ingress to proxy to `hatchet-engine`.\n\n2. 
Update the following configuration variables:\n\n```yaml\napi:\n  env:\n    SERVER_AUTH_COOKIE_DOMAIN: \"hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_URL: \"https://hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n    SERVER_GRPC_INSECURE: \"false\"\n    SERVER_GRPC_BROADCAST_ADDRESS: \"hatchet-engine.example.com:443\" # example.com should be replaced with your domain\n\nengine:\n  env:\n    SERVER_AUTH_COOKIE_DOMAIN: \"hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_URL: \"https://hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n    SERVER_GRPC_INSECURE: \"false\"\n    SERVER_GRPC_BROADCAST_ADDRESS: \"engine.hatchet.example.com:443\" # example.com should be replaced with your domain\n```"},"241":{"title":"Example: `nginx-ingress`","pageTitle":"Networking","pageRoute":"hatchet://docs/self-hosting/networking","content":"Let's walk through an example of exposing Hatchet over `hatchet.example.com` (for the API and frontend) and `engine.hatchet.example.com` (for the engine).\n\nWe'll be deploying this with SSL enabled, which requires a valid certificate. We recommend using [cert-manager](https://cert-manager.io/docs/) to manage your certificates. 
This guide assumes that you have a cert-manager `ClusterIssuer` called `letsencrypt-prod` configured.\n\nHere's an example `values.yaml` file for this setup:\n\n```yaml\napi:\n  env:\n    # TODO: insert these values from the output of the keyset generation command\n    SERVER_AUTH_COOKIE_SECRETS: \"$SERVER_AUTH_COOKIE_SECRET1 $SERVER_AUTH_COOKIE_SECRET2\"\n    SERVER_ENCRYPTION_MASTER_KEYSET: \"$SERVER_ENCRYPTION_MASTER_KEYSET\"\n    SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET: \"$SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET\"\n    SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET: \"$SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET\"\n    SERVER_AUTH_COOKIE_DOMAIN: \"hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_URL: \"https://hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n    SERVER_GRPC_INSECURE: \"false\"\n    SERVER_GRPC_BROADCAST_ADDRESS: \"engine.hatchet.example.com:443\" # example.com should be replaced with your domain\n\nengine:\n  env:\n    # TODO: insert these values from the output of the keyset generation command\n    SERVER_AUTH_COOKIE_SECRETS: \"$SERVER_AUTH_COOKIE_SECRET1 $SERVER_AUTH_COOKIE_SECRET2\"\n    SERVER_ENCRYPTION_MASTER_KEYSET: \"$SERVER_ENCRYPTION_MASTER_KEYSET\"\n    SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET: \"$SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET\"\n    SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET: \"$SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET\"\n    SERVER_AUTH_COOKIE_DOMAIN: \"hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_URL: \"https://hatchet.example.com\" # example.com should be replaced with your domain\n    SERVER_GRPC_BIND_ADDRESS: \"0.0.0.0\"\n    SERVER_GRPC_INSECURE: \"false\"\n    SERVER_GRPC_BROADCAST_ADDRESS: \"engine.hatchet.example.com:443\" # example.com should be replaced with your domain\n  ingress:\n    enabled: true\n    ingressClassName: nginx\n    labels: {}\n    annotations:\n      cert-manager.io/cluster-issuer: 
letsencrypt-prod\n      nginx.ingress.kubernetes.io/auth-tls-verify-client: \"optional\"\n      nginx.ingress.kubernetes.io/auth-tls-secret: \"${kubernetes_namespace.cloud.metadata[0].name}/engine-cert\"\n      nginx.ingress.kubernetes.io/auth-tls-verify-depth: \"1\"\n      nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: \"true\"\n      nginx.ingress.kubernetes.io/backend-protocol: \"GRPC\"\n      nginx.ingress.kubernetes.io/ssl-redirect: \"true\"\n      nginx.ingress.kubernetes.io/grpc-backend: \"true\"\n      nginx.ingress.kubernetes.io/server-snippet: |\n        grpc_read_timeout 1d;\n        grpc_send_timeout 1h;\n        client_header_timeout 1h;\n        client_body_timeout 1h;\n    hosts:\n      - host: engine.hatchet.example.com\n        paths:\n          - path: /\n        backend:\n          serviceName: hatchet-engine\n          servicePort: 7070\n    tls:\n      - hosts:\n          - engine.hatchet.example.com\n        secretName: engine-cert\n        servicePort: 7070\n\nfrontend:\n  ingress:\n    enabled: true\n    ingressClassName: nginx\n    labels: {}\n    annotations:\n      nginx.ingress.kubernetes.io/proxy-body-size: 50m\n      nginx.ingress.kubernetes.io/proxy-send-timeout: \"60\"\n      nginx.ingress.kubernetes.io/proxy-read-timeout: \"60\"\n      nginx.ingress.kubernetes.io/proxy-connect-timeout: \"60\"\n      cert-manager.io/cluster-issuer: letsencrypt-prod\n    hosts:\n      - host: hatchet.example.com\n        paths:\n          - path: /api\n            backend:\n              serviceName: hatchet-api\n              servicePort: 8080\n          - path: /\n            backend:\n              serviceName: hatchet-frontend\n              servicePort: 8080\n    tls:\n      - secretName: hatchet-api\n        hosts:\n          - hatchet.example.com\n```"},"242":{"title":"Configuring the Helm Chart","pageTitle":"Configuring the Helm Chart","pageRoute":"hatchet://docs/self-hosting/kubernetes-helm-configuration","content":"# 
Configuring the Helm Chart"},"243":{"title":"Shared Config","pageTitle":"Configuring the Helm Chart","pageRoute":"hatchet://docs/self-hosting/kubernetes-helm-configuration","content":"For the `hatchet-stack` and `hatchet-ha` Helm charts, the `sharedConfig` object in the `values.yaml` file allows you to configure shared settings for all backend services. The default values are:\n\n```yaml\nsharedConfig:\n  # you can disable shared config by setting this to false\n  enabled: true\n\n  # these are the most commonly configured values\n  serverUrl: \"http://localhost:8080\"\n  serverAuthCookieDomain: \"localhost:8080\" # the domain for the auth cookie\n  serverAuthCookieInsecure: \"t\" # allows cookies to be set over http\n  serverAuthSetEmailVerified: \"t\" # automatically sets email_verified to true for all users\n  serverAuthBasicAuthEnabled: \"t\" # allows login via basic auth (email/password)\n  grpcBroadcastAddress: \"localhost:7070\" # the endpoint for the gRPC server, exposed via the `grpc` service\n  grpcInsecure: \"true\" # allows gRPC to be served over http\n  defaultAdminEmail: \"admin@example.com\" # in exposed/production environments, change this to a valid email\n  defaultAdminPassword: \"Admin123!!\" # in exposed/production environments, change this to a secure password\n\n  # you can set additional environment variables here, which will override any defaults\n  env: {}\n```\n\n### Networking\n\n- **`sharedConfig.serverUrl`** (default: `\"http://localhost:8080\"`): specifies the base URL for the server. This URL should be the public-facing URL of the Hatchet API server (which is typically bundled behind a reverse proxy with the Hatchet frontend).\n\n- **`sharedConfig.grpcBroadcastAddress`** (default: `\"localhost:7070\"`): defines the address for the gRPC server endpoint, which is exposed via the `grpc` service.\n\n- **`sharedConfig.grpcInsecure`** (default: `\"true\"`): when set to `true`, allows the gRPC server to be served over HTTP instead of HTTPS. 
Use this in non-production environments only.\n\n### Authentication\n\n- **`sharedConfig.serverAuthCookieDomain`** (default: `\"localhost:8080\"`): specifies the domain for the authentication cookie. Should be set to the appropriate domain when deploying to production.\n\n- **`sharedConfig.serverAuthCookieInsecure`** (default: `\"t\"`): if set to `\"t\"`, allows authentication cookies to be set over HTTP, useful for local development. In production, use a secure setting.\n\n- **`sharedConfig.serverAuthSetEmailVerified`** (default: `\"t\"`): automatically sets `email_verified` to `true` for all users. This is useful for testing environments where email verification is not necessary.\n\n- **`sharedConfig.serverAuthBasicAuthEnabled`** (default: `\"t\"`): enables basic authentication (using email and password) for users. Should be enabled if the system needs to support user logins via email/password.\n\n- **`sharedConfig.defaultAdminEmail`** (default: `\"admin@example.com\"`): specifies the email for the default administrator account. Change this to a valid email when deploying to production environments.\n\n- **`sharedConfig.defaultAdminPassword`** (default: `\"Admin123!!\"`): defines the password for the default administrator account. This should be changed to a strong password for production deployments.\n\n### Additional Env Variables\n\nYou can set additional environment variables for the backend services using the `env` object. For example:\n\n```yaml\nsharedConfig:\n  env:\n    MY_ENV_VAR: \"my-value\"\n```\n\nThis will set the environment variable `MY_ENV_VAR` to `\"my-value\"` for all backend services. These values will override any default environment settings for the services.\n\n### Seeding Data\n\nThe `sharedConfig` object also allows you to seed the database with a default tenant and user. 
The following environment variables are used to seed the database:\n\n```yaml\nseed:\n  defaultAdminEmail: \"admin@example.com\" # in exposed/production environments, change this to a valid email\n  defaultAdminPassword: \"Admin123!!\" # in exposed/production environments, change this to a secure password\n  env:\n    ADMIN_NAME: \"Admin User\"\n    DEFAULT_TENANT_NAME: \"Default\"\n    DEFAULT_TENANT_SLUG: \"default\"\n    DEFAULT_TENANT_ID: \"707d0855-80ab-4e1f-a156-f1c4546cbf52\"\n```"},"244":{"title":"Setting up an External Database","pageTitle":"Setting up an External Database","pageRoute":"hatchet://docs/self-hosting/kubernetes-external-database","content":"# Setting up Hatchet with an external database"},"245":{"title":"Connecting to Postgres","pageTitle":"Setting up an External Database","pageRoute":"hatchet://docs/self-hosting/kubernetes-external-database","content":"To connect to an external Postgres instance, set `postgres.enabled` to `false` in the `values.yaml` file. This will disable the internal Postgres instance and allow you to connect to an external database. 
You should then add the following configuration for the `hatchet-stack` or `hatchet-ha` charts:\n\n> Note: Either `DATABASE_URL` or `DATABASE_POSTGRES_*` are required\n\n```yaml\nsharedConfig:\n  env:\n    DATABASE_URL: \"postgres://<user>:<password>@<host>:5432/<db-name>?sslmode=disable\"\n    DATABASE_POSTGRES_HOST: \"<host>\"\n    DATABASE_POSTGRES_PORT: \"5432\"\n    DATABASE_POSTGRES_USERNAME: \"<user>\"\n    DATABASE_POSTGRES_PASSWORD: \"<password>\"\n    DATABASE_POSTGRES_DB_NAME: \"<db-name>\"\n    DATABASE_POSTGRES_SSL_MODE: \"disable\"\n```"},"246":{"title":"Mounting environment variables","pageTitle":"Setting up an External Database","pageRoute":"hatchet://docs/self-hosting/kubernetes-external-database","content":"Environment variables can also be mounted from secrets or configmaps via the `deploymentEnvFrom` field, which corresponds to the `envFrom` field in a Kubernetes deployment. For example, to mount the `DATABASE_URL` environment variable from a secret, you can use the following configuration:\n\n```yaml\nhatchet-api:\n  deploymentEnvFrom:\n    - secretRef:\n        name: hatchet-api-secrets\n        key: DATABASE_URL\n\nhatchet-engine:\n  deploymentEnvFrom:\n    - secretRef:\n        name: hatchet-api-secrets\n        key: DATABASE_URL\n```\n\nFor more information on mounting environment variables from secrets, refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables)."},"247":{"title":"Migrations","pageTitle":"Setting up an External Database","pageRoute":"hatchet://docs/self-hosting/kubernetes-external-database","content":"In order for migrations to run, the database user requires permissions to write and modify schemas **on a clean database**. It is therefore recommended to create a separate database instance where Hatchet can run and grant permissions on this database to the Hatchet user. 
For example, to create a new database and user `hatchet` in Postgres, run the following commands (**warning:** change the username/password for production usage):\n\n```sql\ncreate database hatchet;\n\ncreate role hatchet\nwith\n    login password 'hatchet';\n\ngrant hatchet to postgres;\n\nalter database hatchet owner to hatchet;\n```"},"248":{"title":"High Availability","pageTitle":"High Availability","pageRoute":"hatchet://docs/self-hosting/high-availability","content":"# High Availability\n\nIf you are running Hatchet in a high-throughput production environment, you may want to set up an HA (High Availability) configuration to ensure that your system remains available in the event of infrastructure failures or other issues.\n\nThere are multiple levels that you can configure Hatchet to be high availability:\n\n- At the **database level** by using a managed Postgres provider like AWS RDS or Google Cloud SQL which supports HA options.\n- At the **RabbitMQ level** by configuring the RabbitMQ cluster to have at least 3 replicas across multiple zones within a region.\n- At the **Hatchet Engine/API level** by running multiple instances of the Hatchet engine behind a load balancer and splitting the different Hatchet services into separate deployments.\n\nThis guide will focus on the last level of high availability.\n\n\nTo view an end-to-end example of configuring Hatchet for high availability on GCP using Terraform, check out the GCP deployment guide [here](https://github.com/hatchet-dev/hatchet-infra-examples/blob/main/self-hosting/gcp)"},"249":{"title":"HA Helm Chart","pageTitle":"High Availability","pageRoute":"hatchet://docs/self-hosting/high-availability","content":"Hatchet offers an HA Helm chart that can be used to deploy Hatchet in a high availability configuration. 
To use this Helm chart:\n\n```sh\nhelm repo add hatchet https://hatchet-dev.github.io/hatchet-charts\nhelm install hatchet-ha hatchet/hatchet-ha\n```\n\nThis chart accepts the same parameters as `hatchet-stack` for the top-level `api`, `frontend`, `postgres` and `rabbitmq` objects, but you can additionally configure the following services:\n\n```yaml\ngrpc:\n  replicaCount: 4\ncontrollers:\n  replicaCount: 2\nscheduler:\n  replicaCount: 2\n```\n\nSee the [Helm configuration](./kubernetes-helm-configuration) guide for more information on configuring the Hatchet Helm charts."},"250":{"title":"Engine Configuration Options","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"# Configuration Options\n\nThe Hatchet server and engine can be configured via environment variables using several prefixes. This document contains a comprehensive list of all 197+ available options organized by component."},"251":{"title":"Environment Variable Prefixes","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Hatchet uses the following environment variable prefixes:\n\n- **`SERVER_`** (172 variables) - Main server configuration including runtime, authentication, encryption, monitoring, and integrations\n- **`DATABASE_`** (19 variables) - PostgreSQL database connection and configuration\n- **`READ_REPLICA_`** (4 variables) - Read replica database configuration\n- **`ADMIN_`** (3 variables) - Administrator user setup for initial seeding\n- **`DEFAULT_`** (3 variables) - Default tenant configuration\n- **`SCHEDULER_`** (1 variable) - Scheduler-specific rate limiting\n- **`SEED_`** (1 variable) - Development environment seeding\n- **`CACHE_`** (1 variable) - Cache duration settings\n\n_Note: This documentation excludes `HATCHET_CLIENT_*` variables which are specific to Go SDK client configuration._"},"252":{"title":"Required Environment 
Variables","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"The following variables are **absolutely required** for Hatchet to start successfully:\n\n### Encryption Keys (Required - Choose One Strategy)\n\n**Option A: Local Encryption Keys**\n\n```bash\nSERVER_ENCRYPTION_MASTER_KEYSET=\"<base64-encoded-keyset>\"\nSERVER_ENCRYPTION_JWT_PUBLIC_KEYSET=\"<base64-encoded-jwt-public>\"\nSERVER_ENCRYPTION_JWT_PRIVATE_KEYSET=\"<base64-encoded-jwt-private>\"\n```\n\n**Option B: File-based Keys**\n\n```bash\nSERVER_ENCRYPTION_MASTER_KEYSET_FILE=\"/path/to/master.keyset\"\nSERVER_ENCRYPTION_JWT_PUBLIC_KEYSET_FILE=\"/path/to/jwt-public.keyset\"\nSERVER_ENCRYPTION_JWT_PRIVATE_KEYSET_FILE=\"/path/to/jwt-private.keyset\"\n```\n\n**Option C: Google Cloud KMS**\n\n```bash\nSERVER_ENCRYPTION_CLOUDKMS_ENABLED=true\nSERVER_ENCRYPTION_CLOUDKMS_KEY_URI=\"gcp-kms://your-key-uri\"\nSERVER_ENCRYPTION_CLOUDKMS_CREDENTIALS_JSON=\"<credentials-json>\"\n```\n\n### Authentication Secrets (Required)\n\n```bash\nSERVER_AUTH_COOKIE_SECRETS=\"<secret1> <secret2>\"\n```\n\n### Database Connection (Required)\n\n**Option A: Connection String**\n\n```bash\nDATABASE_URL=\"postgresql://user:password@host:port/dbname\"\n```\n\n**Option B: Individual Parameters** (uses defaults if not specified)\n\n```bash\nDATABASE_POSTGRES_HOST=your-postgres-host\nDATABASE_POSTGRES_PASSWORD=your-secure-password\n```"},"253":{"title":"Minimal Configuration Example","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"> **Warning:** This example is for local development when Hatchet connects to PostgreSQL\n>   running on the same host. For Docker Compose deployments, use your database\n>   service name, such as `postgres`, instead of `127.0.0.1`. 
See the [Docker\n>   Compose deployment guide](/self-hosting/docker-compose).\n\n```bash\n# Database\nDATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet'\n\n# Encryption (using key files - recommended for development)\nSERVER_ENCRYPTION_MASTER_KEYSET_FILE=./keys/master.key\nSERVER_ENCRYPTION_JWT_PRIVATE_KEYSET_FILE=./keys/private_ec256.key\nSERVER_ENCRYPTION_JWT_PUBLIC_KEYSET_FILE=./keys/public_ec256.key\n\n# Authentication\nSERVER_AUTH_COOKIE_SECRETS=\"your-secret-key-1 your-secret-key-2\"\nSERVER_AUTH_SET_EMAIL_VERIFIED=true\n\n# Basic server config\nSERVER_PORT=8080\nSERVER_URL=http://localhost:8080\n\n# Development settings (optional but recommended)\nSERVER_GRPC_INSECURE=true\nSERVER_INTERNAL_CLIENT_BASE_STRATEGY=none\nSERVER_LOGGER_LEVEL=error\nSERVER_LOGGER_FORMAT=console\nDATABASE_LOGGER_LEVEL=error\nDATABASE_LOGGER_FORMAT=console\n```\n\nGenerate encryption keys with:\n\n```bash\ngo run ./cmd/hatchet-admin keyset create-local-keys --key-dir ./keys\n```"},"254":{"title":"Runtime Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variables marked with ⚠️ are conditionally required when specific features are enabled.\n\nVariable, Description, Default Value\n\n`SERVER_PORT`, Port for the core server, `8080`\n`SERVER_URL`, Full server URL, including protocol, `http://localhost:8080`\n`SERVER_GRPC_PORT`, Port for the GRPC service, `7070`\n`SERVER_GRPC_BIND_ADDRESS`, GRPC server bind address, `127.0.0.1`\n`SERVER_GRPC_BROADCAST_ADDRESS`, GRPC server broadcast address, `127.0.0.1:7070`\n`SERVER_GRPC_INSECURE`, Controls if the GRPC server is insecure, `false`\n`SERVER_ENFORCE_LIMITS`, Enforce tenant limits, `false`\n`SERVER_ALLOW_SIGNUP`, Allow new tenant signups, `true`\n`SERVER_ALLOW_INVITES`, Allow new invites, `true`\n`SERVER_ALLOW_CREATE_TENANT`, Allow tenant creation, `true`\n`SERVER_ALLOW_CHANGE_PASSWORD`, Allow password changes, `true`\n`SERVER_HEALTHCHECK`, 
Enable healthcheck endpoint, `true`\n`SERVER_HEALTHCHECK_PORT`, Healthcheck port, `8733`\n`SERVER_GRPC_MAX_MSG_SIZE`, gRPC max message size, `4194304`\n`SERVER_GRPC_RATE_LIMIT`, gRPC rate limit, `1000`\n`SCHEDULER_CONCURRENCY_RATE_LIMIT`, Scheduler concurrency rate limit, `20`\n`SCHEDULER_CONCURRENCY_POLLING_MIN_INTERVAL`, Minimum concurrency polling interval, `500ms`\n`SCHEDULER_CONCURRENCY_POLLING_MAX_INTERVAL`, Maximum concurrency polling interval, `5s`\n`SCHEDULER_ADVISORY_LOCK_TIMEOUT`, Timeout for in-memory advisory lock, `5s`\n`SERVER_SERVICES`, Services to run, `[\"all\"]`\n`SERVER_PAUSED_CONTROLLERS`, Paused controllers\n`SERVER_ENABLE_DATA_RETENTION`, Enable data retention, `true`\n`SERVER_ENABLE_WORKER_RETENTION`, Enable worker retention, `false`\n`SERVER_MAX_PENDING_INVITES`, Max pending invites, `100`\n`SERVER_DISABLE_TENANT_PUBS`, Disable tenant pubsub\n`SERVER_MAX_INTERNAL_RETRY_COUNT`, Max internal retry count, `10`\n`SERVER_PREVENT_TENANT_VERSION_UPGRADE`, Prevent tenant version upgrades, `false`\n`SERVER_DEFAULT_ENGINE_VERSION`, Default engine version, `V1`\n`SERVER_REPLAY_ENABLED`, Enable task replay, `true`"},"255":{"title":"Database Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"> **Info:** In Docker Compose deployments, use the database service name in `DATABASE_URL`\n>   rather than `127.0.0.1`. Inside a container, `127.0.0.1` refers to the\n>   container itself. 
The localhost defaults shown in this section are intended\n>   for local development on the same host.\n\nVariable, Description, Default Value\n\n`DATABASE_URL`, PostgreSQL connection string constructed from database settings if unset\n`DATABASE_POSTGRES_HOST`, PostgreSQL host, `127.0.0.1`\n`DATABASE_POSTGRES_PORT`, PostgreSQL port, `5431`\n`DATABASE_POSTGRES_USERNAME`, PostgreSQL username, `hatchet`\n`DATABASE_POSTGRES_PASSWORD`, PostgreSQL password, `hatchet`\n`DATABASE_POSTGRES_DB_NAME`, PostgreSQL database name, `hatchet`\n`DATABASE_POSTGRES_SSL_MODE`, PostgreSQL SSL mode, `disable`\n`DATABASE_MAX_CONNS`, Max database connections, `50`\n`DATABASE_MIN_CONNS`, Min database connections, `10`\n`DATABASE_MAX_QUEUE_CONNS`, Max queue connections, `50`\n`DATABASE_MIN_QUEUE_CONNS`, Min queue connections, `10`\n`DATABASE_MAX_CONN_LIFETIME`, Max lifetime of a connection, `15m`\n`DATABASE_MAX_CONN_IDLE_TIME`, Max time a connection can be idle before being closed, `1m`\n`DATABASE_LOG_QUERIES`, Log database queries, `false`\n`DATABASE_PGBOUNCER_ENABLED`, Enable pgbouncer support; requires `DATABASE_DIRECT_URL` to be set, `false`\n`DATABASE_DIRECT_URL`, Direct PostgreSQL connection string bypassing pgbouncer for DDL operations\n`DATABASE_DIRECT_MAX_CONNS`, Max connections for the direct (non-pgbouncer) pool, `2`\n`DATABASE_DIRECT_MIN_CONNS`, Min connections for the direct (non-pgbouncer) pool, `1`\n`CACHE_DURATION`, Cache duration, `5s`\n`ADMIN_EMAIL`, Admin email for seeding, `admin@example.com`\n`ADMIN_PASSWORD`, Admin password for seeding, `Admin123!!`\n`ADMIN_NAME`, Admin name for seeding, `Admin`\n`DEFAULT_TENANT_NAME`, Default tenant name, `Default`\n`DEFAULT_TENANT_SLUG`, Default tenant slug, `default`\n`DEFAULT_TENANT_ID`, Default tenant ID\n`SEED_DEVELOPMENT`, Development seeding flag\n`READ_REPLICA_ENABLED`, Enable read replica, `false`\n`READ_REPLICA_DATABASE_URL`, Read replica database URL\n`READ_REPLICA_MAX_CONNS`, Read replica max connections, 
`50`\n`READ_REPLICA_MIN_CONNS`, Read replica min connections, `10`\n`DATABASE_LOGGER_LEVEL`, Database logger level\n`DATABASE_LOGGER_FORMAT`, Database logger format"},"256":{"title":"Security Check Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_SECURITY_CHECK_ENABLED`, Enable security check, `true`\n`SERVER_SECURITY_CHECK_ENDPOINT`, Security check endpoint, `https://security.hatchet.run`"},"257":{"title":"Limit Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_LIMITS_DEFAULT_TENANT_RETENTION_PERIOD`, Default tenant retention period, `720h`\n`SERVER_LIMITS_DEFAULT_WORKER_LIMIT`, Default worker limit, `4`\n`SERVER_LIMITS_DEFAULT_WORKER_ALARM_LIMIT`, Default worker alarm limit, `2`\n`SERVER_LIMITS_DEFAULT_EVENT_LIMIT`, Default event limit, `1000`\n`SERVER_LIMITS_DEFAULT_EVENT_ALARM_LIMIT`, Default event alarm limit, `750`\n`SERVER_LIMITS_DEFAULT_EVENT_WINDOW`, Default event window, `24h`\n`SERVER_LIMITS_DEFAULT_CRON_LIMIT`, Default cron limit, `5`\n`SERVER_LIMITS_DEFAULT_CRON_ALARM_LIMIT`, Default cron alarm limit, `2`\n`SERVER_LIMITS_DEFAULT_SCHEDULE_LIMIT`, Default schedule limit, `1000`\n`SERVER_LIMITS_DEFAULT_SCHEDULE_ALARM_LIMIT`, Default schedule alarm limit, `750`\n`SERVER_LIMITS_DEFAULT_TASK_RUN_LIMIT`, Default task run limit, `2000`\n`SERVER_LIMITS_DEFAULT_TASK_RUN_ALARM_LIMIT`, Default task run alarm limit, `1500`\n`SERVER_LIMITS_DEFAULT_TASK_RUN_WINDOW`, Default task run window, `24h`\n`SERVER_LIMITS_DEFAULT_WORKER_SLOT_LIMIT`, Default worker slot limit, `4000`\n`SERVER_LIMITS_DEFAULT_WORKER_SLOT_ALARM_LIMIT`, Default worker slot alarm limit, `3000`"},"258":{"title":"Alerting Configuration","pageTitle":"Engine Configuration 
Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_ALERTING_SENTRY_ENABLED`, Enable Sentry for alerting\n`SERVER_ALERTING_SENTRY_DSN`, Sentry DSN\n`SERVER_ALERTING_SENTRY_ENVIRONMENT`, Sentry environment, `development`\n`SERVER_ALERTING_SENTRY_SAMPLE_RATE`, Sentry sample rate, `1.0`\n`SERVER_ANALYTICS_POSTHOG_ENABLED`, Enable PostHog analytics\n`SERVER_ANALYTICS_POSTHOG_API_KEY`, PostHog API key\n`SERVER_ANALYTICS_POSTHOG_ENDPOINT`, PostHog endpoint\n`SERVER_ANALYTICS_POSTHOG_FE_API_HOST`, PostHog frontend API host\n`SERVER_ANALYTICS_POSTHOG_FE_API_KEY`, PostHog frontend API key\n`SERVER_PYLON_ENABLED`, Enable Pylon\n`SERVER_PYLON_APP_ID` ⚠️, Pylon app ID (required if Pylon enabled)\n`SERVER_PYLON_SECRET`, Pylon secret"},"259":{"title":"Encryption Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_ENCRYPTION_MASTER_KEYSET`, Raw master keyset, base64-encoded JSON string\n`SERVER_ENCRYPTION_MASTER_KEYSET_FILE`, Path to the master keyset file\n`SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET`, Public JWT keyset, base64-encoded JSON string\n`SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET_FILE`, Path to the public JWT keyset file\n`SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET`, Private JWT keyset, base64-encoded JSON string\n`SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET_FILE`, Path to the private JWT keyset file\n`SERVER_ENCRYPTION_CLOUDKMS_ENABLED`, Whether Google Cloud KMS is enabled, `false`\n`SERVER_ENCRYPTION_CLOUDKMS_KEY_URI`, URI of the key in Google Cloud KMS\n`SERVER_ENCRYPTION_CLOUDKMS_CREDENTIALS_JSON`, JSON credentials for Google Cloud KMS"},"260":{"title":"Authentication Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_AUTH_RESTRICTED_EMAIL_DOMAINS`, 
Restricted email domains\n`SERVER_AUTH_BASIC_AUTH_ENABLED`, Whether basic auth is enabled, `true`\n`SERVER_AUTH_SET_EMAIL_VERIFIED`, Whether the user's email is set to verified automatically, `false`\n`SERVER_AUTH_COOKIE_NAME`, Name of the cookie, `hatchet`\n`SERVER_AUTH_COOKIE_DOMAIN`, Domain for the cookie\n`SERVER_AUTH_COOKIE_SECRETS`, Cookie secrets\n`SERVER_AUTH_COOKIE_INSECURE`, Whether the cookie is insecure, `false`\n`SERVER_AUTH_GOOGLE_ENABLED`, Whether Google auth is enabled, `false`\n`SERVER_AUTH_GOOGLE_CLIENT_ID` ⚠️, Google auth client ID (required if Google auth enabled)\n`SERVER_AUTH_GOOGLE_CLIENT_SECRET` ⚠️, Google auth client secret (required if Google auth enabled)\n`SERVER_AUTH_GOOGLE_SCOPES`, Google auth scopes, `[\"openid\", \"profile\", \"email\"]`\n`SERVER_AUTH_GITHUB_ENABLED`, Whether GitHub auth is enabled, `false`\n`SERVER_AUTH_GITHUB_CLIENT_ID` ⚠️, GitHub auth client ID (required if GitHub auth enabled)\n`SERVER_AUTH_GITHUB_CLIENT_SECRET` ⚠️, GitHub auth client secret (required if GitHub auth enabled)\n`SERVER_AUTH_GITHUB_SCOPES`, GitHub auth scopes, `[\"read:user\", \"user:email\"]`"},"261":{"title":"Task Queue Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_MSGQUEUE_KIND`, Message queue kind, `rabbitmq`\n`SERVER_MSGQUEUE_RABBITMQ_URL`, RabbitMQ URL\n`SERVER_MSGQUEUE_RABBITMQ_QOS`, RabbitMQ QoS, `100`\n`SERVER_REQUEUE_LIMIT`, Requeue limit, `100`\n`SERVER_SINGLE_QUEUE_LIMIT`, Single queue limit, `100`\n`SERVER_UPDATE_HASH_FACTOR`, Update hash factor, `100`\n`SERVER_UPDATE_CONCURRENT_FACTOR`, Update concurrent factor, `10`"},"262":{"title":"TLS Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_TLS_STRATEGY`, TLS strategy\n`SERVER_TLS_CERT`, TLS certificate\n`SERVER_TLS_CERT_FILE`, 
Path to the TLS certificate file\n`SERVER_TLS_KEY`, TLS key\n`SERVER_TLS_KEY_FILE`, Path to the TLS key file\n`SERVER_TLS_ROOT_CA`, TLS root CA\n`SERVER_TLS_ROOT_CA_FILE`, Path to the TLS root CA file\n`SERVER_TLS_SERVER_NAME`, TLS server name\n`SERVER_INTERNAL_CLIENT_BASE_STRATEGY`, Internal client TLS strategy\n`SERVER_INTERNAL_CLIENT_BASE_INHERIT_BASE`, Inherit base TLS config, `true`\n`SERVER_INTERNAL_CLIENT_TLS_BASE_CERT`, Internal client TLS cert\n`SERVER_INTERNAL_CLIENT_TLS_BASE_CERT_FILE`, Internal client TLS cert file\n`SERVER_INTERNAL_CLIENT_TLS_BASE_KEY`, Internal client TLS key\n`SERVER_INTERNAL_CLIENT_TLS_BASE_KEY_FILE`, Internal client TLS key file\n`SERVER_INTERNAL_CLIENT_TLS_BASE_ROOT_CA`, Internal client TLS root CA\n`SERVER_INTERNAL_CLIENT_TLS_BASE_ROOT_CA_FILE`, Internal client TLS root CA file\n`SERVER_INTERNAL_CLIENT_TLS_SERVER_NAME`, Internal client TLS server name\n`SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS`, Internal gRPC broadcast address"},"263":{"title":"Logging Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_LOGGER_LEVEL`, Logger level\n`SERVER_LOGGER_FORMAT`, Logger format\n`SERVER_LOG_INGESTION_ENABLED`, Enable log ingestion, `true`\n`SERVER_ADDITIONAL_LOGGERS_QUEUE_LEVEL`, Queue logger level\n`SERVER_ADDITIONAL_LOGGERS_QUEUE_FORMAT`, Queue logger format\n`SERVER_ADDITIONAL_LOGGERS_PGXSTATS_LEVEL`, PGX stats logger level\n`SERVER_ADDITIONAL_LOGGERS_PGXSTATS_FORMAT`, PGX stats logger format"},"264":{"title":"OpenTelemetry Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_OTEL_SERVICE_NAME`, Service name for OpenTelemetry\n`SERVER_OTEL_COLLECTOR_URL`, Collector URL for OpenTelemetry\n`SERVER_OTEL_INSECURE`, Whether to use an insecure connection to the collector 
URL\n`SERVER_OTEL_TRACE_ID_RATIO`, OpenTelemetry trace ID ratio\n`SERVER_OTEL_COLLECTOR_AUTH`, OpenTelemetry Collector Authorization header value\n`SERVER_OTEL_METRICS_ENABLED`, Enable OpenTelemetry metrics collection, `false`\n`SERVER_PROMETHEUS_ENABLED`, Enable Prometheus, `false`\n`SERVER_PROMETHEUS_ADDRESS`, Prometheus address, `:9090`\n`SERVER_PROMETHEUS_PATH`, Prometheus metrics path, `/metrics`\n`SERVER_PROMETHEUS_SERVER_URL`, Prometheus server URL\n`SERVER_PROMETHEUS_SERVER_USERNAME`, Prometheus server username\n`SERVER_PROMETHEUS_SERVER_PASSWORD`, Prometheus server password"},"265":{"title":"Tenant Alerting Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_TENANT_ALERTING_SLACK_ENABLED`, Enable Slack for tenant alerting\n`SERVER_TENANT_ALERTING_SLACK_CLIENT_ID`, Slack client ID\n`SERVER_TENANT_ALERTING_SLACK_CLIENT_SECRET`, Slack client secret\n`SERVER_TENANT_ALERTING_SLACK_SCOPES`, Slack scopes, `[\"incoming-webhook\"]`\n`SERVER_EMAIL_KIND`, Email integration kind, `postmark`\n`SERVER_EMAIL_POSTMARK_ENABLED`, Enable Postmark\n`SERVER_EMAIL_POSTMARK_SERVER_KEY`, Postmark server key\n`SERVER_EMAIL_POSTMARK_FROM_EMAIL`, Postmark from email\n`SERVER_EMAIL_POSTMARK_FROM_NAME`, Postmark from name, `Hatchet Support`\n`SERVER_EMAIL_POSTMARK_SUPPORT_EMAIL`, Postmark support email\n`SERVER_EMAIL_SMTP_ENABLED`, Enable SMTP\n`SERVER_EMAIL_SMTP_SERVER_ADDR`, SMTP server address\n`SERVER_EMAIL_SMTP_FROM_EMAIL`, SMTP from email\n`SERVER_EMAIL_SMTP_FROM_NAME`, SMTP from name, `Hatchet Support`\n`SERVER_EMAIL_SMTP_SUPPORT_EMAIL`, SMTP support email\n`SERVER_EMAIL_SMTP_AUTH_USERNAME`, SMTP authentication username\n`SERVER_EMAIL_SMTP_AUTH_PASSWORD`, SMTP authentication password\n`SERVER_MONITORING_ENABLED`, Enable monitoring, `true`\n`SERVER_MONITORING_PERMITTED_TENANTS`, Permitted tenants for monitoring\n`SERVER_MONITORING_PROBE_TIMEOUT`, 
Monitoring probe timeout, `30s`\n`SERVER_MONITORING_TLS_ROOT_CA_FILE`, Monitoring TLS root CA file\n`SERVER_SAMPLING_ENABLED`, Enable sampling, `false`\n`SERVER_SAMPLING_RATE`, Sampling rate, `1.0`\n`SERVER_OPERATIONS_JITTER`, Operations jitter in milliseconds, `0`\n`SERVER_OPERATIONS_POLL_INTERVAL`, Operations poll interval in seconds, `2`"},"266":{"title":"Cron Operations Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_CRON_OPERATIONS_TASK_ANALYZE_CRON_INTERVAL`, Interval for running ANALYZE on task-related tables, `3h`\n`SERVER_CRON_OPERATIONS_OLAP_ANALYZE_CRON_INTERVAL`, Interval for running ANALYZE on OLAP/analytics tables, `3h`\n`SERVER_CRON_OPERATIONS_DB_HEALTH_METRICS_INTERVAL`, Interval for collecting database health metrics (OTel), `60s`\n`SERVER_CRON_OPERATIONS_OLAP_METRICS_INTERVAL`, Interval for collecting OLAP metrics (OTel), `5m`\n`SERVER_CRON_OPERATIONS_WORKER_METRICS_INTERVAL`, Interval for collecting worker metrics (OTel), `60s`\n`SERVER_CRON_OPERATIONS_YESTERDAY_RUN_COUNT_HOUR`, Hour (0-23) at which to collect yesterday's workflow run count (OTel), `0`\n`SERVER_CRON_OPERATIONS_YESTERDAY_RUN_COUNT_MINUTE`, Minute (0-59) at which to collect yesterday's workflow run count, `5`\n`SERVER_WAIT_FOR_FLUSH`, Default wait for flush, `1ms`\n`SERVER_MAX_CONCURRENT`, Default max concurrent, `50`\n`SERVER_FLUSH_PERIOD_MILLISECONDS`, Default flush period, `10ms`\n`SERVER_FLUSH_ITEMS_THRESHOLD`, Default flush threshold, `100`\n`SERVER_FLUSH_STRATEGY`, Default flush strategy, `DYNAMIC`"},"267":{"title":"OLAP Database Configuration","pageTitle":"Engine Configuration Options","pageRoute":"hatchet://docs/self-hosting/configuration-options","content":"Variable, Description, Default Value\n\n`SERVER_OLAP_STATUS_UPDATE_DAG_BATCH_SIZE_LIMIT`, Batch size limit for running DAG status updates, 
`1000`\n`SERVER_OLAP_STATUS_UPDATE_TASK_BATCH_SIZE_LIMIT`, Batch size limit for running task status updates, `1000`"},"268":{"title":"Prometheus Metrics for Hatchet","pageTitle":"Prometheus Metrics","pageRoute":"hatchet://docs/self-hosting/prometheus-metrics","content":"> **Warning:** Only works with v1 tenants\n\nThis document provides an overview of the Prometheus metrics exposed by Hatchet, setup instructions for the metrics endpoint, and example PromQL queries to analyze them.\n\n### Setup\n\nTo enable Prometheus metrics for your Hatchet instance, you can set the following environment variables. The corresponding configuration YAML values are mentioned in parentheses. If you are deploying [Hatchet in HA mode](/self-hosting/high-availability), these should be set on both the `controllers` as well as `scheduler` deployments.\n\n- Required\n  - **`SERVER_PROMETHEUS_ENABLED`** (`prometheus.enabled`)\n    - Default: `false`\n    - Description: Enables or disables the Prometheus metrics HTTP server.\n\n- Optional\n  - **`SERVER_PROMETHEUS_ADDRESS`** (`prometheus.address`)\n    - Default: `\":9090\"`\n    - Description: The network address and port to bind the Prometheus metrics server to.\n\n  - **`SERVER_PROMETHEUS_PATH`** (`prometheus.path`)\n    - Default: `\"/metrics\"`\n    - Description: The HTTP path at which metrics will be exposed.\n\nOnce enabled, you can set up any scraper that supports ingesting Prometheus metrics.\n\n#### Tenant metrics endpoint\n\n> **Info:** This step requires communication with a service that scrapes Hatchet\n>   Prometheus metrics.\n\nTo enable the [tenant API endpoint](/v1/prometheus-metrics) you can set the following environment variables:\n\n- Required\n  - **`SERVER_PROMETHEUS_SERVER_URL`** (`prometheus.prometheusServerURL`)\n    - Description: The Prometheus server URL.\n\n- Optional\n  - **`SERVER_PROMETHEUS_SERVER_USERNAME`** (`prometheus.prometheusServerUsername`)\n    - Description: The username to access the Prometheus 
instance via HTTP basic auth.\n\n  - **`SERVER_PROMETHEUS_SERVER_PASSWORD`** (`prometheus.prometheusServerPassword`)\n    - Description: The password to access the Prometheus instance via HTTP basic auth.\n\n**Example environment setup:**\n\n```bash\nexport SERVER_PROMETHEUS_ENABLED=true\nexport SERVER_PROMETHEUS_ADDRESS=\":9999\"\nexport SERVER_PROMETHEUS_PATH=\"/custom-metrics\"\n```\n\nRestart your Hatchet server after setting these variables to apply the changes.\n\n---\n\n### Global Metrics\n\nMetric Name, Type, Description\n\n`hatchet_queue_invocations_total`, Counter, The total number of invocations of the queuer function\n`hatchet_created_tasks_total`, Counter, The total number of tasks created\n`hatchet_retried_tasks_total`, Counter, The total number of tasks retried\n`hatchet_succeeded_tasks_total`, Counter, The total number of tasks that succeeded\n`hatchet_failed_tasks_total`, Counter, The total number of tasks that failed (in a final state, not including retries)\n`hatchet_skipped_tasks_total`, Counter, The total number of tasks that were skipped\n`hatchet_cancelled_tasks_total`, Counter, The total number of tasks cancelled\n`hatchet_assigned_tasks_total`, Counter, The total number of tasks assigned to a worker\n`hatchet_scheduling_timed_out_total`, Counter, The total number of tasks that timed out while waiting to be scheduled\n`hatchet_rate_limited_total`, Counter, The total number of tasks that were rate limited\n`hatchet_queued_to_assigned_total`, Counter, The total number of unique tasks that were queued and later assigned to a worker\n`hatchet_queued_to_assigned_seconds`, Histogram, Buckets of time (in seconds) spent in the queue before being assigned to a worker\n`hatchet_reassigned_tasks_total`, Counter, The total number of tasks that were reassigned to a worker\n\n#### Example PromQL Queries\n\n##### 1. Rate of calls to the queuer method\n\n```promql\nrate(hatchet_queue_invocations_total[5m])\n```\n\n##### 2. 
Average queue time in milliseconds\n\n```promql\n# Calculates average queue time over the past 5 minutes, converted to ms\nrate(hatchet_queued_to_assigned_seconds_sum[5m])\n  / rate(hatchet_queued_to_assigned_seconds_count[5m])\n  * 1e3\n```\n\n##### 3. Success and failure rates\n\n```promql\nrate(hatchet_succeeded_tasks_total[5m])\nrate(hatchet_failed_tasks_total[5m])\n```\n\n##### 4. Queue time distribution (histogram)\n\n```promql\nsum by (le) (\n  rate(hatchet_queued_to_assigned_seconds_bucket[5m])\n)\n```\n\n##### 5. Rate of tasks created vs. retried\n\n```promql\nrate(hatchet_created_tasks_total[5m])\nrate(hatchet_retried_tasks_total[5m])\n```\n\n##### 6. Task Assignment Rate\n\n```promql\nrate(hatchet_assigned_tasks_total[5m])\n```\n\n##### 7. Scheduling Timeout Rate\n\n```promql\nrate(hatchet_scheduling_timed_out_total[5m])\n```\n\n##### 8. Rate Limiting Impact\n\n```promql\nrate(hatchet_rate_limited_total[5m])\n```\n\n##### 9. Task Completion Ratio (Success vs Total)\n\n```promql\nrate(hatchet_succeeded_tasks_total[5m])\n/\n(rate(hatchet_succeeded_tasks_total[5m]) + rate(hatchet_failed_tasks_total[5m]))\n```\n\n##### 10. Task Cancellation Rate\n\n```promql\nrate(hatchet_cancelled_tasks_total[5m])\n```\n\n##### 11. Task Skip Rate\n\n```promql\nrate(hatchet_skipped_tasks_total[5m])\n```\n\n##### 12. Queue Processing Efficiency (Assigned vs Created)\n\n```promql\nrate(hatchet_assigned_tasks_total[5m]) / rate(hatchet_created_tasks_total[5m])\n```\n\n##### 13. 
Task Reassignment Rate\n\n```promql\nrate(hatchet_reassigned_tasks_total[5m])\n```\n\n### Tenant Metrics\n\nMetric Name, Type, Description\n\n`hatchet_tenant_workflow_duration_milliseconds`, Histogram, Duration of workflow execution in milliseconds (DAGs and single tasks)\n`hatchet_tenant_queue_invocations_total`, Counter, The total number of invocations of the queuer function\n`hatchet_tenant_created_tasks_total`, Counter, The total number of tasks created\n`hatchet_tenant_retried_tasks_total`, Counter, The total number of tasks retried\n`hatchet_tenant_succeeded_tasks_total`, Counter, The total number of tasks that succeeded\n`hatchet_tenant_failed_tasks_total`, Counter, The total number of tasks that failed (in a final state, not including retries)\n`hatchet_tenant_skipped_tasks_total`, Counter, The total number of tasks that were skipped\n`hatchet_tenant_cancelled_tasks_total`, Counter, The total number of tasks cancelled\n`hatchet_tenant_assigned_tasks`, Counter, The total number of tasks assigned to a worker\n`hatchet_tenant_scheduling_timed_out`, Counter, The total number of tasks that timed out while waiting to be scheduled\n`hatchet_tenant_rate_limited`, Counter, The total number of tasks that were rate limited\n`hatchet_tenant_queued_to_assigned`, Counter, The total number of unique tasks that were queued and later got assigned to a worker\n`hatchet_tenant_queued_to_assigned_time_seconds`, Histogram, Buckets of time in seconds spent in the queue before being assigned to a worker\n`hatchet_tenant_reassigned_tasks`, Counter, The total number of tasks that were reassigned to a worker\n`hatchet_tenant_used_worker_slots`, Gauge, The current number of worker slots being used\n`hatchet_tenant_available_worker_slots`, Gauge, The current number of worker slots available (free)\n`hatchet_tenant_worker_slots`, Gauge, The total number of worker slots (free + used)\n\n#### Example PromQL Queries\n\n##### 1. 
Workflow Duration by Tenant and Status\n\n```promql\nrate(hatchet_tenant_workflow_duration_milliseconds_sum[5m])\nby (tenant_id, workflow_name, status)\n/\nrate(hatchet_tenant_workflow_duration_milliseconds_count[5m])\nby (tenant_id, workflow_name, status)\n```\n\n##### 2. Tenant Queue Performance (95th percentile)\n\n```promql\nhistogram_quantile(0.95,\n  rate(hatchet_tenant_queued_to_assigned_time_seconds_bucket[5m])\n) by (tenant_id)\n```\n\n##### 3. Tenant Error Rate by Workflow\n\n```promql\nrate(hatchet_tenant_failed_tasks_total[5m]) by (tenant_id)\n/\nrate(hatchet_tenant_created_tasks_total[5m]) by (tenant_id)\n```\n\n##### 4. Tenant Task Throughput\n\n```promql\nrate(hatchet_tenant_succeeded_tasks_total[5m]) by (tenant_id)\n```\n\n##### 5. Tenant Retry Rate\n\n```promql\nrate(hatchet_tenant_retried_tasks_total[5m]) by (tenant_id)\n/\nrate(hatchet_tenant_created_tasks_total[5m]) by (tenant_id)\n```\n\n##### 6. Workflow Duration Distribution by Tenant\n\n```promql\nsum by (tenant_id, le) (\n  rate(hatchet_tenant_workflow_duration_milliseconds_bucket[5m])\n)\n```\n\n##### 7. Tenant Rate Limiting Impact\n\n```promql\nrate(hatchet_tenant_rate_limited[5m]) by (tenant_id)\n```\n\n##### 8. Per-Tenant Queue Utilization\n\n```promql\nrate(hatchet_tenant_queue_invocations_total[5m]) by (tenant_id)\n```\n\n##### 9. Tenant Scheduling Timeouts\n\n```promql\nrate(hatchet_tenant_scheduling_timed_out[5m]) by (tenant_id)\n```\n\n##### 10. Tenant Task Assignment Success Rate\n\n```promql\nrate(hatchet_tenant_assigned_tasks[5m]) by (tenant_id)\n/\nrate(hatchet_tenant_created_tasks_total[5m]) by (tenant_id)\n```\n\n##### 11. Tenant Task Reassignment Rate\n\n```promql\nrate(hatchet_tenant_reassigned_tasks[5m]) by (tenant_id)\n```\n\n### Cross-Tenant Analysis\n\n#### Example PromQL Queries\n\n##### 1. Top 5 Tenants by Task Volume\n\n```promql\ntopk(5,\n  sum by (tenant_id) (\n    rate(hatchet_tenant_created_tasks_total[1h])\n  )\n)\n```\n\n##### 2. 
Slowest Workflows Across All Tenants\n\n```promql\ntopk(10,\n  rate(hatchet_tenant_workflow_duration_milliseconds_sum[5m])\n  /\n  rate(hatchet_tenant_workflow_duration_milliseconds_count[5m])\n) by (tenant_id, workflow_name)\n```\n\n##### 3. Tenant Resource Consumption Comparison\n\n```promql\nsum by (tenant_id) (\n  rate(hatchet_tenant_workflow_duration_milliseconds_sum[1h])\n)\n/ 1000 / 60  # Convert to minutes\n```\n\n### Integration with Prometheus\n\nThis endpoint can be used to configure Prometheus to scrape tenant-specific metrics:\n\n```yaml\nscrape_configs:\n  - job_name: \"hatchet-tenant-metrics\"\n    static_configs:\n      - targets: [\"cloud.onhatchet.run\"]\n    metrics_path: \"/api/v1/tenants/707d0855-80ab-4e1f-a156-f1c4546cbf52/prometheus-metrics\"\n    scheme: \"https\"\n    authorization:\n      credentials: \"your-api-token-here\"\n```\n\n**Note:** Replace `cloud.onhatchet.run` with the URL where your Hatchet instance is hosted.\n\nThis provides tenant-isolated metrics that can be scraped directly by Prometheus or consumed by other monitoring tools that support the Prometheus text format."},"269":{"title":"Worker Configuration Options","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"# Worker Configuration Options\n\nThe Hatchet worker can be configured via environment variables and programmatic options. 
This document contains a list of all available options."},"270":{"title":"Basic Configuration","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"Variable, Description, Default Value\n\n`HATCHET_CLIENT_TOKEN`, Authentication token for the worker\n`HATCHET_CLIENT_HOST_PORT`, GRPC server host and port, \\* Inherited from token\n`HATCHET_CLIENT_API_URL` (TypeScript SDK), API server host and port, \\* Inherited from token\n`HATCHET_CLIENT_SERVER_URL` (Go SDK), API server host and port, \\* Inherited from token\n`HATCHET_CLIENT_NAMESPACE`, Namespace prefix for the worker, \\* Inherited from token"},"271":{"title":"Worker Runtime Configuration","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"Variable, Description, Default Value\n\n`name`, Friendly name of the worker\n`slots`, Maximum number of concurrent runs, `100`\n`durable_slots`, Maximum number of concurrent durable tasks, `1000`"},"272":{"title":"Worker healthcheck server (Python SDK)","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"These variables enable a local HTTP server that exposes `/health` and `/metrics` for a running worker.\n\nVariable, Description, Default Value\n\n`HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED`, Enable the local worker healthcheck server, `false`\n`HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT`, Port for the local worker healthcheck server, `8001`\n`HATCHET_CLIENT_WORKER_HEALTHCHECK_EVENT_LOOP_BLOCK_THRESHOLD_SECONDS`, If the worker listener process event loop is blocked longer than this threshold, `/health` returns 503, `5.0`"},"273":{"title":"TLS Configuration","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"Variable, Description, Default Value\n\n`HATCHET_CLIENT_TLS_STRATEGY`, TLS strategy (tls, 
mtls, none), `tls`\n`HATCHET_CLIENT_TLS_CERT_FILE`, Path to TLS certificate file\n`HATCHET_CLIENT_TLS_KEY_FILE`, Path to TLS key file\n`HATCHET_CLIENT_TLS_ROOT_CA_FILE`, Path to TLS root CA file\n`HATCHET_CLIENT_TLS_SERVER_NAME`, TLS server name"},"274":{"title":"Logging Configuration","pageTitle":"Worker Configuration Options","pageRoute":"hatchet://docs/self-hosting/worker-configuration-options","content":"Variable, Description, Default Value\n\n`HATCHET_CLIENT_LOG_LEVEL`, Log level for the worker client, `WARN`\n`HATCHET_CLIENT_LOG_FORMAT`, Log format for the worker client, `console`\n`HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH`, Maximum gRPC message receive size (Python SDK only), `4MB`\n`HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH`, Maximum gRPC message send size (Python SDK only), `4MB`"},"275":{"title":"Upgrading and Downgrading","pageTitle":"Upgrading and Downgrading","pageRoute":"hatchet://docs/self-hosting/upgrading-downgrading","content":"# Upgrading and Downgrading Hatchet\n\nThis guide covers how to safely upgrade and downgrade self-hosted Hatchet instances, with strategies for production-critical workloads."},"276":{"title":"Overview","pageTitle":"Upgrading and Downgrading","pageRoute":"hatchet://docs/self-hosting/upgrading-downgrading","content":"For production-critical deployments, we recommend the following workflow:\n\n1. **Snapshot** your database before upgrading\n2. **Upgrade** the Hatchet engine to the new version\n3. **Verify** the upgrade is working as expected\n4. If something goes wrong, **downgrade** by restoring the snapshot or running down migrations"},"277":{"title":"Step 1: Take a Database Snapshot","pageTitle":"Upgrading and Downgrading","pageRoute":"hatchet://docs/self-hosting/upgrading-downgrading","content":"Before any version change, create a point-in-time snapshot of your database. 
This gives you a fast, reliable rollback path if the upgrade causes issues.\n\nRefer to the backup and restore documentation for your database provider:\n\n- **PostgreSQL (self-managed):** [Backup and Restore](https://www.postgresql.org/docs/current/backup.html)\n- **AWS RDS:** [Backing Up and Restoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_CommonTasks.BackupRestore.html)\n- **Google Cloud SQL:** [Backup and Recovery](https://docs.cloud.google.com/sql/docs/postgres/backup-recovery/backups)\n- **Azure Database for PostgreSQL:** [Backup](https://learn.microsoft.com/en-us/azure/backup/backup-azure-database-postgresql)"},"278":{"title":"Step 2: Upgrade Engine Versions","pageTitle":"Upgrading and Downgrading","pageRoute":"hatchet://docs/self-hosting/upgrading-downgrading","content":"Once your snapshot is in place, upgrade the Hatchet engine.\n\n> **Info:** Hatchet runs database migrations automatically on engine startup. No separate\n>   migration step is required when upgrading.\n\n### Docker Compose\n\nUpdate the image tags in your `docker-compose.yml`:\n\n```yaml\nservices:\n  hatchet-engine:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-engine:v0.78.26\n    # ... rest of configuration\n\n  hatchet-dashboard:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-dashboard:v0.78.26\n    # ... rest of configuration\n```\n\nThen redeploy:\n\n> **Warning:** This can cause some downtime till the containers are back up.\n\n```bash\ndocker-compose pull\ndocker-compose down\ndocker-compose up -d\n```\n\n### Kubernetes (Helm)\n\nThe Hatchet Helm charts use a `sharedConfig.image.tag` value that sets the image tag for all components (engine, API, frontend, migrations). 
Set this to the target Hatchet version:\n\n```yaml\n# values.yaml\nsharedConfig:\n  image:\n    tag: \"v0.78.26\"\n```\n\nThen upgrade the release:\n\n```bash\n# hatchet-stack (standard deployment)\nhelm upgrade hatchet ./charts/hatchet-stack \\\n  --namespace hatchet \\\n  --values values.yaml\n\n# hatchet-ha (high-availability deployment)\nhelm upgrade hatchet ./charts/hatchet-ha \\\n  --namespace hatchet \\\n  --values values.yaml\n```\n\n> **Info:** You can also override individual component tags (e.g., `engine.image.tag`,\n>   `frontend.image.tag`), but `sharedConfig.image.tag` takes precedence when set.\n\n### Verification\n\nAfter upgrading, verify the deployment is healthy:\n\n1. Check that the engine is running and accepting connections\n2. Confirm the dashboard loads and shows the correct version\n3. Run a test workflow to verify end-to-end functionality\n4. Monitor logs for migration errors or unexpected warnings\n\n```bash\n# Docker Compose\ndocker-compose logs hatchet-engine | head -50\n\n# Kubernetes\nkubectl logs -n hatchet -l app=hatchet-engine --tail=50\n```"},"279":{"title":"Step 3: Downgrade if Needed","pageTitle":"Upgrading and Downgrading","pageRoute":"hatchet://docs/self-hosting/upgrading-downgrading","content":"If the upgrade causes issues, you have two options depending on your situation:\n\n> **Warning:** Both the following options will result in data loss and some downtime.\n\n### Option A: Restore from Database Snapshot (Recommended for Production)\n\nThis is the fastest and safest rollback path. 
It returns your database to the exact state before the upgrade, avoiding any risk of incomplete down migrations.\n\n\n### Stop all Hatchet services\n\nShut down all Hatchet engine instances to prevent writes during the restore.\n\n```bash\n# Docker Compose\ndocker-compose down\n\n# Kubernetes\nkubectl scale deployment hatchet-engine -n hatchet --replicas=0\n```\n\n### Restore the database snapshot\n\nFollow the restore procedure for your database provider (see [Step 1](#step-1-take-a-database-snapshot) for links to the relevant documentation).\n\n### Deploy the previous Hatchet version\n\nUpdate your deployment to use the previous version's image tags (see [Upgrade Engine Versions](#step-2-upgrade-engine-versions) for the relevant deployment method) and redeploy.\n\n### Verify the rollback\n\nConfirm the engine starts, the dashboard loads, and workflows execute correctly.\n\n\n### Option B: Run Down Migrations (Manual)\n\nIf you don't have a database snapshot or prefer a more targeted rollback, you can run down migrations to revert schema changes. See the [Downgrading DB Schema Manually](/self-hosting/downgrading-db-schema-manually) guide for detailed instructions on:\n\n- Finding the target migration version for your desired Hatchet version\n- Running `hatchet-migrate --down <migration_version>`\n- Deploying the older engine version\n\n> **Warning:** Down migrations may not fully reverse all data changes (e.g., dropped columns\n>   lose their data). 
For production-critical workloads, prefer restoring from a\n>   snapshot when possible."},"280":{"title":"Downgrading DB Schema Manually","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"# Downgrading DB Schema Manually\n\nThis guide explains how to safely downgrade your Hatchet DB schema to a previous version.\n\n> **Info:** For production-critical workloads, see the [Upgrading and\n>   Downgrading](/self-hosting/upgrading-downgrading) guide which covers database\n>   snapshots, upgrading, and safe rollback strategies.\n\n> **Warning:** Downgrading may result in data loss. Always test downgrades in a\n>   non-production environment first."},"281":{"title":"Overview","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"Downgrading Hatchet involves two steps:\n\n1. Running down migrations to revert database schema changes\n2. Deploying the older Hatchet version"},"282":{"title":"Prerequisites","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"- **Critical:** Backup your database before downgrading\n- Ensure the target version supports the current data in your database\n- Have access to run `hatchet-migrate` command\n- Verify that all migrations between your current version and target version have down migrations"},"283":{"title":"Finding the Target Migration Version","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"To downgrade to a specific Hatchet version, you need to identify the last migration that was included in that version.\n\nVisit the Hatchet GitHub repository for your target version:\n\n```\nhttps://github.com/hatchet-dev/hatchet/tree/{GIT_TAG}/cmd/hatchet-migrate/migrate/migrations\n```\n\nReplace `{GIT_TAG}` with your target version (e.g., 
`v0.71.0`).\n\nFind the last migration file in that directory - the timestamp at the beginning of the filename is your target migration version.\n\n**Example:**\n\n- Target Hatchet version: `v0.71.0`\n- Last migration file: `20250813183355_v1_0_36.sql`\n- Migration version: `20250813183355`"},"284":{"title":"Running Down Migrations","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"> **Info:** Use a stable release of the `hatchet-migrate` binary (avoid alpha tags) from\n>   the [Hatchet releases page](https://github.com/hatchet-dev/hatchet/tags) to\n>   ensure down migrations work correctly.\n\nOnce you have identified the target migration version, use the `hatchet-migrate` command with the `--down` flag:\n\n```bash\nhatchet-migrate --down 20241023223039\n```\n\nThis will:\n\n1. Connect to your database using the `DATABASE_URL` environment variable\n2. Check the current migration version\n3. Run all down migrations from the current version to the target version\n4. Display progress and confirm when complete"},"285":{"title":"Deploying the Older Version","pageTitle":"Downgrading DB Schema Manually","pageRoute":"hatchet://docs/self-hosting/downgrading-db-schema-manually","content":"After successfully running the down migrations, deploy the older Hatchet version:\n\n### Docker Compose\n\nUpdate your `docker-compose.yml`:\n\n```yaml\nservices:\n  hatchet-engine:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-engine:v0.71.0\n    # ... rest of configuration\n\n  hatchet-dashboard:\n    image: ghcr.io/hatchet-dev/hatchet/hatchet-dashboard:v0.71.0\n    # ... 
rest of configuration\n```\n\nThen restart:\n\n```bash\ndocker-compose down\ndocker-compose up -d\n```"},"286":{"title":"Benchmarking","pageTitle":"Benchmarking","pageRoute":"hatchet://docs/self-hosting/benchmarking","content":"# Benchmarking Hatchet\n\nThis page provides example benchmarks for Hatchet throughput and latency on an 8 CPU database (Amazon RDS, `m7g.2xlarge` instance type). These benchmarks were all run against a v1 Hatchet engine running version `v0.55.26`. For more information on the setup, see the [Setup](#setup) section. Note that on better hardware, there will be significantly better performance: we have tested up to 10k/s on an `m7g.8xlarge` instance.\n\nThe best way to benchmark Hatchet is to run your own benchmarks in your own environment. The benchmarks below are provided as a reference point for what you might expect to see in a typical setup. To run your own benchmarks, see the [Running your own benchmarks](#running-your-own-benchmarks) section."},"287":{"title":"Throughput","pageTitle":"Benchmarking","pageRoute":"hatchet://docs/self-hosting/benchmarking","content":"Below are summarized throughput benchmarks run at different incoming event rates. For each run, we note the database CPU utilization and estimated IOPS, which are the most relevant metrics for tracking performance on the database.\n\nThroughput (runs/s), Database CPU, Database IOPs\n\n100, 15%, 400\n500, 60%, 600\n2000, 83%, 800"},"288":{"title":"Latency","pageTitle":"Benchmarking","pageRoute":"hatchet://docs/self-hosting/benchmarking","content":"Benchmarks run using event-based triggering: this approximately doubles the queueing time of a workflow. 
The average latency of events in Hatchet can be approximated by two measurements that Hatchet reports:\n\n- **Average execution time per executed event**: The time from when the event starts execution to when it completes.\n- **Average write time per event**: The acknowledgement time for Hatchet to write the event.\n\nBelow is a table summarizing these latencies:\n\nThroughput (runs/s), Average Execution Time (ms), Average Write Time (ms)\n\n100, ~40, ~2.5\n500, ~48, ~2.6\n2000, ~220, ~5.7\n\nFor workloads up to around 100-500 events per second, the latency remains relatively low. As throughput scales toward 2000 events per second, the overall average execution time increases (though the Hatchet engine remained stable throughout the tests)."},"289":{"title":"Running your own benchmarks","pageTitle":"Benchmarking","pageRoute":"hatchet://docs/self-hosting/benchmarking","content":"Hatchet publishes a public load testing container which can be used for benchmarking. This container is available at `ghcr.io/hatchet-dev/hatchet/hatchet-loadtest`. It acts as a Hatchet worker and event emitter, so it simply expects a `HATCHET_CLIENT_TOKEN` to be set in the environment.\n\nFor example, to run 100 events/second for 60 seconds, you can use the following command:\n\n```bash\ndocker run -e HATCHET_CLIENT_TOKEN=your-token ghcr.io/hatchet-dev/hatchet/hatchet-loadtest -e \"100\" -d \"60s\" --level \"warn\" --slots \"100\"\n```\n\nThe event emitter which is bundled into the container has difficulty emitting more than 2k events/s. As a result, to test higher throughputs, it is recommended to run multiple containers in parallel. Since each container manages its own workflows and worker, it is recommended to use the `HATCHET_CLIENT_NAMESPACE` environment variable to ensure that workflows are not duplicated across containers. 
For example:\n\n```bash\n# first container\ndocker run -e HATCHET_CLIENT_TOKEN=your-token -e HATCHET_CLIENT_NAMESPACE=loadtest1 ghcr.io/hatchet-dev/hatchet/hatchet-loadtest -e \"2000\" -d \"60s\" --level \"warn\" --slots \"100\"\n\n# second container\ndocker run -e HATCHET_CLIENT_TOKEN=your-token -e HATCHET_CLIENT_NAMESPACE=loadtest2 ghcr.io/hatchet-dev/hatchet/hatchet-loadtest -e \"2000\" -d \"60s\" --level \"warn\" --slots \"100\"\n```\n\n### Reference\n\nThis container takes the following arguments:\n\n```sh\nUsage:\n  loadtest [flags]\n\nFlags:\n  -c, --concurrency int        concurrency specifies the maximum events to run at the same time\n  -D, --delay duration         delay specifies the time to wait in each event to simulate slow tasks\n  -d, --duration duration      duration specifies the total time to run the load test (default 10s)\n  -F, --eventFanout int        eventFanout specifies the number of events to fanout (default 1)\n  -e, --events int             events per second (default 10)\n  -f, --failureRate float32    failureRate specifies the rate of failure for the worker\n  -h, --help                   help for loadtest\n  -l, --level string           logLevel specifies the log level (debug, info, warn, error) (default \"info\")\n  -P, --payloadSize string     payload specifies the size of the payload to send (default \"0kb\")\n  -s, --slots int              slots specifies the number of slots to use in the worker\n  -w, --wait duration          wait specifies the total time to wait until events complete (default 10s)\n  -p, --workerDelay duration   workerDelay specifies the time to wait before starting the worker\n```\n\n### Running a benchmark on Kubernetes\n\nYou can use the following Pod manifest to run the load test on Kubernetes (make sure to fill in `HATCHET_CLIENT_TOKEN`):\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  name: loadtest1a\n  namespace: staging\nspec:\n  restartPolicy: Never\n  containers:\n    - image: 
ghcr.io/hatchet-dev/hatchet/hatchet-loadtest:v0.56.0\n      imagePullPolicy: Always\n      name: loadtest\n      command: [\"/hatchet/hatchet-load-test\"]\n      args:\n        - loadtest\n        - --duration\n        - \"60s\"\n        - --events\n        - \"100\"\n        - --slots\n        - \"100\"\n        - --wait\n        - \"10s\"\n        - --level\n        - warn\n      env:\n        - name: HATCHET_CLIENT_TOKEN\n          value: \"your-token\"\n        - name: HATCHET_CLIENT_NAMESPACE\n          value: \"loadtest1a\"\n      resources:\n        limits:\n          memory: 1Gi\n        requests:\n          cpu: 500m\n          memory: 1Gi\n```"},"290":{"title":"Setup","pageTitle":"Benchmarking","pageRoute":"hatchet://docs/self-hosting/benchmarking","content":"All tests were run on a Kubernetes cluster on AWS configured with:\n\n- **Hatchet engine replicas:** 2 (using `c7i.4xlarge` instances to ensure CPU was not a bottleneck)\n- **Database:** `m7g.2xlarge` instance type (Amazon RDS)\n- **Hatchet version:** `v0.55.26`\n- **AWS region:** `us-west-1`\n\nThe database configuration was chosen to avoid disk and CPU contention until higher throughputs were reached. We observed that up to around 2000 events/second, the chosen database instance size kept up without major performance degradation. The Hatchet engine was deployed with 2 replicas, and each engine instance had ample CPU headroom on `c7i.4xlarge` nodes."},"291":{"title":"Data Retention","pageTitle":"Data Retention","pageRoute":"hatchet://docs/self-hosting/data-retention","content":"# Data Retention\n\nIn Hatchet engine version `0.36.0` and above, you can configure the default data retention per tenant for workflow runs and events. The default value is set to 30 days, which means that all workflow runs which were created over 30 days ago and are in a final state (i.e. 
completed or failed), and all events which were created over 30 days ago, will be deleted.\n\nThis can be configured by setting the following environment variable to a Go duration string:\n\n```sh\nSERVER_LIMITS_DEFAULT_TENANT_RETENTION_PERIOD=720h # 30 days\n```"},"292":{"title":"Improving Performance","pageTitle":"Improving Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"# Tuning Hatchet for Performance\n\nGenerally, with a reasonable database instance (4 CPU, 8GB RAM) and small payload sizes, Hatchet can handle hundreds of events and workflow runs per second. However, as throughput increases, you will start to see performance degradation. The most common causes of performance degradation are listed below."},"293":{"title":"Database Connection Pooling","pageTitle":"Improving Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"The default max connection pool size is 50 per engine instance. If you have a high throughput, you may need to increase this value. This value can be set via the `DATABASE_MAX_CONNS` environment variable on the engine. Note that if you increase this value, you will need to increase the [`max_connections`](https://www.postgresql.org/docs/current/runtime-config-connection.html) value on your Postgres instance as well."},"294":{"title":"High Database CPU","pageTitle":"Improving Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"Due to the nature of Hatchet workloads, the first bottleneck you will typically see on the database is CPU. If you have access to database query performance metrics, it is worth checking the cause of high CPU. If there is high lock contention on a query, please let the Hatchet team know, as we are looking to reduce lock contention in future releases. 
Otherwise, if you are seeing high CPU usage without any lock contention, you should increase the number of cores on your database instance.\n\nIf you are performing a high number of inserts, particularly in a short period of time, and this correlates with high CPU usage, you can improve performance in several ways by using bulk endpoints or tuning the buffer settings.\n\n### Using bulk endpoints\n\nThere are two main ways to initiate workflows, by sending events to Hatchet and by starting workflows directly. In most example workflows, we push a single event or workflow at a time, but it is possible to send multiple events or workflows in one request.\n\n#### Events\n\n #### Python\n\n```python\nhatchet.event.bulk_push(\n    events=[\n        BulkPushEventWithMetadata(\n            key=\"user:create\",\n            payload={\"userId\": str(i), \"should_skip\": False},\n        )\n        for i in range(10)\n    ]\n)\n```\n\n#### Typescript\n\n```typescript\nconst events = [\n  {\n    payload: { test: 'test1' },\n    additionalMetadata: { user_id: 'user1', source: 'test' },\n  },\n  {\n    payload: { test: 'test2' },\n    additionalMetadata: { user_id: 'user2', source: 'test' },\n  },\n  {\n    payload: { test: 'test3' },\n    additionalMetadata: { user_id: 'user3', source: 'test' },\n  },\n];\n\nawait hatchet.events.bulkPush('user:create', events);\n```\n\n#### Go\n\n```go\nc, err := client.New(\n  client.WithHostPort(\"127.0.0.1\", 7077),\n)\n\nif err != nil {\n  panic(err)\n}\n\nevents := []client.EventWithMetadata{\n  {\n    Event: &events.TestEvent{\n      Name: \"testing\",\n    },\n    AdditionalMetadata: map[string]string{\"hello\": \"world1\"},\n    Key: \"user:create\",\n  },\n  {\n    Event: &events.TestEvent{\n      Name: \"testing2\",\n    },\n    AdditionalMetadata: map[string]string{\"hello\": \"world2\"},\n    Key: \"user:create\",\n  },\n}\n\nc.Event().BulkPush(\n  context.Background(),\n  events,\n)\n```\n\n> **Warning:** There is a maximum limit of 
1000 events per request.\n\n#### Workflows\n\n#### Python\n\n```python\n@bulk_parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:\n    # 👀 Create each workflow run to spawn\n    child_workflow_runs = [\n        bulk_child_wf.create_bulk_run_item(\n            input=ChildInput(a=str(i)),\n            key=f\"child{i}\",\n            additional_metadata={\"hello\": \"earth\"},\n        )\n        for i in range(input.n)\n    ]\n\n    # 👀 Run workflows in bulk to improve performance\n    spawn_results = await bulk_child_wf.aio_run_many(child_workflow_runs)\n\n    return {\"results\": spawn_results}\n```\n\n#### Typescript\n\n```typescript\nconst parent = hatchet.task({\n  name: 'simple',\n  fn: async (input: SimpleInput, ctx) => {\n    // Bulk run two tasks in parallel\n    const child = await ctx.bulkRunChildren([\n      {\n        workflow: simple,\n        input: {\n          Message: 'Hello, World!',\n        },\n      },\n      {\n        workflow: simple,\n        input: {\n          Message: 'Hello, Moon!',\n        },\n      },\n    ]);\n\n    return {\n      TransformedMessage: `${child[0].TransformedMessage} ${child[1].TransformedMessage}`,\n    };\n  },\n});\n```\n\n#### Go\n\n```go\nw.RegisterWorkflow(\n    &worker.WorkflowJob{\n        Name: \"parent-workflow\",\n        On: worker.Event(\"fanout:create\"),\n        Description: \"Example workflow for spawning child workflows.\",\n        Steps: []*worker.WorkflowStep{\n            worker.Fn(func(ctx worker.HatchetContext) error {\n                // Prepare the batch of workflows to spawn\n                childWorkflows := make([]*worker.SpawnWorkflowsOpts, 10)\n\n                for i := 0; i < 10; i++ {\n                    childInput := \"child-input-\" + strconv.Itoa(i)\n                    childWorkflows[i] = &worker.SpawnWorkflowsOpts{\n                        WorkflowName: \"child-workflow\",\n                 
       Input:        childInput,\n                        Key:          \"child-key-\" + strconv.Itoa(i),\n                    }\n                }\n\n                // Spawn all workflows in bulk using SpawnWorkflows\n                createdWorkflows, err := ctx.SpawnWorkflows(childWorkflows)\n                if err != nil {\n                    return err\n                }\n\n                return nil\n            }),\n        },\n    },\n)\n\n```\n\n> **Warning:** There is a maximum limit of 1000 workflows per bulk request.\n\n### Tuning Buffer Settings\n\nHatchet has configurable write buffers which enable it to reduce the total number of database queries by batching DB writes. This speeds up throughput dramatically at the expense of a slight increase in latency. In general, increasing the buffer size and reducing the buffer flush frequency reduces the CPU load on the DB.\n\nThe two most important configurable settings for the buffers are\n\n1. **Flush Period:** The amount of milliseconds to wait between subsequent writes to the database\n\n2. 
**Max Buffer Size:** The maximum size of the internal buffer writing to the database.\n\nThe following environment variables are all configurable:\n\n```sh\n# Default values if the values below are not set\nSERVER_FLUSH_PERIOD_MILLISECONDS\nSERVER_FLUSH_ITEMS_THRESHOLD\n\n# Settings for writing workflow runs to the database\nSERVER_WORKFLOWRUNBUFFER_FLUSH_PERIOD_MILLISECONDS\nSERVER_WORKFLOWRUNBUFFER_FLUSH_ITEMS_THRESHOLD\n\n# Settings for writing events to the database\nSERVER_EVENTBUFFER_FLUSH_PERIOD_MILLISECONDS\nSERVER_EVENTBUFFER_FLUSH_ITEMS_THRESHOLD\n\n# Settings for releasing slots for workers\nSERVER_RELEASESEMAPHOREBUFFER_FLUSH_PERIOD_MILLISECONDS\nSERVER_RELEASESEMAPHOREBUFFER_FLUSH_ITEMS_THRESHOLD\n\n# Settings for writing queue items to the database\nSERVER_QUEUESTEPRUNBUFFER_FLUSH_PERIOD_MILLISECONDS\nSERVER_QUEUESTEPRUNBUFFER_FLUSH_ITEMS_THRESHOLD\n```\n\nA buffer configuration for higher throughput might look like the following:\n\n```sh\n\n# Default values if the values below are not set\nSERVER_FLUSH_PERIOD_MILLISECONDS=250\nSERVER_FLUSH_ITEMS_THRESHOLD=1000\n\n# Settings for writing workflow runs to the database\nSERVER_WORKFLOWRUNBUFFER_FLUSH_PERIOD_MILLISECONDS=100\nSERVER_WORKFLOWRUNBUFFER_FLUSH_ITEMS_THRESHOLD=500\n\n# Settings for writing events to the database\nSERVER_EVENTBUFFER_FLUSH_PERIOD_MILLISECONDS=1000\nSERVER_EVENTBUFFER_FLUSH_ITEMS_THRESHOLD=1000\n\n# Settings for releasing slots for workers\nSERVER_RELEASESEMAPHOREBUFFER_FLUSH_PERIOD_MILLISECONDS=100\nSERVER_RELEASESEMAPHOREBUFFER_FLUSH_ITEMS_THRESHOLD=200\n\n# Settings for writing queue items to the database\nSERVER_QUEUESTEPRUNBUFFER_FLUSH_PERIOD_MILLISECONDS=100\nSERVER_QUEUESTEPRUNBUFFER_FLUSH_ITEMS_THRESHOLD=500\n\n```\n\nBenchmarking and tuning on your own infrastructure is recommended to find the optimal values for your workload and use case."},"295":{"title":"Slow Time to Start","pageTitle":"Improving 
Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"With higher throughput, you may see a slower time to start for each step run in a workflow. The reason for this is typically that each step run needs to be processed in an internal message queue before getting sent to the worker. You can increase the throughput of this internal queue by setting the following environment variable (default value of `100`):\n\n```\nSERVER_MSGQUEUE_RABBITMQ_QOS=200\n```\n\nNote that this refers to the number of messages that can be processed in parallel, and each message typically corresponds to at least one database write, so it will not improve performance if this value is significantly higher than the `DATABASE_MAX_CONNS` value. If you are seeing warnings in the engine logs that you are saturating connections, consider decreasing this value."},"296":{"title":"Database Settings and Autovacuum","pageTitle":"Improving Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"There are several scenarios where Postgres flags may need to be modified to improve performance. By default, every workflow run and step run are stored for 30 days in the Postgres instance. Without tuning autovacuum settings, you may see high table bloat across many tables. If you are storing > 500 GB of workflow run or step run data, we recommend the following autovacuum settings to autovacuum more aggressively:\n\n```\nautovacuum_max_workers=10\nautovacuum_vacuum_scale_factor=0.1\nautovacuum_analyze_scale_factor=0.05\nautovacuum_vacuum_threshold=25\nautovacuum_analyze_threshold=25\nautovacuum_vacuum_cost_delay=10\nautovacuum_vacuum_cost_limit=1000\n```\n\nIf your database has enough memory capacity, you may need to increase the `work_mem` or `maintenance_work_mem` value. 
For example, on database instances with a large amount of memory available, we typically set the following settings:\n\n```\nmaintenance_work_mem=2147483647\nwork_mem=125828\n```\n\nAdditionally, if there is enough disk capacity, you may see improved performance setting the following flag:\n\n```\nmax_wal_size=15040\n```"},"297":{"title":"Scaling the Hatchet Engine","pageTitle":"Improving Performance","pageRoute":"hatchet://docs/self-hosting/improving-performance","content":"By default, the Hatchet engine runs all internal services on a single instance. The internal services on the Hatchet engine are as follows:\n\n- **grpc-api**: the gRPC endpoint for the Hatchet engine. This is the primary endpoint for Hatchet workers. Not to be confused with the Hatchet REST API, which is a separate service that we typically refer to as `api`.\n- **controllers**: the internal service that manages the lifecycle of workflow runs, step runs, and events. This service is write-heavy on the database and read-heavy from the message queue.\n- **scheduler**: the internal service that schedules step runs to workers. This service is both read-heavy and write-heavy on the database.\n\nIt is possible to horizontally scale the Hatchet engine by running multiple instances of the engine. However, if you are seeing a large number of warnings from the scheduler when running the other services in the same engine instance, we recommend running the scheduler on a separate instance. See the [high availability](./high-availability) documentation for more information on how to run the scheduler on a separate instance."},"298":{"title":"Read Replicas","pageTitle":"Read Replicas","pageRoute":"hatchet://docs/self-hosting/read-replicas","content":"# Read Replica Support\n\nFor high-throughput production deployments, Hatchet supports database read replicas to distribute database load and improve read performance. 
This feature allows you to direct read queries to a separate database instance while continuing to send write operations to the primary database. **This can significantly improve performance in read-heavy workloads without requiring application changes.**\n\nYou can enable read replica support by setting the following environment variables:\n\n```bash\nREAD_REPLICA_ENABLED=true\nREAD_REPLICA_DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5432/hatchet'\nREAD_REPLICA_MAX_CONNS=200\nREAD_REPLICA_MIN_CONNS=50\n```"},"299":{"title":"Configuration Options","pageTitle":"Read Replicas","pageRoute":"hatchet://docs/self-hosting/read-replicas","content":"- `READ_REPLICA_ENABLED`: Set to `true` to enable read replica support\n- `READ_REPLICA_DATABASE_URL`: Connection string for the read replica database\n- `READ_REPLICA_MAX_CONNS`: Maximum number of connections in the read replica connection pool\n- `READ_REPLICA_MIN_CONNS`: Minimum number of connections to maintain in the read replica connection pool"},"300":{"title":"Limitations","pageTitle":"Read Replicas","pageRoute":"hatchet://docs/self-hosting/read-replicas","content":"- Replication lag may result in slightly stale or missing data being returned from read operations\n- The read replica is only utilized by analytical queries (to load workflow runs, task runs, and metrics in the UI)"},"301":{"title":"Trace Sampling","pageTitle":"Trace Sampling","pageRoute":"hatchet://docs/self-hosting/sampling","content":"# Trace Sampling\n\nFor a very high-volume setup, it may be desirable to sample results for the dashboard for the purpose of limiting the amount of data stored in the Hatchet database. 
**This does not impact the behavior of the Hatchet engine and all tasks will still be processed.** This can be done by setting the following environment variables:\n\n```bash\nSERVER_SAMPLING_ENABLED=t\nSERVER_SAMPLING_RATE=0.1 # only 10% of results will be sampled\n```\n\nSampling is done at the workflow run level, so all tasks within the same workflow will be sampled, along with all of their events. Sampling has the following limitations:\n\n- Parent tasks which spawn child tasks are not guaranteed to be sampled, even if their children are. This means that the child task may be shown in the dashboard without a corresponding parent task, and vice versa.\n- There is no way to configure sampling to ensure that failure events are sampled.\n- Only tasks which are sampled can be cancelled or replayed via the REST APIs: do not use this feature if dependent on programmatic cancellations and replays."},"302":{"title":"SMTP Server","pageTitle":"SMTP Server","pageRoute":"hatchet://docs/self-hosting/smtp-server","content":"# SMTP Server\n\nConfigure email delivery for tenant invites and Hatchet alerts using any standard SMTP provider (Gmail, SendGrid, AWS SES, etc)."},"303":{"title":"Prerequisites","pageTitle":"SMTP Server","pageRoute":"hatchet://docs/self-hosting/smtp-server","content":"- An SMTP provider that supports [PLAIN](https://datatracker.ietf.org/doc/html/rfc4616/) authentication with a username and password."},"304":{"title":"Configuration","pageTitle":"SMTP Server","pageRoute":"hatchet://docs/self-hosting/smtp-server","content":"Set the following environment variables:\n\n```bash\n# Enable SMTP\nexport SERVER_EMAIL_KIND=smtp\nexport SERVER_EMAIL_SMTP_ENABLED=true\n\n# Connection Settings\nexport SERVER_EMAIL_SMTP_SERVER_ADDR=smtp.gmail.com:587            # Host and port\nexport SERVER_EMAIL_SMTP_AUTH_USERNAME=your-email@yourdomain.com   # Username or API Key ID\nexport SERVER_EMAIL_SMTP_AUTH_PASSWORD=your-password               # Password or API Secret Key\n\n# 
Sender Identity\nexport SERVER_EMAIL_SMTP_FROM_EMAIL=noreply@yourdomain.com         # Sender email address\nexport SERVER_EMAIL_SMTP_SUPPORT_EMAIL=support@yourdomain.com      # Support contact email\nexport SERVER_EMAIL_SMTP_FROM_NAME=\"Hatchet\"                       # (Optional) Display name\n```"},"305":{"title":"Provider Reference","pageTitle":"SMTP Server","pageRoute":"hatchet://docs/self-hosting/smtp-server","content":"Common configuration values for major providers:\n\nProvider, Server Address, Username, Password\n\n**[Gmail](https://support.google.com/mail/answer/185833?hl=en)**, `smtp.gmail.com:587`, Your Email, [App Password](https://myaccount.google.com/apppasswords)\n**[SendGrid](https://docs.sendgrid.com/for-developers/sending-email/integrating-with-the-smtp-api)**, `smtp.sendgrid.net:587`, `apikey`, Your API Key\n**[AWS SES](https://docs.aws.amazon.com/ses/latest/dg/send-email-smtp.html)**, `email-smtp.us-east-1.amazonaws.com:587`, IAM Username, IAM Secret\n**[Outlook](https://support.microsoft.com/en-us/office/pop-imap-and-smtp-settings-8361e398-8af4-4e97-b147-6c6c4ac95353)**, `smtp.office365.com:587`, Your Email, Your Password\n\n> **Info:** To request another provider or SMTP authentication protocol, open a feature\n>   request on\n>   [GitHub](https://github.com/hatchet-dev/hatchet/issues/new?template=feature_request.md)."},"306":{"title":"Client","pageTitle":"Client","pageRoute":"hatchet://docs/reference/python/client","content":"# Hatchet Python SDK Reference\n\nThis is the Python SDK reference, documenting methods available for interacting with Hatchet resources. 
Check out the [user guide](../../home) for an introduction for getting your first tasks running."},"307":{"title":"The Hatchet Python Client","pageTitle":"Client","pageRoute":"hatchet://docs/reference/python/client","content":"Main client for interacting with the Hatchet SDK.\n\nThis class provides access to various client interfaces and utility methods for working with Hatchet workers, workflows, tasks, and our various feature clients.\n\nMethods:\n\nName, Description\n\n`worker`, Create a Hatchet worker on which to run workflows.\n`workflow`, Define a Hatchet workflow, which can then declare `task`s and be `run`, `schedule`d, and so on.\n`task`, A decorator to transform a function into a standalone Hatchet task that runs as part of a workflow.\n`durable_task`, A decorator to transform a function into a standalone Hatchet _durable_ task that runs as part of a workflow.\n\n### Attributes\n\n#### `cron`\n\nThe cron client is a client for managing cron workflows within Hatchet.\n\n#### `event`\n\nThe event client, which you can use to push events to Hatchet.\n\n#### `logs`\n\nThe logs client is a client for interacting with Hatchet's logs API.\n\n#### `metrics`\n\nThe metrics client is a client for reading metrics out of Hatchet's metrics API.\n\n#### `rate_limits`\n\nThe rate limits client is a wrapper for Hatchet's gRPC API that makes it easier to work with rate limits in Hatchet.\n\n#### `runs`\n\nThe runs client is a client for interacting with task and workflow runs within Hatchet.\n\n#### `scheduled`\n\nThe scheduled client is a client for managing scheduled workflows within Hatchet.\n\n#### `workers`\n\nThe workers client is a client for managing workers programmatically within Hatchet.\n\n#### `workflows`\n\nThe workflows client is a client for managing workflows programmatically within Hatchet.\n\nNote that workflows are the declaration, _not_ the individual runs. 
If you're looking for runs, use the `RunsClient` instead.\n\n#### `tenant_id`\n\nThe tenant id you're operating in.\n\n#### `namespace`\n\nThe current namespace you're interacting with.\n\n### Functions\n\n#### `worker`\n\nCreate a Hatchet worker on which to run workflows.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str`, The name of the worker., _required_\n`slots`, `int \\, None`, slot count for standard tasks., `None`\n`durable_slots`, `int \\, None`, slot count for durable tasks., `None`\n`labels`, `dict[str, str \\, int] \\, None`, A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels., `None`\n`workflows`, `list[BaseWorkflow[Any]] \\, None`, A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them., `None`\n`lifespan`, `LifespanFn \\, None`, A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks., `None`\n\nReturns:\n\nType, Description\n\n`Worker`, The created `Worker` object, which exposes an instance method `start` which can be called to start the worker.\n\n#### `workflow`\n\nDefine a Hatchet workflow, which can then declare `task`s and be `run`, `schedule`d, and so on.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str`, The name of the workflow., _required_\n`description`, `str \\, None`, A description for the workflow, `None`\n`input_validator`, `type[TWorkflowInput] \\, None`, A Pydantic model to use as a validator for the `input` to the tasks in the workflow. If no validator is provided, defaults to an `EmptyModel` under the hood. 
The `EmptyModel` is a Pydantic model with no fields specified, and with the `extra` config option set to `\"allow\"`., `None`\n`on_events`, `list[str] \\, None`, A list of event triggers for the workflow - events which cause the workflow to be run., `None`\n`on_crons`, `list[str] \\, None`, A list of cron triggers for the workflow., `None`\n`version`, `str \\, None`, A version for the workflow, `None`\n`sticky`, `StickyStrategy \\, None`, A sticky strategy for the workflow, `None`\n`default_priority`, `int`, The priority of the workflow. Higher values will cause this workflow to have priority in scheduling over other, lower priority ones., `1`\n`concurrency`, `int \\, ConcurrencyExpression \\, list[ConcurrencyExpression] \\, None`, A concurrency object controlling the concurrency settings for this workflow. If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n`task_defaults`, `TaskDefaults`, A `TaskDefaults` object controlling the default task settings for this workflow., `TaskDefaults()`\n`default_filters`, `list[DefaultFilter] \\, None`, A list of filters to create with the workflow is created. Note that this is a helper to allow you to create filters \"declaratively\" without needing to make a separate API call once the workflow is created to create them., `None`\n`default_additional_metadata`, `JSONSerializableMapping \\, None`, A dictionary of additional metadata to attach to each run of this workflow by default., `None`\n\nReturns:\n\nType, Description\n\n`Workflow[EmptyModel] \\, Workflow[TWorkflowInput]`, The created `Workflow` object, which can be used to declare tasks, run the workflow, and so on.\n\n#### `task`\n\nA decorator to transform a function into a standalone Hatchet task that runs as part of a workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the task. 
If not specified, defaults to the name of the function being wrapped by the `task` decorator., `None`\n`description`, `str \\, None`, An optional description for the task., `None`\n`input_validator`, `type[TWorkflowInput] \\, None`, A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`., `None`\n`on_events`, `list[str] \\, None`, A list of event triggers for the task - events which cause the task to be run., `None`\n`on_crons`, `list[str] \\, None`, A list of cron triggers for the task., `None`\n`version`, `str \\, None`, A version for the task., `None`\n`sticky`, `StickyStrategy \\, None`, A sticky strategy for the task., `None`\n`default_priority`, `int`, The priority of the task. Higher values will cause this task to have priority in scheduling., `1`\n`concurrency`, `int \\, ConcurrencyExpression \\, list[ConcurrencyExpression] \\, None`, A concurrency object controlling the concurrency settings for this task. If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n`schedule_timeout`, `Duration`, The maximum time allowed for scheduling the task., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time allowed for executing the task., `timedelta(seconds=60)`\n`retries`, `int`, The number of times to retry the task before failing., `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the task., `None`\n`desired_worker_labels`, `dict[str, DesiredWorkerLabel] \\, None`, A dictionary of desired worker labels that determine to which worker the task should be assigned., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., 
`None`\n`default_filters`, `list[DefaultFilter] \\, None`, A list of filters to create with the task is created. Note that this is a helper to allow you to create filters \"declaratively\" without needing to make a separate API call once the task is created to create them., `None`\n`default_additional_metadata`, `JSONSerializableMapping \\, None`, A dictionary of additional metadata to attach to each run of this task by default., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[EmptyModel, Context, P], R \\, CoroutineLike[R]]], Standalone[EmptyModel, R]] \\, Callable[[Callable[Concatenate[TWorkflowInput, Context, P], R \\, CoroutineLike[R]]], Standalone[TWorkflowInput, R]]`, A decorator which creates a `Standalone` task object.\n\n#### `durable_task`\n\nA decorator to transform a function into a standalone Hatchet _durable_ task that runs as part of a workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator., `None`\n`description`, `str \\, None`, An optional description for the task., `None`\n`input_validator`, `type[TWorkflowInput] \\, None`, A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`., `None`\n`on_events`, `list[str] \\, None`, A list of event triggers for the task - events which cause the task to be run., `None`\n`on_crons`, `list[str] \\, None`, A list of cron triggers for the task., `None`\n`version`, `str \\, None`, A version for the task., `None`\n`sticky`, `StickyStrategy \\, None`, A sticky strategy for the task., `None`\n`default_priority`, `int`, The priority of the task. Higher values will cause this task to have priority in scheduling., `1`\n`concurrency`, `int \\, ConcurrencyExpression \\, list[ConcurrencyExpression] \\, None`, A concurrency object controlling the concurrency settings for this task. 
If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n`schedule_timeout`, `Duration`, The maximum time allowed for scheduling the task., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time allowed for executing the task., `timedelta(seconds=60)`\n`retries`, `int`, The number of times to retry the task before failing., `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the task., `None`\n`desired_worker_labels`, `dict[str, DesiredWorkerLabel] \\, None`, A dictionary of desired worker labels that determine to which worker the task should be assigned., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., `None`\n`default_filters`, `list[DefaultFilter] \\, None`, A list of filters to create with the task is created. Note that this is a helper to allow you to create filters \"declaratively\" without needing to make a separate API call once the task is created to create them., `None`\n`default_additional_metadata`, `JSONSerializableMapping \\, None`, A dictionary of additional metadata to attach to each run of this task by default., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[EmptyModel, DurableContext, P], R \\, CoroutineLike[R]]], Standalone[EmptyModel, R]] \\, Callable[[Callable[Concatenate[TWorkflowInput, DurableContext, P], R \\, CoroutineLike[R]]], Standalone[TWorkflowInput, R]]`, A decorator which creates a `Standalone` task object."},"308":{"title":"Context","pageTitle":"Context","pageRoute":"hatchet://docs/reference/python/context","content":"# Context\n\nThe Hatchet Context class provides helper methods and useful data to tasks at runtime. 
It is passed as the second argument to all tasks and durable tasks.\n\nThere are two types of context classes you'll encounter:\n\n- `Context` - The standard context for regular tasks with methods for logging, task output retrieval, cancellation, and more\n- `DurableContext` - An extended context for durable tasks that includes additional methods for durable execution like `aio_wait_for` and `aio_sleep_for`"},"309":{"title":"Context","pageTitle":"Context","pageRoute":"hatchet://docs/reference/python/context","content":"Methods:\n\nName, Description\n\n`was_skipped`, Check if a given task was skipped. You can read about skipping in [the docs](/v1/conditions#checking-if-a-task-was-skipped).\n`task_output`, Get the output of a parent task in a DAG.\n`cancel`, Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True.\n`aio_cancel`, Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True.\n`done`, Check if the current task run has been cancelled.\n`log`, Log a line to the Hatchet API. This will send the log line to the Hatchet API and return immediately.\n`release_slot`, Manually release the slot for the current step run to free up a slot on the worker. Note that this is an advanced feature and should be used with caution.\n`put_stream`, Put a stream event to the Hatchet API. This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer.\n`refresh_timeout`, Refresh the timeout for the current task run. 
You can read about refreshing timeouts in [the docs](../../home/timeouts#refreshing-timeouts).\n`fetch_task_run_error`, **DEPRECATED**: Use `get_task_run_error` instead.\n\n### Attributes\n\n#### `was_triggered_by_event`\n\nA property that indicates whether the workflow was triggered by an event.\n\nReturns:\n\nType, Description\n\n`bool`, True if the workflow was triggered by an event, False otherwise.\n\n#### `workflow_input`\n\nThe input to the workflow, as a dictionary. It's recommended to use the `input` parameter to the task (the first argument passed into the task at runtime) instead of this property.\n\nReturns:\n\nType, Description\n\n`JSONSerializableMapping`, The input to the workflow.\n\n#### `lifespan`\n\nThe worker lifespan, if it exists. You can read about lifespans in [the docs](../../home/lifespans).\n\n**Note: You'll need to cast the return type of this property to the type returned by your lifespan generator.**\n\n#### `workflow_run_id`\n\nThe id of the current workflow run.\n\nReturns:\n\nType, Description\n\n`str`, The id of the current workflow run.\n\n#### `retry_count`\n\nThe retry count of the current task run, which corresponds to the number of times the task has been retried.\n\nReturns:\n\nType, Description\n\n`int`, The retry count of the current task run.\n\n#### `attempt_number`\n\nThe attempt number of the current task run, which corresponds to the number of times the task has been attempted, including the initial attempt. This is one more than the retry count.\n\nReturns:\n\nType, Description\n\n`int`, The attempt number of the current task run.\n\n#### `additional_metadata`\n\nThe additional metadata sent with the current task run.\n\nReturns:\n\nType, Description\n\n`JSONSerializableMapping`, The additional metadata sent with the current task run, or None if no additional metadata was sent.\n\n#### `parent_workflow_run_id`\n\nThe parent workflow run id of the current task run, if it exists. 
This is useful for knowing which workflow run spawned this run as a child.\n\nReturns:\n\nType, Description\n\n`str \\, None`, The parent workflow run id of the current task run, or None if it does not exist.\n\n#### `priority`\n\nThe priority that the current task was run with.\n\nReturns:\n\nType, Description\n\n`int \\, None`, The priority of the current task run, or None if no priority was set.\n\n#### `workflow_id`\n\nThe id of the workflow that this task belongs to.\n\nReturns:\n\nType, Description\n\n`str \\, None`, The id of the workflow that this task belongs to.\n\n#### `workflow_version_id`\n\nThe id of the workflow version that this task belongs to.\n\nReturns:\n\nType, Description\n\n`str \\, None`, The id of the workflow version that this task belongs to.\n\n#### `task_run_errors`\n\nA helper intended to be used in an on-failure step to retrieve the errors that occurred in upstream task runs.\n\nReturns:\n\nType, Description\n\n`dict[str, str]`, A dictionary mapping task names to their error messages.\n\n### Functions\n\n#### `was_skipped`\n\nCheck if a given task was skipped. You can read about skipping in [the docs](/v1/conditions#checking-if-a-task-was-skipped).\n\nParameters:\n\nName, Type, Description, Default\n\n`task`, `Task[TWorkflowInput, R]`, The task to check the status of (skipped or not)., _required_\n\nReturns:\n\nType, Description\n\n`bool`, True if the task was skipped, False otherwise.\n\n#### `task_output`\n\nGet the output of a parent task in a DAG.\n\nParameters:\n\nName, Type, Description, Default\n\n`task`, `Task[TWorkflowInput, R]`, The task whose output you want to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`R`, The output of the parent task, validated against the task's validators.\n\nRaises:\n\nType, Description\n\n`ValueError`, If the task was skipped or if the step output for the task is not found.\n\n#### `cancel`\n\nCancel the current task run. 
This will call the Hatchet API to cancel the step run and set the exit flag to True.\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_cancel`\n\nCancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True.\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `done`\n\nCheck if the current task run has been cancelled.\n\nReturns:\n\nType, Description\n\n`bool`, True if the task run has been cancelled, False otherwise.\n\n#### `log`\n\nLog a line to the Hatchet API. This will send the log line to the Hatchet API and return immediately.\n\nParameters:\n\nName, Type, Description, Default\n\n`line`, `str \\, JSONSerializableMapping`, The line to log. Can be a string or a JSON serializable mapping., _required_\n`raise_on_error`, `bool`, If True, will raise an exception if the log fails. Defaults to False., `False`\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `release_slot`\n\nManually release the slot for the current step run to free up a slot on the worker. Note that this is an advanced feature and should be used with caution.\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `put_stream`\n\nPut a stream event to the Hatchet API. This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer.\n\nParameters:\n\nName, Type, Description, Default\n\n`data`, `str \\, bytes`, The data to send to the Hatchet API. Can be a string or bytes., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `refresh_timeout`\n\nRefresh the timeout for the current task run. You can read about refreshing timeouts in [the docs](../../home/timeouts#refreshing-timeouts).\n\nParameters:\n\nName, Type, Description, Default\n\n`increment_by`, `str \\, timedelta`, The amount of time to increment the timeout by. Can be a string (e.g. 
\"5m\") or a timedelta object., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `fetch_task_run_error`\n\n**DEPRECATED**: Use `get_task_run_error` instead.\n\nA helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run.\n\nParameters:\n\nName, Type, Description, Default\n\n`task`, `Task[TWorkflowInput, R]`, The task whose error you want to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`str \\, None`, The error message of the task run, or None if no error occurred."},"310":{"title":"DurableContext","pageTitle":"Context","pageRoute":"hatchet://docs/reference/python/context","content":"Bases: `Context`\n\nMethods:\n\nName, Description\n\n`aio_wait_for`, Durably wait for either a sleep or an event.\n`aio_sleep_for`, Lightweight wrapper for durable sleep. Allows for shorthand usage of `ctx.aio_wait_for` when specifying a sleep condition.\n\n### Functions\n\n#### `aio_wait_for`\n\nDurably wait for either a sleep or an event.\n\nParameters:\n\nName, Type, Description, Default\n\n`signal_key`, `str`, The key to use for the durable event. This is used to identify the event in the Hatchet API., _required_\n`*conditions`, `SleepCondition \\, UserEventCondition \\, OrGroup`, The conditions to wait for. Can be a SleepCondition or UserEventCondition., `()`\n\nReturns:\n\nType, Description\n\n`dict[str, Any]`, A dictionary containing the results of the wait.\n\nRaises:\n\nType, Description\n\n`ValueError`, If the durable event listener is not available.\n\n#### `aio_sleep_for`\n\nLightweight wrapper for durable sleep. 
Allows for shorthand usage of `ctx.aio_wait_for` when specifying a sleep condition.\n\nFor more complicated conditions, use `ctx.aio_wait_for` directly."},"311":{"title":"Cron","pageTitle":"Cron","pageRoute":"hatchet://docs/reference/python/feature-clients/cron","content":"# Cron Client\n\nBases: `BaseRestClient`\n\nThe cron client is a client for managing cron workflows within Hatchet.\n\nMethods:\n\nName, Description\n\n`aio_create`, Create a new workflow cron trigger.\n`aio_delete`, Delete a workflow cron trigger.\n`aio_get`, Retrieve a specific workflow cron trigger by ID.\n`aio_list`, Retrieve a list of all workflow cron triggers matching the criteria.\n`create`, Create a new workflow cron trigger.\n`delete`, Delete a workflow cron trigger.\n`get`, Retrieve a specific workflow cron trigger by ID.\n`list`, Retrieve a list of all workflow cron triggers matching the criteria.\n\n### Functions\n\n#### `aio_create`\n\nCreate a new workflow cron trigger.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to trigger., _required_\n`cron_name`, `str`, The name of the cron trigger., _required_\n`expression`, `str`, The cron expression defining the schedule., _required_\n`input`, `JSONSerializableMapping`, The input data for the cron workflow., _required_\n`additional_metadata`, `JSONSerializableMapping`, Additional metadata associated with the cron trigger., _required_\n`priority`, `int \\, None`, The priority of the cron workflow trigger., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, The created cron workflow instance.\n\n#### `aio_delete`\n\nDelete a workflow cron trigger.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_id`, `str`, The ID of the cron trigger to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_get`\n\nRetrieve a specific workflow cron trigger by ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_id`, `str`, The cron trigger ID or 
CronWorkflows instance to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, The requested cron workflow instance.\n\n#### `aio_list`\n\nRetrieve a list of all workflow cron triggers matching the criteria.\n\nParameters:\n\nName, Type, Description, Default\n\n`offset`, `int \\, None`, The offset to start the list from., `None`\n`limit`, `int \\, None`, The maximum number of items to return., `None`\n`workflow_id`, `str \\, None`, The ID of the workflow to filter by., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Filter by additional metadata keys., `None`\n`order_by_field`, `CronWorkflowsOrderByField \\, None`, The field to order the list by., `None`\n`order_by_direction`, `WorkflowRunOrderByDirection \\, None`, The direction to order the list by., `None`\n`workflow_name`, `str \\, None`, The name of the workflow to filter by., `None`\n`cron_name`, `str \\, None`, The name of the cron trigger to filter by., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflowsList`, A list of cron workflows.\n\n#### `create`\n\nCreate a new workflow cron trigger.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to trigger., _required_\n`cron_name`, `str`, The name of the cron trigger., _required_\n`expression`, `str`, The cron expression defining the schedule., _required_\n`input`, `JSONSerializableMapping`, The input data for the cron workflow., _required_\n`additional_metadata`, `JSONSerializableMapping`, Additional metadata associated with the cron trigger., _required_\n`priority`, `int \\, None`, The priority of the cron workflow trigger., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, The created cron workflow instance.\n\n#### `delete`\n\nDelete a workflow cron trigger.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_id`, `str`, The ID of the cron trigger to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### 
`get`\n\nRetrieve a specific workflow cron trigger by ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_id`, `str`, The cron trigger ID or CronWorkflows instance to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, The requested cron workflow instance.\n\n#### `list`\n\nRetrieve a list of all workflow cron triggers matching the criteria.\n\nParameters:\n\nName, Type, Description, Default\n\n`offset`, `int \\, None`, The offset to start the list from., `None`\n`limit`, `int \\, None`, The maximum number of items to return., `None`\n`workflow_id`, `str \\, None`, The ID of the workflow to filter by., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Filter by additional metadata keys., `None`\n`order_by_field`, `CronWorkflowsOrderByField \\, None`, The field to order the list by., `None`\n`order_by_direction`, `WorkflowRunOrderByDirection \\, None`, The direction to order the list by., `None`\n`workflow_name`, `str \\, None`, The name of the workflow to filter by., `None`\n`cron_name`, `str \\, None`, The name of the cron trigger to filter by., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflowsList`, A list of cron workflows."},"312":{"title":"Filters","pageTitle":"Filters","pageRoute":"hatchet://docs/reference/python/feature-clients/filters","content":"# Filters Client\n\nBases: `BaseRestClient`\n\nThe filters client is a client for interacting with Hatchet's filters API.\n\nMethods:\n\nName, Description\n\n`aio_create`, Create a new filter.\n`aio_delete`, Delete a filter by its ID.\n`aio_get`, Get a filter by its ID.\n`aio_list`, List filters for a given tenant.\n`aio_update`, Update a filter by its ID.\n`create`, Create a new filter.\n`delete`, Delete a filter by its ID.\n`get`, Get a filter by its ID.\n`list`, List filters for a given tenant.\n`update`, Update a filter by its ID.\n\n### Functions\n\n#### `aio_create`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, 
Default\n\n`workflow_id`, `str`, The ID of the workflow to associate with the filter., _required_\n`expression`, `str`, The expression to evaluate for the filter., _required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter.\n\n#### `aio_delete`\n\nDelete a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to delete., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The deleted filter.\n\n#### `aio_get`\n\nGet a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The filter with the specified ID.\n\n#### `aio_list`\n\nList filters for a given tenant.\n\nParameters:\n\nName, Type, Description, Default\n\n`limit`, `int \\, None`, The maximum number of filters to return., `None`\n`offset`, `int \\, None`, The number of filters to skip before starting to collect the result set., `None`\n`workflow_ids`, `list[str] \\, None`, A list of workflow IDs to filter by., `None`\n`scopes`, `list[str] \\, None`, A list of scopes to filter by., `None`\n\nReturns:\n\nType, Description\n\n`V1FilterList`, A list of filters matching the specified criteria.\n\n#### `aio_update`\n\nUpdate a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to update., _required_\n`updates`, `V1UpdateFilterRequest`, The updates to apply to the filter., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The updated filter.\n\n#### `create`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to associate with the filter., _required_\n`expression`, `str`, The expression to evaluate for the filter., 
_required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter.\n\n#### `delete`\n\nDelete a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to delete., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The deleted filter.\n\n#### `get`\n\nGet a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The filter with the specified ID.\n\n#### `list`\n\nList filters for a given tenant.\n\nParameters:\n\nName, Type, Description, Default\n\n`limit`, `int \\, None`, The maximum number of filters to return., `None`\n`offset`, `int \\, None`, The number of filters to skip before starting to collect the result set., `None`\n`workflow_ids`, `list[str] \\, None`, A list of workflow IDs to filter by., `None`\n`scopes`, `list[str] \\, None`, A list of scopes to filter by., `None`\n\nReturns:\n\nType, Description\n\n`V1FilterList`, A list of filters matching the specified criteria.\n\n#### `update`\n\nUpdate a filter by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`filter_id`, `str`, The ID of the filter to update., _required_\n`updates`, `V1UpdateFilterRequest`, The updates to apply to the filter., _required_\n\nReturns:\n\nType, Description\n\n`V1Filter`, The updated filter."},"313":{"title":"Logs","pageTitle":"Logs","pageRoute":"hatchet://docs/reference/python/feature-clients/logs","content":"# Logs Client\n\nBases: `BaseRestClient`\n\nThe logs client is a client for interacting with Hatchet's logs API.\n\nMethods:\n\nName, Description\n\n`aio_list`, List log lines for a given task run.\n`list`, List log lines for a given task run.\n\n### Functions\n\n#### `aio_list`\n\nList log lines for a given 
task run.\n\nParameters:\n\nName, Type, Description, Default\n\n`task_run_id`, `str`, The ID of the task run to list logs for., _required_\n`limit`, `int`, Maximum number of log lines to return (default: 1000)., `1000`\n`since`, `datetime \\, None`, The start time to get logs for., `None`\n`until`, `datetime \\, None`, The end time to get logs for., `None`\n\nReturns:\n\nType, Description\n\n`V1LogLineList`, A list of log lines for the specified task run.\n\n#### `list`\n\nList log lines for a given task run.\n\nParameters:\n\nName, Type, Description, Default\n\n`task_run_id`, `str`, The ID of the task run to list logs for., _required_\n`limit`, `int`, Maximum number of log lines to return (default: 1000)., `1000`\n`since`, `datetime \\, None`, The start time to get logs for., `None`\n`until`, `datetime \\, None`, The end time to get logs for., `None`\n\nReturns:\n\nType, Description\n\n`V1LogLineList`, A list of log lines for the specified task run."},"314":{"title":"Metrics","pageTitle":"Metrics","pageRoute":"hatchet://docs/reference/python/feature-clients/metrics","content":"# Metrics Client\n\nBases: `BaseRestClient`\n\nThe metrics client is a client for reading metrics out of Hatchet's metrics API.\n\nMethods:\n\nName, Description\n\n`aio_get_queue_metrics`, Retrieve the current queue metrics for the tenant.\n`aio_get_task_metrics`, Retrieve task metrics, grouped by status (queued, running, completed, failed, cancelled).\n`aio_get_task_stats`, Get task statistics for the tenant.\n`aio_scrape_tenant_prometheus_metrics`, Scrape Prometheus metrics for the tenant. Returns the metrics in Prometheus text format.\n`get_queue_metrics`, Retrieve the current queue metrics for the tenant.\n`get_task_metrics`, Retrieve task metrics, grouped by status (queued, running, completed, failed, cancelled).\n`get_task_stats`, Get task statistics for the tenant.\n`scrape_tenant_prometheus_metrics`, Scrape Prometheus metrics for the tenant. 
Returns the metrics in Prometheus text format.\n\n### Functions\n\n#### `aio_get_queue_metrics`\n\nRetrieve the current queue metrics for the tenant.\n\nReturns:\n\nType, Description\n\n`dict[str, Any]`, The current queue metrics\n\n#### `aio_get_task_metrics`\n\nRetrieve task metrics, grouped by status (queued, running, completed, failed, cancelled).\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, Start time for the metrics query (defaults to the past day if unset), `None`\n`until`, `datetime \\, None`, End time for the metrics query, `None`\n`workflow_ids`, `list[str] \\, None`, List of workflow IDs to filter the metrics by, `None`\n`parent_task_external_id`, `str \\, None`, ID of the parent task to filter by (note that parent task here refers to the task that spawned this task as a child), `None`\n`triggering_event_external_id`, `str \\, None`, ID of the triggering event to filter by, `None`\n\nReturns:\n\nType, Description\n\n`TaskMetrics`, Task metrics\n\n#### `aio_get_task_stats`\n\nGet task statistics for the tenant.\n\nReturns:\n\nType, Description\n\n`dict[str, TaskStat]`, A dictionary mapping task names to their statistics.\n\n#### `aio_scrape_tenant_prometheus_metrics`\n\nScrape Prometheus metrics for the tenant. 
Returns the metrics in Prometheus text format.\n\nReturns:\n\nType, Description\n\n`str`, The metrics, returned in Prometheus text format\n\n#### `get_queue_metrics`\n\nRetrieve the current queue metrics for the tenant.\n\nReturns:\n\nType, Description\n\n`dict[str, Any]`, The current queue metrics\n\n#### `get_task_metrics`\n\nRetrieve task metrics, grouped by status (queued, running, completed, failed, cancelled).\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, Start time for the metrics query (defaults to the past day if unset), `None`\n`until`, `datetime \\, None`, End time for the metrics query, `None`\n`workflow_ids`, `list[str] \\, None`, List of workflow IDs to filter the metrics by, `None`\n`parent_task_external_id`, `str \\, None`, ID of the parent task to filter by (note that parent task here refers to the task that spawned this task as a child), `None`\n`triggering_event_external_id`, `str \\, None`, ID of the triggering event to filter by, `None`\n\nReturns:\n\nType, Description\n\n`TaskMetrics`, Task metrics\n\n#### `get_task_stats`\n\nGet task statistics for the tenant.\n\nReturns:\n\nType, Description\n\n`dict[str, TaskStat]`, A dictionary mapping task names to their statistics.\n\n#### `scrape_tenant_prometheus_metrics`\n\nScrape Prometheus metrics for the tenant. 
Returns the metrics in Prometheus text format.\n\nReturns:\n\nType, Description\n\n`str`, The metrics, returned in Prometheus text format"},"315":{"title":"Rate Limits","pageTitle":"Rate Limits","pageRoute":"hatchet://docs/reference/python/feature-clients/rate_limits","content":"# Rate Limits Client\n\nBases: `BaseRestClient`\n\nThe rate limits client is a wrapper for Hatchet's gRPC API that makes it easier to work with rate limits in Hatchet.\n\nMethods:\n\nName, Description\n\n`aio_put`, Put a rate limit for a given key.\n`put`, Put a rate limit for a given key.\n\n### Functions\n\n#### `aio_put`\n\nPut a rate limit for a given key.\n\nParameters:\n\nName, Type, Description, Default\n\n`key`, `str`, The key to set the rate limit for., _required_\n`limit`, `int`, The rate limit to set., _required_\n`duration`, `RateLimitDuration`, The duration of the rate limit., `SECOND`\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `put`\n\nPut a rate limit for a given key.\n\nParameters:\n\nName, Type, Description, Default\n\n`key`, `str`, The key to set the rate limit for., _required_\n`limit`, `int`, The rate limit to set., _required_\n`duration`, `RateLimitDuration`, The duration of the rate limit., `SECOND`\n\nReturns:\n\nType, Description\n\n`None`, None"},"316":{"title":"Runs","pageTitle":"Runs","pageRoute":"hatchet://docs/reference/python/feature-clients/runs","content":"# Runs Client\n\nBases: `BaseRestClient`\n\nThe runs client is a client for interacting with task and workflow runs within Hatchet.\n\nMethods:\n\nName, Description\n\n`get`, Get workflow run details for a given workflow run ID.\n`aio_get`, Get workflow run details for a given workflow run ID.\n`get_status`, Get workflow run status for a given workflow run ID.\n`aio_get_status`, Get workflow run status for a given workflow run ID.\n`list`, List task runs according to a set of filters.\n`aio_list`, List task runs according to a set of filters.\n`create`, Trigger a new workflow 
run.\n`aio_create`, Trigger a new workflow run.\n`replay`, Replay a task or workflow run.\n`aio_replay`, Replay a task or workflow run.\n`bulk_replay`, Replay task or workflow runs in bulk, according to a set of filters.\n`aio_bulk_replay`, Replay task or workflow runs in bulk, according to a set of filters.\n`cancel`, Cancel a task or workflow run.\n`aio_cancel`, Cancel a task or workflow run.\n`bulk_cancel`, Cancel task or workflow runs in bulk, according to a set of filters.\n`aio_bulk_cancel`, Cancel task or workflow runs in bulk, according to a set of filters.\n`get_result`, Get the result of a workflow run by its external ID.\n`aio_get_result`, Get the result of a workflow run by its external ID.\n`get_run_ref`, Get a reference to a workflow run.\n`get_task_run`, Get task run details for a given task run ID.\n`aio_get_task_run`, Get task run details for a given task run ID.\n`bulk_cancel_by_filters_with_pagination`, Cancel runs matching the specified filters in chunks.\n`bulk_replay_by_filters_with_pagination`, Replay runs matching the specified filters in chunks.\n`aio_bulk_cancel_by_filters_with_pagination`, Cancel runs matching the specified filters in chunks.\n`aio_bulk_replay_by_filters_with_pagination`, Replay runs matching the specified filters in chunks.\n`subscribe_to_stream`\n\n### Functions\n\n#### `get`\n\nGet workflow run details for a given workflow run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_run_id`, `str`, The ID of the workflow run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1WorkflowRunDetails`, Workflow run details for the specified workflow run ID.\n\n#### `aio_get`\n\nGet workflow run details for a given workflow run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_run_id`, `str`, The ID of the workflow run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1WorkflowRunDetails`, Workflow run details for the specified workflow run 
ID.\n\n#### `get_status`\n\nGet workflow run status for a given workflow run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_run_id`, `str`, The ID of the workflow run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1TaskStatus`, The task status\n\n#### `aio_get_status`\n\nGet workflow run status for a given workflow run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_run_id`, `str`, The ID of the workflow run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1TaskStatus`, The task status\n\n#### `list`\n\nList task runs according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for filtering task runs., `None`\n`only_tasks`, `bool`, Whether to only list task runs., `False`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`limit`, `int \\, None`, The maximum number of task runs to return., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter task runs by., `None`\n`until`, `datetime \\, None`, The end time for filtering task runs., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter task runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter task runs by., `None`\n`worker_id`, `str \\, None`, The worker ID to filter task runs by., `None`\n`parent_task_external_id`, `str \\, None`, The parent task external ID to filter task runs by., `None`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n`include_payloads`, `bool`, Whether to include payloads in the response., `True`\n\nReturns:\n\nType, Description\n\n`V1TaskSummaryList`, A list of task runs matching the specified filters.\n\n#### `aio_list`\n\nList task runs according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for filtering task 
runs., `None`\n`only_tasks`, `bool`, Whether to only list task runs., `False`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`limit`, `int \\, None`, The maximum number of task runs to return., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter task runs by., `None`\n`until`, `datetime \\, None`, The end time for filtering task runs., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter task runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter task runs by., `None`\n`worker_id`, `str \\, None`, The worker ID to filter task runs by., `None`\n`parent_task_external_id`, `str \\, None`, The parent task external ID to filter task runs by., `None`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n`include_payloads`, `bool`, Whether to include payloads in the response., `True`\n\nReturns:\n\nType, Description\n\n`V1TaskSummaryList`, A list of task runs matching the specified filters.\n\n#### `create`\n\nTrigger a new workflow run.\n\nIMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workflows if possible. This method is intended to be an escape hatch. For more details, see [the documentation](../../../sdk/python/runnables#workflow).\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to trigger., _required_\n`input`, `JSONSerializableMapping`, The input data for the workflow run., _required_\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata associated with the workflow run., `None`\n`priority`, `int \\, None`, The priority of the workflow run., `None`\n\nReturns:\n\nType, Description\n\n`V1WorkflowRunDetails`, The details of the triggered workflow run.\n\n#### `aio_create`\n\nTrigger a new workflow run.\n\nIMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workflows if possible. 
This method is intended to be an escape hatch. For more details, see [the documentation](../../../sdk/python/runnables#workflow).\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to trigger., _required_\n`input`, `JSONSerializableMapping`, The input data for the workflow run., _required_\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata associated with the workflow run., `None`\n`priority`, `int \\, None`, The priority of the workflow run., `None`\n\nReturns:\n\nType, Description\n\n`V1WorkflowRunDetails`, The details of the triggered workflow run.\n\n#### `replay`\n\nReplay a task or workflow run.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the task or workflow run to replay., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_replay`\n\nReplay a task or workflow run.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the task or workflow run to replay., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `bulk_replay`\n\nReplay task or workflow runs in bulk, according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`opts`, `BulkCancelReplayOpts`, Options for bulk replay, including filters and IDs., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_bulk_replay`\n\nReplay task or workflow runs in bulk, according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`opts`, `BulkCancelReplayOpts`, Options for bulk replay, including filters and IDs., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `cancel`\n\nCancel a task or workflow run.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the task or workflow run to cancel., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_cancel`\n\nCancel a task or workflow 
run.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the task or workflow run to cancel., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `bulk_cancel`\n\nCancel task or workflow runs in bulk, according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`opts`, `BulkCancelReplayOpts`, Options for bulk cancel, including filters and IDs., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_bulk_cancel`\n\nCancel task or workflow runs in bulk, according to a set of filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`opts`, `BulkCancelReplayOpts`, Options for bulk cancel, including filters and IDs., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `get_result`\n\nGet the result of a workflow run by its external ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the workflow run to retrieve the result for., _required_\n\nReturns:\n\nType, Description\n\n`JSONSerializableMapping`, The result of the workflow run.\n\n#### `aio_get_result`\n\nGet the result of a workflow run by its external ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The external ID of the workflow run to retrieve the result for., _required_\n\nReturns:\n\nType, Description\n\n`JSONSerializableMapping`, The result of the workflow run.\n\n#### `get_run_ref`\n\nGet a reference to a workflow run.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_run_id`, `str`, The ID of the workflow run to get a reference to., _required_\n\nReturns:\n\nType, Description\n\n`WorkflowRunRef`, A reference to the specified workflow run.\n\n#### `get_task_run`\n\nGet task run details for a given task run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`task_run_id`, `str`, The ID of the task run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1TaskSummary`, Task run 
details for the specified task run ID.\n\n#### `aio_get_task_run`\n\nGet task run details for a given task run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`task_run_id`, `str`, The ID of the task run to retrieve details for., _required_\n\nReturns:\n\nType, Description\n\n`V1TaskSummary`, Task run details for the specified task run ID.\n\n#### `bulk_cancel_by_filters_with_pagination`\n\nCancel runs matching the specified filters in chunks.\n\nThe motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.\n\nThis method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.\n\nParameters:\n\nName, Type, Description, Default\n\n`sleep_time`, `int`, The time to sleep between processing chunks, in seconds., `3`\n`chunk_size`, `int`, The maximum number of run IDs to process in each chunk., `500`\n`since`, `datetime \\, None`, The start time for filtering runs., `None`\n`until`, `datetime \\, None`, The end time for filtering runs., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter runs by., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter runs by., `None`\n\n#### `bulk_replay_by_filters_with_pagination`\n\nReplay runs matching the specified filters in chunks.\n\nThe motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.\n\nThis method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.\n\nParameters:\n\nName, Type, Description, Default\n\n`sleep_time`, `int`, The 
time to sleep between processing chunks, in seconds., `3`\n`chunk_size`, `int`, The maximum number of run IDs to process in each chunk., `500`\n`since`, `datetime \\, None`, The start time for filtering runs., `None`\n`until`, `datetime \\, None`, The end time for filtering runs., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter runs by., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter runs by., `None`\n\n#### `aio_bulk_cancel_by_filters_with_pagination`\n\nCancel runs matching the specified filters in chunks.\n\nThe motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than the API would normally be able to handle, with automatic pagination and chunking to help limit the pressure on the API.\n\nThis method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.\n\nParameters:\n\nName, Type, Description, Default\n\n`sleep_time`, `int`, The time to sleep between processing chunks, in seconds., `3`\n`chunk_size`, `int`, The maximum number of run IDs to process in each chunk., `500`\n`since`, `datetime \\, None`, The start time for filtering runs., `None`\n`until`, `datetime \\, None`, The end time for filtering runs., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter runs by., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter runs by., `None`\n\n#### `aio_bulk_replay_by_filters_with_pagination`\n\nReplay runs matching the specified filters in chunks.\n\nThe motivation for this method is to provide an easy way to perform bulk operations by filters over a larger number of runs than the API would normally be able to handle, with automatic pagination and chunking to help limit the 
pressure on the API.\n\nThis method first pulls the IDs of the runs from the API, and then feeds them back to the API in chunks.\n\nParameters:\n\nName, Type, Description, Default\n\n`sleep_time`, `int`, The time to sleep between processing chunks, in seconds., `3`\n`chunk_size`, `int`, The maximum number of run IDs to process in each chunk., `500`\n`since`, `datetime \\, None`, The start time for filtering runs., `None`\n`until`, `datetime \\, None`, The end time for filtering runs., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses to filter runs by., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata to filter runs by., `None`\n`workflow_ids`, `list[str] \\, None`, The workflow IDs to filter runs by., `None`\n\n#### `subscribe_to_stream`"},"317":{"title":"Scheduled","pageTitle":"Scheduled","pageRoute":"hatchet://docs/reference/python/feature-clients/scheduled","content":"# Scheduled Client\n\nBases: `BaseRestClient`\n\nThe scheduled client is a client for managing scheduled workflows within Hatchet.\n\nMethods:\n\nName, Description\n\n`aio_bulk_delete`, Bulk delete scheduled workflow runs.\n`aio_bulk_update`, Bulk reschedule scheduled workflow runs.\n`aio_create`, Creates a new scheduled workflow run.\n`aio_delete`, Deletes a scheduled workflow run by its ID.\n`aio_get`, Retrieves a specific scheduled workflow by scheduled run trigger ID.\n`aio_list`, Retrieves a list of scheduled workflows based on provided filters.\n`aio_update`, Reschedule a scheduled workflow run by its ID.\n`bulk_delete`, Bulk delete scheduled workflow runs.\n`bulk_update`, Bulk reschedule scheduled workflow runs.\n`create`, Creates a new scheduled workflow run.\n`delete`, Deletes a scheduled workflow run by its ID.\n`get`, Retrieves a specific scheduled workflow by scheduled run trigger ID.\n`list`, Retrieves a list of scheduled workflows based on provided filters.\n`update`, Reschedule a scheduled workflow run by its ID.\n\n### Functions\n\n#### 
`aio_bulk_delete`\n\nBulk delete scheduled workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_ids`, `list[str] \\, None`, Explicit list of scheduled workflow run IDs to delete., `None`\n`workflow_id`, `str \\, None`, Filter by workflow ID., `None`\n`parent_workflow_run_id`, `str \\, None`, Filter by parent workflow run ID., `None`\n`parent_step_run_id`, `str \\, None`, Filter by parent step run ID., `None`\n`statuses`, `list[ScheduledRunStatus] \\, None`, Filter by scheduled run statuses., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Filter by additional metadata key/value pairs., `None`\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflowsBulkDeleteResponse`, The bulk delete response containing deleted IDs and per-item errors.\n\nRaises:\n\nType, Description\n\n`ValueError`, If neither `scheduled_ids` nor any filter field is provided.\n\n#### `aio_bulk_update`\n\nBulk reschedule scheduled workflow runs.\n\nSee `bulk_update` for parameter details.\n\n#### `aio_create`\n\nCreates a new scheduled workflow run.\n\nIMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workflows if possible. This method is intended to be an escape hatch. 
For more details, see [the documentation](../../../sdk/python/runnables#workflow).\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to schedule., _required_\n`trigger_at`, `datetime`, The datetime when the run should be triggered., _required_\n`input`, `JSONSerializableMapping`, The input data for the scheduled workflow., _required_\n`additional_metadata`, `JSONSerializableMapping`, Additional metadata associated with the future run as a key-value pair., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The created scheduled workflow instance.\n\n#### `aio_delete`\n\nDeletes a scheduled workflow run by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The ID of the scheduled workflow run to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_get`\n\nRetrieves a specific scheduled workflow by scheduled run trigger ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The scheduled workflow trigger ID to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The requested scheduled workflow instance.\n\n#### `aio_list`\n\nRetrieves a list of scheduled workflows based on provided filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`offset`, `int \\, None`, The offset to use in pagination., `None`\n`limit`, `int \\, None`, The maximum number of scheduled workflows to return., `None`\n`workflow_id`, `str \\, None`, The ID of the workflow to filter by., `None`\n`parent_workflow_run_id`, `str \\, None`, The ID of the parent workflow run to filter by., `None`\n`statuses`, `list[ScheduledRunStatus] \\, None`, A list of statuses to filter by., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to filter by., `None`\n`order_by_field`, `ScheduledWorkflowsOrderByField \\, None`, The field to order the results by., `None`\n`order_by_direction`, 
`WorkflowRunOrderByDirection \\, None`, The direction to order the results by., `None`\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflowsList`, A list of scheduled workflows matching the provided filters.\n\n#### `aio_update`\n\nReschedule a scheduled workflow run by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The ID of the scheduled workflow run to reschedule., _required_\n`trigger_at`, `datetime`, The datetime when the run should be triggered., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The updated scheduled workflow instance.\n\n#### `bulk_delete`\n\nBulk delete scheduled workflow runs.\n\nProvide either:\n\n- `scheduled_ids` (explicit list of scheduled run IDs), or\n- one or more filter fields (`workflow_id`, `parent_workflow_run_id`, `parent_step_run_id`, `statuses`, `additional_metadata`)\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_ids`, `list[str] \\, None`, Explicit list of scheduled workflow run IDs to delete., `None`\n`workflow_id`, `str \\, None`, Filter by workflow ID., `None`\n`parent_workflow_run_id`, `str \\, None`, Filter by parent workflow run ID., `None`\n`parent_step_run_id`, `str \\, None`, Filter by parent step run ID., `None`\n`statuses`, `list[ScheduledRunStatus] \\, None`, Filter by scheduled run statuses., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Filter by additional metadata key/value pairs., `None`\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflowsBulkDeleteResponse`, The bulk delete response containing deleted IDs and per-item errors.\n\nRaises:\n\nType, Description\n\n`ValueError`, If neither `scheduled_ids` nor any filter field is provided.\n\n#### `bulk_update`\n\nBulk reschedule scheduled workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`updates`, `list[ScheduledWorkflowsBulkUpdateItem] \\, list[tuple[str, datetime]]`, Either: - a list of `(scheduled_id, trigger_at)` tuples, or - a list of 
`ScheduledWorkflowsBulkUpdateItem` objects, _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflowsBulkUpdateResponse`, The bulk update response containing updated IDs and per-item errors.\n\n#### `create`\n\nCreates a new scheduled workflow run.\n\nIMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workflows if possible. This method is intended to be an escape hatch. For more details, see [the documentation](../../../sdk/python/runnables#workflow).\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str`, The name of the workflow to schedule., _required_\n`trigger_at`, `datetime`, The datetime when the run should be triggered., _required_\n`input`, `JSONSerializableMapping`, The input data for the scheduled workflow., _required_\n`additional_metadata`, `JSONSerializableMapping`, Additional metadata associated with the future run as a key-value pair., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The created scheduled workflow instance.\n\n#### `delete`\n\nDeletes a scheduled workflow run by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The ID of the scheduled workflow run to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `get`\n\nRetrieves a specific scheduled workflow by scheduled run trigger ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The scheduled workflow trigger ID to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The requested scheduled workflow instance.\n\n#### `list`\n\nRetrieves a list of scheduled workflows based on provided filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`offset`, `int \\, None`, The offset to use in pagination., `None`\n`limit`, `int \\, None`, The maximum number of scheduled workflows to return., `None`\n`workflow_id`, `str \\, None`, The ID of the workflow to filter by., `None`\n`parent_workflow_run_id`, 
`str \\, None`, The ID of the parent workflow run to filter by., `None`\n`statuses`, `list[ScheduledRunStatus] \\, None`, A list of statuses to filter by., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to filter by., `None`\n`order_by_field`, `ScheduledWorkflowsOrderByField \\, None`, The field to order the results by., `None`\n`order_by_direction`, `WorkflowRunOrderByDirection \\, None`, The direction to order the results by., `None`\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflowsList`, A list of scheduled workflows matching the provided filters.\n\n#### `update`\n\nReschedule a scheduled workflow run by its ID.\n\nNote: the server may reject rescheduling if the scheduled run has already\ntriggered, or if it was created via code definition (not via API).\n\nParameters:\n\nName, Type, Description, Default\n\n`scheduled_id`, `str`, The ID of the scheduled workflow run to reschedule., _required_\n`trigger_at`, `datetime`, The datetime when the run should be triggered., _required_\n\nReturns:\n\nType, Description\n\n`ScheduledWorkflows`, The updated scheduled workflow instance."},"318":{"title":"Webhooks","pageTitle":"Webhooks","pageRoute":"hatchet://docs/reference/python/feature-clients/webhooks","content":"# Webhooks Client\n\nBases: `BaseRestClient`\n\nThe webhooks client provides methods for managing incoming webhooks in Hatchet.\n\nWebhooks allow external systems to trigger Hatchet workflows by sending HTTP requests to dedicated endpoints. 
This enables real-time integration with third-party services like GitHub, Stripe, Slack, or any system that can send webhook events.\n\nMethods:\n\nName, Description\n\n`aio_create`, Create a new webhook.\n`aio_delete`, Delete a webhook by its name.\n`aio_get`, Get a webhook by its name.\n`aio_list`, List webhooks for a given tenant.\n`aio_update`, Update a webhook by its name.\n`create`, Create a new webhook.\n`delete`, Delete a webhook by its name.\n`get`, Get a webhook by its name.\n`list`, List webhooks for a given tenant.\n`update`, Update a webhook by its name.\n\n### Functions\n\n#### `aio_create`\n\nCreate a new webhook.\n\nParameters:\n\nName, Type, Description, Default\n\n`source_name`, `V1WebhookSourceName`, The source name identifying the external system sending webhook events., _required_\n`name`, `str`, The name of the webhook., _required_\n`event_key_expression`, `str`, A CEL expression used to extract the event key from the incoming payload., _required_\n`auth`, `V1WebhookBasicAuth \\, V1WebhookAPIKeyAuth \\, V1WebhookHMACAuth`, The authentication configuration for the webhook (basic auth, API key, or HMAC)., _required_\n`scope_expression`, `str \\, None`, An optional CEL expression used to extract the scope from the incoming payload., `None`\n`static_payload`, `dict[str, Any] \\, None`, An optional static payload to merge into every triggered event., `None`\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The created webhook.\n\n#### `aio_delete`\n\nDelete a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to delete., _required_\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The deleted webhook.\n\n#### `aio_get`\n\nGet a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The webhook with the specified name.\n\n#### 
`aio_list`\n\nList webhooks for a given tenant.\n\nParameters:\n\nName, Type, Description, Default\n\n`limit`, `int \\, None`, The maximum number of webhooks to return., `None`\n`offset`, `int \\, None`, The number of webhooks to skip before starting to collect the result set., `None`\n`webhook_names`, `list[str] \\, None`, A list of webhook names to filter by., `None`\n`source_names`, `list[V1WebhookSourceName] \\, None`, A list of source names to filter by., `None`\n\nReturns:\n\nType, Description\n\n`V1WebhookList`, A list of webhooks matching the specified criteria.\n\n#### `aio_update`\n\nUpdate a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to update., _required_\n`event_key_expression`, `str \\, None`, An updated CEL expression used to extract the event key from the incoming payload., `None`\n`scope_expression`, `str \\, None`, An updated CEL expression used to extract the scope from the incoming payload., `None`\n`static_payload`, `dict[str, Any] \\, None`, An updated static payload to merge into every triggered event., `None`\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The updated webhook.\n\n#### `create`\n\nCreate a new webhook.\n\nParameters:\n\nName, Type, Description, Default\n\n`source_name`, `V1WebhookSourceName`, The source name identifying the external system sending webhook events., _required_\n`name`, `str`, The name of the webhook., _required_\n`event_key_expression`, `str`, A CEL expression used to extract the event key from the incoming payload., _required_\n`auth`, `V1WebhookBasicAuth \\, V1WebhookAPIKeyAuth \\, V1WebhookHMACAuth`, The authentication configuration for the webhook (basic auth, API key, or HMAC)., _required_\n`scope_expression`, `str \\, None`, An optional CEL expression used to extract the scope from the incoming payload., `None`\n`static_payload`, `dict[str, Any] \\, None`, An optional static payload to merge into every triggered event., 
`None`\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The created webhook.\n\n#### `delete`\n\nDelete a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to delete., _required_\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The deleted webhook.\n\n#### `get`\n\nGet a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The webhook with the specified name.\n\n#### `list`\n\nList webhooks for a given tenant.\n\nParameters:\n\nName, Type, Description, Default\n\n`limit`, `int \\, None`, The maximum number of webhooks to return., `None`\n`offset`, `int \\, None`, The number of webhooks to skip before starting to collect the result set., `None`\n`webhook_names`, `list[str] \\, None`, A list of webhook names to filter by., `None`\n`source_names`, `list[V1WebhookSourceName] \\, None`, A list of source names to filter by., `None`\n\nReturns:\n\nType, Description\n\n`V1WebhookList`, A list of webhooks matching the specified criteria.\n\n#### `update`\n\nUpdate a webhook by its name.\n\nParameters:\n\nName, Type, Description, Default\n\n`webhook_name`, `str`, The name of the webhook to update., _required_\n`event_key_expression`, `str \\, None`, An updated CEL expression used to extract the event key from the incoming payload., `None`\n`scope_expression`, `str \\, None`, An updated CEL expression used to extract the scope from the incoming payload., `None`\n`static_payload`, `dict[str, Any] \\, None`, An updated static payload to merge into every triggered event., `None`\n\nReturns:\n\nType, Description\n\n`V1Webhook`, The updated webhook."},"319":{"title":"Workers","pageTitle":"Workers","pageRoute":"hatchet://docs/reference/python/feature-clients/workers","content":"# Workers Client\n\nBases: `BaseRestClient`\n\nThe workers client is a client for managing 
workers programmatically within Hatchet.\n\nMethods:\n\nName, Description\n\n`aio_get`, Get a worker by its ID.\n`aio_list`, List all workers in the tenant determined by the client config.\n`aio_update`, Update a worker by its ID.\n`get`, Get a worker by its ID.\n`list`, List all workers in the tenant determined by the client config.\n`update`, Update a worker by its ID.\n\n### Functions\n\n#### `aio_get`\n\nGet a worker by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`worker_id`, `str`, The ID of the worker to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`Worker`, The worker.\n\n#### `aio_list`\n\nList all workers in the tenant determined by the client config.\n\nReturns:\n\nType, Description\n\n`WorkerList`, A list of workers.\n\n#### `aio_update`\n\nUpdate a worker by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`worker_id`, `str`, The ID of the worker to update., _required_\n`opts`, `UpdateWorkerRequest`, The update options., _required_\n\nReturns:\n\nType, Description\n\n`Worker`, The updated worker.\n\n#### `get`\n\nGet a worker by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`worker_id`, `str`, The ID of the worker to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`Worker`, The worker.\n\n#### `list`\n\nList all workers in the tenant determined by the client config.\n\nReturns:\n\nType, Description\n\n`WorkerList`, A list of workers.\n\n#### `update`\n\nUpdate a worker by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`worker_id`, `str`, The ID of the worker to update., _required_\n`opts`, `UpdateWorkerRequest`, The update options., _required_\n\nReturns:\n\nType, Description\n\n`Worker`, The updated worker."},"320":{"title":"Workflows","pageTitle":"Workflows","pageRoute":"hatchet://docs/reference/python/feature-clients/workflows","content":"# Workflows Client\n\nBases: `BaseRestClient`\n\nThe workflows client is a client for managing workflows programmatically within 
Hatchet.\n\nNote that workflows are the declaration, _not_ the individual runs. If you're looking for runs, use the `RunsClient` instead.\n\nMethods:\n\nName, Description\n\n`aio_delete`, Permanently delete a workflow.\n`aio_get`, Get a workflow by its ID.\n`aio_get_version`, Get a workflow version by the workflow ID and an optional version.\n`aio_list`, List all workflows in the tenant determined by the client config that match optional filters.\n`delete`, Permanently delete a workflow.\n`get`, Get a workflow by its ID.\n`get_version`, Get a workflow version by the workflow ID and an optional version.\n`list`, List all workflows in the tenant determined by the client config that match optional filters.\n\n### Functions\n\n#### `aio_delete`\n\nPermanently delete a workflow.\n\n**DANGEROUS: This will delete a workflow and all of its data**\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `aio_get`\n\nGet a workflow by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`Workflow`, The workflow.\n\n#### `aio_get_version`\n\nGet a workflow version by the workflow ID and an optional version.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to retrieve the version for., _required_\n`version`, `str \\, None`, The version of the workflow to retrieve. 
If None, the latest version is returned., `None`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, The workflow version.\n\n#### `aio_list`\n\nList all workflows in the tenant determined by the client config that match optional filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str \\, None`, The name of the workflow to filter by., `None`\n`limit`, `int \\, None`, The maximum number of items to return., `None`\n`offset`, `int \\, None`, The offset to start the list from., `None`\n\nReturns:\n\nType, Description\n\n`WorkflowList`, A list of workflows.\n\n#### `delete`\n\nPermanently delete a workflow.\n\n**DANGEROUS: This will delete a workflow and all of its data**\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to delete., _required_\n\nReturns:\n\nType, Description\n\n`None`, None\n\n#### `get`\n\nGet a workflow by its ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to retrieve., _required_\n\nReturns:\n\nType, Description\n\n`Workflow`, The workflow.\n\n#### `get_version`\n\nGet a workflow version by the workflow ID and an optional version.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_id`, `str`, The ID of the workflow to retrieve the version for., _required_\n`version`, `str \\, None`, The version of the workflow to retrieve. 
If None, the latest version is returned., `None`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, The workflow version.\n\n#### `list`\n\nList all workflows in the tenant determined by the client config that match optional filters.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflow_name`, `str \\, None`, The name of the workflow to filter by., `None`\n`limit`, `int \\, None`, The maximum number of items to return., `None`\n`offset`, `int \\, None`, The offset to start the list from., `None`\n\nReturns:\n\nType, Description\n\n`WorkflowList`, A list of workflows."},"321":{"title":"Runnables","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/python/runnables","content":"# Runnables\n\n`Runnables` in the Hatchet SDK are things that can be run, namely tasks and workflows. The two main types of runnables you'll encounter are:\n\n- `Workflow`, which lets you define tasks and call all of the run, schedule, etc. methods\n- `Standalone`, which is a single task that's returned by `hatchet.task` and can be run, scheduled, etc."},"322":{"title":"Workflow","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/python/runnables","content":"Bases: `BaseWorkflow[TWorkflowInput]`\n\nA Hatchet workflow, which allows you to define tasks to be run and perform actions on the workflow.\n\nWorkflows in Hatchet represent coordinated units of work that can be triggered, scheduled, or run on a cron schedule. 
Each workflow can contain multiple tasks that can be arranged in dependencies (DAGs), have customized retry behavior, timeouts, concurrency controls, and more.\n\nExample:\n\n```python\nfrom pydantic import BaseModel\nfrom hatchet_sdk import Hatchet\n\nclass MyInput(BaseModel):\n    name: str\n\nhatchet = Hatchet()\nworkflow = hatchet.workflow(\"my-workflow\", input_type=MyInput)\n\n@workflow.task()\ndef greet(input, ctx):\n    return f\"Hello, {input.name}!\"\n\n# Run the workflow\nresult = workflow.run(MyInput(name=\"World\"))\n```\n\nWorkflows support various execution patterns including:\n\n- One-time execution with `run()` or `aio_run()`\n- Scheduled execution with `schedule()`\n- Cron-based recurring execution with `create_cron()`\n- Bulk operations with `run_many()`\n\nTasks within workflows can be defined with `@workflow.task()` or `@workflow.durable_task()` decorators and can be arranged into complex dependency patterns.\n\nMethods:\n\nName, Description\n\n`task`, A decorator to transform a function into a Hatchet task that runs as part of a workflow.\n`durable_task`, A decorator to transform a function into a durable Hatchet task that runs as part of a workflow.\n`on_failure_task`, A decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow that had at least one task fail.\n`on_success_task`, A decorator to transform a function into a Hatchet on-success task that runs as the last step in a workflow that had all upstream tasks succeed.\n`run`, Run the workflow synchronously and wait for it to complete.\n`aio_run`, Run the workflow asynchronously and wait for it to complete.\n`run_no_wait`, Synchronously trigger a workflow run without waiting for it to complete.\n`aio_run_no_wait`, Asynchronously trigger a workflow run without waiting for it to complete.\n`run_many`, Run a workflow in bulk and wait for all runs to complete.\n`aio_run_many`, Run a workflow in bulk and wait for all runs to 
complete.\n`run_many_no_wait`, Run a workflow in bulk without waiting for all runs to complete.\n`aio_run_many_no_wait`, Run a workflow in bulk without waiting for all runs to complete.\n`schedule`, Schedule a workflow to run at a specific time.\n`aio_schedule`, Schedule a workflow to run at a specific time.\n`create_cron`, Create a cron job for the workflow.\n`aio_create_cron`, Create a cron job for the workflow.\n`create_bulk_run_item`, Create a bulk run item for the workflow. This is intended to be used in conjunction with the various `run_many` methods.\n`list_runs`, List runs of the workflow.\n`aio_list_runs`, List runs of the workflow.\n`create_filter`, Create a new filter.\n`aio_create_filter`, Create a new filter.\n\n### Attributes\n\n#### `name`\n\nThe (namespaced) name of the workflow.\n\n#### `tasks`\n\n#### `id`\n\n`cached`\n\nGet the ID of the workflow.\n\nReturns:\n\nType, Description\n\n`str`, The ID of the workflow.\n\nRaises:\n\nType, Description\n\n`ValueError`, If no workflow ID is found for the workflow name.\n\n### Functions\n\n#### `task`\n\nA decorator to transform a function into a Hatchet task that runs as part of a workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator., `None`\n`schedule_timeout`, `Duration`, The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time., `timedelta(seconds=60)`\n`parents`, `list[Task[TWorkflowInput, Any]] \\, None`, A list of tasks that are parents of the task. 
Note: Parents must be defined before their children., `None`\n`retries`, `int`, The number of times to retry the task before failing., `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the task., `None`\n`desired_worker_labels`, `dict[str, DesiredWorkerLabel] \\, None`, A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., `None`\n`concurrency`, `int \\, list[ConcurrencyExpression] \\, None`, A list of concurrency expressions for the task. If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n`wait_for`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that must be met before the task can run., `None`\n`skip_if`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that, if met, will cause the task to be skipped., `None`\n`cancel_if`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that, if met, will cause the task to be canceled., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[TWorkflowInput, Context, P], R \\, CoroutineLike[R]]], Task[TWorkflowInput, R]]`, A decorator which creates a `Task` object.\n\n#### `durable_task`\n\nA decorator to transform a function into a durable Hatchet task that runs as part of a workflow.\n\n**IMPORTANT:** This decorator creates a _durable_ task, which works using Hatchet's durable execution capabilities. 
This is an advanced feature of Hatchet.\n\nSee the Hatchet docs for more information on durable execution to decide if this is right for you.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator., `None`\n`schedule_timeout`, `Duration`, The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time., `timedelta(seconds=60)`\n`parents`, `list[Task[TWorkflowInput, Any]] \\, None`, A list of tasks that are parents of the task. Note: Parents must be defined before their children., `None`\n`retries`, `int`, The number of times to retry the task before failing., `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the task., `None`\n`desired_worker_labels`, `dict[str, DesiredWorkerLabel] \\, None`, A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., `None`\n`concurrency`, `int \\, list[ConcurrencyExpression] \\, None`, A list of concurrency expressions for the task. 
If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n`wait_for`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that must be met before the task can run., `None`\n`skip_if`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that, if met, will cause the task to be skipped., `None`\n`cancel_if`, `list[Condition \\, OrGroup] \\, None`, A list of conditions that, if met, will cause the task to be canceled., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[TWorkflowInput, DurableContext, P], R \\, CoroutineLike[R]]], Task[TWorkflowInput, R]]`, A decorator which creates a `Task` object.\n\n#### `on_failure_task`\n\nA decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow that had at least one task fail.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the on-failure task. If not specified, defaults to the name of the function being wrapped by the `on_failure_task` decorator., `None`\n`schedule_timeout`, `Duration`, The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time to wait for the task to complete. 
The run will be canceled if the task does not complete within this time., `timedelta(seconds=60)`\n`retries`, `int`, The number of times to retry the on-failure task before failing., `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the on-failure task., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., `None`\n`concurrency`, `int \\, list[ConcurrencyExpression] \\, None`, A list of concurrency expressions for the on-failure task. If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[TWorkflowInput, Context, P], R \\, CoroutineLike[R]]], Task[TWorkflowInput, R]]`, A decorator which creates a `Task` object.\n\n#### `on_success_task`\n\nA decorator to transform a function into a Hatchet on-success task that runs as the last step in a workflow that had all upstream tasks succeed.\n\nParameters:\n\nName, Type, Description, Default\n\n`name`, `str \\, None`, The name of the on-success task. If not specified, defaults to the name of the function being wrapped by the `on_success_task` decorator., `None`\n`schedule_timeout`, `Duration`, The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time., `timedelta(minutes=5)`\n`execution_timeout`, `Duration`, The maximum time to wait for the task to complete. 
The run will be canceled if the task does not complete within this time., `timedelta(seconds=60)`\n`retries`, `int`, The number of times to retry the on-success task before failing, `0`\n`rate_limits`, `list[RateLimit] \\, None`, A list of rate limit configurations for the on-success task., `None`\n`backoff_factor`, `float \\, None`, The backoff factor for controlling exponential backoff in retries., `None`\n`backoff_max_seconds`, `int \\, None`, The maximum number of seconds to allow retries with exponential backoff to continue., `None`\n`concurrency`, `int \\, list[ConcurrencyExpression] \\, None`, A list of concurrency expressions for the on-success task. If an integer is provided, it is treated as a constant concurrency limit with a `GROUP_ROUND_ROBIN` strategy, which means that only `N` runs of the task may execute at any given time., `None`\n\nReturns:\n\nType, Description\n\n`Callable[[Callable[Concatenate[TWorkflowInput, Context, P], R \\, CoroutineLike[R]]], Task[TWorkflowInput, R]]`, A decorator which creates a Task object.\n\n#### `run`\n\nRun the workflow synchronously and wait for it to complete.\n\nThis method triggers a workflow run, blocks until completion, and returns the final result.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow, must match the workflow's input type., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution like metadata and parent workflow ID., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`dict[str, Any]`, The result of the workflow execution as a dictionary.\n\n#### `aio_run`\n\nRun the workflow asynchronously and wait for it to complete.\n\nThis method triggers a workflow run, awaits until completion, and returns the final result.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow, must match the workflow's input type., 
`cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution like metadata and parent workflow ID., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`dict[str, Any]`, The result of the workflow execution as a dictionary.\n\n#### `run_no_wait`\n\nSynchronously trigger a workflow run without waiting for it to complete. This method is useful for starting a workflow run and immediately returning a reference to the run without blocking while the workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowRunRef`, A `WorkflowRunRef` object representing the reference to the workflow run.\n\n#### `aio_run_no_wait`\n\nAsynchronously trigger a workflow run without waiting for it to complete. This method is useful for starting a workflow run and immediately returning a reference to the run without blocking while the workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowRunRef`, A `WorkflowRunRef` object representing the reference to the workflow run.\n\n#### `run_many`\n\nRun a workflow in bulk and wait for all runs to complete. 
This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n`return_exceptions`, `bool`, If `True`, exceptions will be returned as part of the results instead of raising them., `False`\n\nReturns:\n\nType, Description\n\n`list[dict[str, Any]] \\, list[dict[str, Any] \\, BaseException]`, A list of results for each workflow run.\n\n#### `aio_run_many`\n\nRun a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n`return_exceptions`, `bool`, If `True`, exceptions will be returned as part of the results instead of raising them., `False`\n\nReturns:\n\nType, Description\n\n`list[dict[str, Any]] \\, list[dict[str, Any] \\, BaseException]`, A list of results for each workflow run.\n\n#### `run_many_no_wait`\n\nRun a workflow in bulk without waiting for all runs to complete.\n\nThis method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n\nReturns:\n\nType, Description\n\n`list[WorkflowRunRef]`, A list of `WorkflowRunRef` objects, each representing a reference to a workflow run.\n\n#### `aio_run_many_no_wait`\n\nRun a workflow in bulk without waiting for all runs to complete.\n\nThis method triggers multiple workflow 
runs and immediately returns a list of references to the runs without blocking while the workflows run.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n\nReturns:\n\nType, Description\n\n`list[WorkflowRunRef]`, A list of `WorkflowRunRef` objects, each representing a reference to a workflow run.\n\n#### `schedule`\n\nSchedule a workflow to run at a specific time.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_at`, `datetime`, The time at which to schedule the workflow., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `ScheduleTriggerWorkflowOptions`, Additional options for workflow execution., `ScheduleTriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, A `WorkflowVersion` object representing the scheduled workflow.\n\n#### `aio_schedule`\n\nSchedule a workflow to run at a specific time.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_at`, `datetime`, The time at which to schedule the workflow., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `ScheduleTriggerWorkflowOptions`, Additional options for workflow execution., `ScheduleTriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, A `WorkflowVersion` object representing the scheduled workflow.\n\n#### `create_cron`\n\nCreate a cron job for the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_name`, `str`, The name of the cron job., _required_\n`expression`, `str`, The cron expression that defines the schedule for the cron job., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata 
for the cron job., `None`\n`priority`, `int \\, None`, The priority of the cron job. Must be between 1 and 3, inclusive., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, A `CronWorkflows` object representing the created cron job.\n\n#### `aio_create_cron`\n\nCreate a cron job for the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_name`, `str`, The name of the cron job., _required_\n`expression`, `str`, The cron expression that defines the schedule for the cron job., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata for the cron job., `None`\n`priority`, `int \\, None`, The priority of the cron job. Must be between 1 and 3, inclusive., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, A `CronWorkflows` object representing the created cron job.\n\n#### `create_bulk_run_item`\n\nCreate a bulk run item for the workflow. This is intended to be used in conjunction with the various `run_many` methods.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`key`, `str \\, None`, The key for the workflow run. 
This is used to identify the run in the bulk operation and for deduplication., `None`\n`options`, `TriggerWorkflowOptions`, Additional options for the workflow run., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowRunTriggerConfig`, A `WorkflowRunTriggerConfig` object that can be used to trigger the workflow run, which you then pass into the `run_many` methods.\n\n#### `list_runs`\n\nList runs of the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for the runs to be listed., `None`\n`until`, `datetime \\, None`, The end time for the runs to be listed., `None`\n`limit`, `int`, The maximum number of runs to be listed., `100`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses of the runs to be listed., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata for filtering the runs., `None`\n`worker_id`, `str \\, None`, The ID of the worker that ran the tasks., `None`\n`parent_task_external_id`, `str \\, None`, The external ID of the parent task., `None`\n`only_tasks`, `bool`, Whether to list only task runs., `False`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n\nReturns:\n\nType, Description\n\n`list[V1TaskSummary]`, A list of `V1TaskSummary` objects representing the runs of the workflow.\n\n#### `aio_list_runs`\n\nList runs of the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for the runs to be listed., `None`\n`until`, `datetime \\, None`, The end time for the runs to be listed., `None`\n`limit`, `int`, The maximum number of runs to be listed., `100`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses of the runs to be listed., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata for 
filtering the runs., `None`\n`worker_id`, `str \\, None`, The ID of the worker that ran the tasks., `None`\n`parent_task_external_id`, `str \\, None`, The external ID of the parent task., `None`\n`only_tasks`, `bool`, Whether to list only task runs., `False`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n\nReturns:\n\nType, Description\n\n`list[V1TaskSummary]`, A list of `V1TaskSummary` objects representing the runs of the workflow.\n\n#### `create_filter`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, Default\n\n`expression`, `str`, The expression to evaluate for the filter., _required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter.\n\n#### `aio_create_filter`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, Default\n\n`expression`, `str`, The expression to evaluate for the filter., _required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter."},"323":{"title":"Task","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/python/runnables","content":"Bases: `Generic[TWorkflowInput, R]`\n\nMethods:\n\nName, Description\n\n`mock_run`, Mimic the execution of a task. This method is intended to be used to unit test\n`aio_mock_run`, Mimic the execution of a task. This method is intended to be used to unit test\n\n### Functions\n\n#### `mock_run`\n\nMimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. 
Use `mock_run` for sync tasks and `aio_mock_run` for async tasks.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput \\, None`, The input to the task., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to attach to the task., `None`\n`parent_outputs`, `dict[str, JSONSerializableMapping] \\, None`, Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={\"step_1\": {\"result\": \"Hello, world!\"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`., `None`\n`retry_count`, `int`, The number of times the task has been retried., `0`\n`lifespan`, `Any`, The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task., `None`\n`dependencies`, `dict[str, Any] \\, None`, Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={\"config\": \"config_value\"}` to `aio_mock_run`., `None`\n\nReturns:\n\nType, Description\n\n`R`, The output of the task.\n\nRaises:\n\nType, Description\n\n`TypeError`, If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called.\n\n#### `aio_mock_run`\n\nMimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. 
Use `mock_run` for sync tasks and `aio_mock_run` for async tasks.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput \\, None`, The input to the task., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to attach to the task., `None`\n`parent_outputs`, `dict[str, JSONSerializableMapping] \\, None`, Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={\"step_1\": {\"result\": \"Hello, world!\"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`., `None`\n`retry_count`, `int`, The number of times the task has been retried., `0`\n`lifespan`, `Any`, The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task., `None`\n`dependencies`, `dict[str, Any] \\, None`, Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. 
For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={\"config\": \"config_value\"}` to `aio_mock_run`., `None`\n\nReturns:\n\nType, Description\n\n`R`, The output of the task.\n\nRaises:\n\nType, Description\n\n`TypeError`, If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called."},"324":{"title":"Standalone","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/python/runnables","content":"Bases: `BaseWorkflow[TWorkflowInput]`, `Generic[TWorkflowInput, R]`\n\nMethods:\n\nName, Description\n\n`run`, Run the workflow synchronously and wait for it to complete.\n`aio_run`, Run the workflow asynchronously and wait for it to complete.\n`run_no_wait`, Trigger a workflow run without waiting for it to complete.\n`aio_run_no_wait`, Asynchronously trigger a workflow run without waiting for it to complete.\n`run_many`, Run a workflow in bulk and wait for all runs to complete.\n`aio_run_many`, Run a workflow in bulk and wait for all runs to complete.\n`run_many_no_wait`, Run a workflow in bulk without waiting for all runs to complete.\n`aio_run_many_no_wait`, Run a workflow in bulk without waiting for all runs to complete.\n`schedule`, Schedule a workflow to run at a specific time.\n`aio_schedule`, Schedule a workflow to run at a specific time.\n`create_cron`, Create a cron job for the workflow.\n`aio_create_cron`, Create a cron job for the workflow.\n`create_bulk_run_item`, Create a bulk run item for the workflow. 
This is intended to be used in conjunction with the various `run_many` methods.\n`list_runs`, List runs of the workflow.\n`aio_list_runs`, List runs of the workflow.\n`create_filter`, Create a new filter.\n`aio_create_filter`, Create a new filter.\n`delete`, Permanently delete the workflow.\n`aio_delete`, Permanently delete the workflow.\n`get_run_ref`, Get a reference to a task run by its run ID.\n`get_result`, Get the result of a task run by its run ID.\n`aio_get_result`, Get the result of a task run by its run ID.\n`mock_run`, Mimic the execution of a task. This method is intended to be used to unit test\n`aio_mock_run`, Mimic the execution of a task. This method is intended to be used to unit test\n\n### Functions\n\n#### `run`\n\nRun the workflow synchronously and wait for it to complete.\n\nThis method triggers a workflow run, blocks until completion, and returns the extracted result.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`R`, The extracted result of the workflow execution.\n\n#### `aio_run`\n\nRun the workflow asynchronously and wait for it to complete.\n\nThis method triggers a workflow run, awaits until completion, and returns the extracted result.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow, must match the workflow's input type., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution like metadata and parent workflow ID., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`R`, The extracted result of the workflow execution.\n\n#### `run_no_wait`\n\nTrigger a workflow run without waiting for it to complete.\n\nThis method triggers a workflow run and immediately 
returns a reference to the run without blocking while the workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow, must match the workflow's input type., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution like metadata and parent workflow ID., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`TaskRunRef[TWorkflowInput, R]`, A `TaskRunRef` object representing the reference to the workflow run.\n\n#### `aio_run_no_wait`\n\nAsynchronously trigger a workflow run without waiting for it to complete. This method is useful for starting a workflow run and immediately returning a reference to the run without blocking while the workflow runs.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `TriggerWorkflowOptions`, Additional options for workflow execution., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`TaskRunRef[TWorkflowInput, R]`, A `TaskRunRef` object representing the reference to the workflow run.\n\n#### `run_many`\n\nRun a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n`return_exceptions`, `bool`, If `True`, exceptions will be returned as part of the results instead of raising them., `False`\n\nReturns:\n\nType, Description\n\n`list[R] \\, list[R \\, BaseException]`, A list of results for each workflow run.\n\n#### `aio_run_many`\n\nRun a workflow in bulk and wait for all runs to complete. 
This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n`return_exceptions`, `bool`, If `True`, exceptions will be returned as part of the results instead of raising them., `False`\n\nReturns:\n\nType, Description\n\n`list[R] \\, list[R \\, BaseException]`, A list of results for each workflow run.\n\n#### `run_many_no_wait`\n\nRun a workflow in bulk without waiting for all runs to complete.\n\nThis method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n\nReturns:\n\nType, Description\n\n`list[TaskRunRef[TWorkflowInput, R]]`, A list of `WorkflowRunRef` objects, each representing a reference to a workflow run.\n\n#### `aio_run_many_no_wait`\n\nRun a workflow in bulk without waiting for all runs to complete.\n\nThis method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run.\n\nParameters:\n\nName, Type, Description, Default\n\n`workflows`, `list[WorkflowRunTriggerConfig]`, A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered., _required_\n\nReturns:\n\nType, Description\n\n`list[TaskRunRef[TWorkflowInput, R]]`, A list of `WorkflowRunRef` objects, each representing a reference to a workflow run.\n\n#### `schedule`\n\nSchedule a workflow to run at a specific time.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_at`, `datetime`, The time at which to schedule the workflow., _required_\n`input`, 
`TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `ScheduleTriggerWorkflowOptions`, Additional options for workflow execution., `ScheduleTriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, A `WorkflowVersion` object representing the scheduled workflow.\n\n#### `aio_schedule`\n\nSchedule a workflow to run at a specific time.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_at`, `datetime`, The time at which to schedule the workflow., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`options`, `ScheduleTriggerWorkflowOptions`, Additional options for workflow execution., `ScheduleTriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowVersion`, A `WorkflowVersion` object representing the scheduled workflow.\n\n#### `create_cron`\n\nCreate a cron job for the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_name`, `str`, The name of the cron job., _required_\n`expression`, `str`, The cron expression that defines the schedule for the cron job., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata for the cron job., `None`\n`priority`, `int \\, None`, The priority of the cron job. 
Must be between 1 and 3, inclusive., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, A `CronWorkflows` object representing the created cron job.\n\n#### `aio_create_cron`\n\nCreate a cron job for the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`cron_name`, `str`, The name of the cron job., _required_\n`expression`, `str`, The cron expression that defines the schedule for the cron job., _required_\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata for the cron job., `None`\n`priority`, `int \\, None`, The priority of the cron job. Must be between 1 and 3, inclusive., `None`\n\nReturns:\n\nType, Description\n\n`CronWorkflows`, A `CronWorkflows` object representing the created cron job.\n\n#### `create_bulk_run_item`\n\nCreate a bulk run item for the workflow. This is intended to be used in conjunction with the various `run_many` methods.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput`, The input data for the workflow., `cast(TWorkflowInput, EmptyModel())`\n`key`, `str \\, None`, The key for the workflow run. 
This is used to identify the run in the bulk operation and for deduplication., `None`\n`options`, `TriggerWorkflowOptions`, Additional options for the workflow run., `TriggerWorkflowOptions()`\n\nReturns:\n\nType, Description\n\n`WorkflowRunTriggerConfig`, A `WorkflowRunTriggerConfig` object that can be used to trigger the workflow run, which you then pass into the `run_many` methods.\n\n#### `list_runs`\n\nList runs of the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for the runs to be listed., `None`\n`until`, `datetime \\, None`, The end time for the runs to be listed., `None`\n`limit`, `int`, The maximum number of runs to be listed., `100`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses of the runs to be listed., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata for filtering the runs., `None`\n`worker_id`, `str \\, None`, The ID of the worker that ran the tasks., `None`\n`parent_task_external_id`, `str \\, None`, The external ID of the parent task., `None`\n`only_tasks`, `bool`, Whether to list only task runs., `False`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n\nReturns:\n\nType, Description\n\n`list[V1TaskSummary]`, A list of `V1TaskSummary` objects representing the runs of the workflow.\n\n#### `aio_list_runs`\n\nList runs of the workflow.\n\nParameters:\n\nName, Type, Description, Default\n\n`since`, `datetime \\, None`, The start time for the runs to be listed., `None`\n`until`, `datetime \\, None`, The end time for the runs to be listed., `None`\n`limit`, `int`, The maximum number of runs to be listed., `100`\n`offset`, `int \\, None`, The offset for pagination., `None`\n`statuses`, `list[V1TaskStatus] \\, None`, The statuses of the runs to be listed., `None`\n`additional_metadata`, `dict[str, str] \\, None`, Additional metadata for 
filtering the runs., `None`\n`worker_id`, `str \\, None`, The ID of the worker that ran the tasks., `None`\n`parent_task_external_id`, `str \\, None`, The external ID of the parent task., `None`\n`only_tasks`, `bool`, Whether to list only task runs., `False`\n`triggering_event_external_id`, `str \\, None`, The event id that triggered the task run., `None`\n\nReturns:\n\nType, Description\n\n`list[V1TaskSummary]`, A list of `V1TaskSummary` objects representing the runs of the workflow.\n\n#### `create_filter`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, Default\n\n`expression`, `str`, The expression to evaluate for the filter., _required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter.\n\n#### `aio_create_filter`\n\nCreate a new filter.\n\nParameters:\n\nName, Type, Description, Default\n\n`expression`, `str`, The expression to evaluate for the filter., _required_\n`scope`, `str`, The scope for the filter., _required_\n`payload`, `JSONSerializableMapping \\, None`, The payload to send with the filter., `None`\n\nReturns:\n\nType, Description\n\n`V1Filter`, The created filter.\n\n#### `delete`\n\nPermanently delete the workflow.\n\n**DANGEROUS: This will delete a workflow and all of its data**\n\n#### `aio_delete`\n\nPermanently delete the workflow.\n\n**DANGEROUS: This will delete a workflow and all of its data**\n\n#### `get_run_ref`\n\nGet a reference to a task run by its run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The ID of the run to get the reference for., _required_\n\nReturns:\n\nType, Description\n\n`TaskRunRef[TWorkflowInput, R]`, A `TaskRunRef` object representing the reference to the task run.\n\n#### `get_result`\n\nGet the result of a task run by its run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The 
ID of the run to get the result for., _required_\n\nReturns:\n\nType, Description\n\n`R`, The result of the task run.\n\n#### `aio_get_result`\n\nGet the result of a task run by its run ID.\n\nParameters:\n\nName, Type, Description, Default\n\n`run_id`, `str`, The ID of the run to get the result for., _required_\n\nReturns:\n\nType, Description\n\n`R`, The result of the task run.\n\n#### `mock_run`\n\nMimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput \\, None`, The input to the task., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to attach to the task., `None`\n`parent_outputs`, `dict[str, JSONSerializableMapping] \\, None`, Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={\"step_1\": {\"result\": \"Hello, world!\"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`., `None`\n`retry_count`, `int`, The number of times the task has been retried., `0`\n`lifespan`, `Any`, The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task., `None`\n`dependencies`, `dict[str, Any] \\, None`, Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. 
For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={\"config\": \"config_value\"}` to `aio_mock_run`., `None`\n\nReturns:\n\nType, Description\n\n`R`, The output of the task.\n\n#### `aio_mock_run`\n\nMimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks.\n\nParameters:\n\nName, Type, Description, Default\n\n`input`, `TWorkflowInput \\, None`, The input to the task., `None`\n`additional_metadata`, `JSONSerializableMapping \\, None`, Additional metadata to attach to the task., `None`\n`parent_outputs`, `dict[str, JSONSerializableMapping] \\, None`, Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={\"step_1\": {\"result\": \"Hello, world!\"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`., `None`\n`retry_count`, `int`, The number of times the task has been retried., `0`\n`lifespan`, `Any`, The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task., `None`\n`dependencies`, `dict[str, Any] \\, None`, Dependencies to be injected into the task. This is useful for tasks that have dependencies defined using `Depends`. **IMPORTANT**: You must pass the dependencies _directly_, **not** the `Depends` objects themselves. 
For example, if you have a task that has a dependency `config: Annotated[str, Depends(get_config)]`, you should pass `dependencies={\"config\": \"config_value\"}` to `aio_mock_run`., `None`\n\nReturns:\n\nType, Description\n\n`R`, The output of the task."},"325":{"title":"Asyncio","pageTitle":"Asyncio","pageRoute":"hatchet://docs/reference/python/asyncio","content":"# Working with `asyncio`\n\nHatchet's Python SDK, similarly to other popular libraries like FastAPI, Langchain, etc., makes heavy use of `asyncio`, and recommends that you do as well!\n\n\n  To learn the basics of `asyncio`, check out [this introduction from\n  FastAPI](https://fastapi.tiangolo.com/async/).\n\n\nHowever, as is the case in FastAPI, when using `asyncio` in Hatchet, you need to be careful to not have any blocking logic in the functions you define as tasks, as this will block the asyncio event loop and prevent additional work from executing until the blocking operation has completed.\n\nFor example, this is async-safe:\n\n```python\nasync def async_safe() -> int:\n    await asyncio.sleep(5)\n\n    return 42\n```\n\nBut this is not:\n\n```python\nasync def blocking() -> int:\n    time.sleep(5)\n\n    return 42\n```\n\nIn the second case, your worker will not be able to process any other work that's defined as async until the five-second sleep has finished.\n\n### Using `asyncio.to_thread` and `loop.run_in_executor`\n\nTo avoid problems caused by blocking code, you can run your blocking code in an executor with `asyncio.to_thread` or, more verbosely, `loop.run_in_executor`. 
The two examples below are async-safe and will no longer block the event loop.\n\n```python\nasync def to_thread() -> int:\n    await asyncio.to_thread(time.sleep, 5)\n\n    return 42\n```\n\n```python\nasync def run_in_executor() -> int:\n    loop = asyncio.get_event_loop()\n\n    await loop.run_in_executor(None, time.sleep, 5)\n\n    return 42\n```\n\n### More Resources for working with `asyncio`\n\nIf you're looking for more info on developing with AsyncIO more broadly, we highly recommend the following\nresources:\n\n- Python's Documentation on [Developing with\n  AsyncIO](https://docs.python.org/3/library/asyncio-dev.html)\n- Tusamma's Medium post about [How AsyncIO\n  works](https://medium.com/@tssovi/how-does-asyncio-works-f5386316b7fa)\n- Zac Hatfield-Dodds's PyCon 2023 talk on [Async: scaling structured concurrency with static and dynamic analysis](https://www.youtube.com/watch?v=FrpUb6OEbcc)"},"326":{"title":"Pydantic","pageTitle":"Pydantic","pageRoute":"hatchet://docs/reference/python/pydantic","content":"# Pydantic Support\n\nThe V1 Hatchet SDK leans heavily on [Pydantic](https://docs.pydantic.dev/latest/) (both internally and externally) for handling validation of workflow inputs and outputs, method inputs, and more.\n\n### Usage\n\nTo enable Pydantic for validation, you'll need to:\n\n1. Provide an `input_validator` as a parameter to your `workflow`.\n2. Add return type hints for your `tasks`.\n\n### Default Behavior\n\nBy default, if no `input_validator` is provided, the `EmptyModel` is used, which is a Pydantic model that accepts any input. 
For example:\n\n```python\nfrom hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet\n\nhatchet = Hatchet()\n\n\n@hatchet.task()\ndef simple(input: EmptyModel, ctx: Context) -> dict[str, str]:\n    return {\"result\": \"Hello, world!\"}\n\n\n@hatchet.durable_task()\nasync def simple_durable(input: EmptyModel, ctx: DurableContext) -> dict[str, str]:\n    # durable tasks should be async\n    return {\"result\": \"Hello, world!\"}\n\n\ndef main() -> None:\n    worker = hatchet.worker(\n        \"test-worker\",\n        workflows=[simple, simple_durable],\n    )\n    worker.start()\n```\n\nIn this simple example, the `input` that's injected into the task accepts an argument `input`, which is of type `EmptyModel`. The `EmptyModel` can be imported directly from Hatchet, and is an alias for:\n\n```python\nfrom pydantic import BaseModel, ConfigDict\n\nclass EmptyModel(BaseModel):\n    model_config = ConfigDict(extra=\"allow\")\n```\n\nNote that since `extra=\"allow\"` is set, workflows will not fail with validation errors if an extra field is provided.\n\n### Example Usage\n\nWe highly recommend creating Pydantic models to represent your workflow inputs and outputs. This will help you catch errors early and ensure that your workflows are well-typed. 
For example, consider a fanout workflow like this:\n\n```python\nclass ParentInput(BaseModel):\n    n: int = 100\n\n\nclass ChildInput(BaseModel):\n    a: str\n\n\nparent_wf = hatchet.workflow(name=\"FanoutParent\", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name=\"FanoutChild\", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n    print(\"spawning child\")\n\n    result = await child_wf.aio_run_many(\n        [\n            child_wf.create_bulk_run_item(\n                input=ChildInput(a=str(i)),\n                additional_metadata={\"hello\": \"earth\"},\n                key=f\"child{i}\",\n            )\n            for i in range(input.n)\n        ],\n    )\n\n    print(f\"results {result}\")\n\n    return {\"results\": result}\n```\n\nIn this case, we've defined two workflows: a parent and a child. They both have their inputs typed, and the parent spawns the child. 
Note that `child_wf.create_bulk_run_item` is typed, so the type checker (and your IDE) know the type of the input to the child workflow.\n\nThen, the child tasks are defined as follows:\n\n```python\n@child_wf.task()\nasync def process(input: ChildInput, ctx: Context) -> dict[str, str]:\n    print(f\"child process {input.a}\")\n    return {\"status\": input.a}\n\n\n@child_wf.task(parents=[process])\nasync def process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n    process_output = ctx.task_output(process)\n    a = process_output[\"status\"]\n\n    return {\"status2\": a + \"2\"}\n```\n\nIn the children, the inputs are validated by Pydantic, so you can access their attributes directly without needing a type cast or parsing a dictionary with the inputs instead."},"327":{"title":"Lifespans","pageTitle":"Lifespans","pageRoute":"hatchet://docs/reference/python/lifespans","content":"# Lifespans\n\n\n  Lifespans are an **experimental feature** in Hatchet, and are subject to\n  change.\n\n\nHatchet's Python SDK allows you to define a **_lifespan_**, which is an async generator that runs when your worker starts up and cleans up when it exits, which lets you share state across all of the tasks running on the worker. This behaves almost identically to [FastAPI's lifespans](https://fastapi.tiangolo.com/advanced/events/), and is intended to be used in the same way. Lifespans are useful for sharing state like connection pools across all tasks on a single worker. They also work great for loading expensive machine learning models into memory before the worker starts.\n\n\n  We recommend only using lifespans for storing **_immutable_** state to share\n  between tasks running on your worker. The intention is not to e.g. store a\n  counter of the number of tasks that a worker has run and increment that\n  counter on each task run. 
This is prone to unexpected behavior due to\n  concurrency in Hatchet."},"328":{"title":"Usage","pageTitle":"Lifespans","pageRoute":"hatchet://docs/reference/python/lifespans","content":"To use Hatchet's `lifespan` feature, define an async generator and pass it into your `worker`:\n\n```python\nclass Lifespan(BaseModel):\n    model_config = ConfigDict(arbitrary_types_allowed=True)\n\n    foo: str\n    pool: ConnectionPool\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n    print(\"Running lifespan!\")\n    with ConnectionPool(\"postgres://hatchet:hatchet@localhost:5431/hatchet\") as pool:\n        yield Lifespan(\n            foo=\"bar\",\n            pool=pool,\n        )\n\n    print(\"Cleaning up lifespan!\")\n\n\nworker = hatchet.worker(\n    \"test-worker\", slots=1, workflows=[lifespan_workflow], lifespan=lifespan\n)\n```\n\nWhen the worker starts, it will run the lifespan up to the `yield`. Then, on worker shutdown, it will clean up by running everything after the `yield` (the same as with any other generator).\n\n\n  Your lifespan must only `yield` **_once_**.\n\n\nThen, to use your lifespan in a task, you can extract it from the context with `Context.lifespan`.\n\n```python\nclass TaskOutput(BaseModel):\n    num_rows: int\n    external_ids: list[UUID]\n\n\nlifespan_workflow = hatchet.workflow(name=\"LifespanWorkflow\")\n\n\n@lifespan_workflow.task()\ndef sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n    pool = cast(Lifespan, ctx.lifespan).pool\n\n    with pool.connection() as conn:\n        query = conn.execute(\"SELECT * FROM v1_lookup_table_olap LIMIT 5;\")\n        rows = query.fetchall()\n\n        for row in rows:\n            print(row)\n\n        print(\"executed sync task with lifespan\", ctx.lifespan)\n\n        return TaskOutput(\n            num_rows=len(rows),\n            external_ids=[cast(UUID, row[0]) for row in rows],\n        )\n```\n\n\n  For type checking, cast the `Context.lifespan` to whatever type 
your lifespan\n  generator yields.\n\n\nAnd that's it! Now, any task running on the worker with the lifespan provided will have access to the lifespan data."},"329":{"title":"Dependency Injection","pageTitle":"Dependency Injection","pageRoute":"hatchet://docs/reference/python/dependency-injection","content":"# Dependency Injection\n\n\n  Dependency injection is an **experimental feature** in Hatchet, and is subject\n  to change.\n\n\nHatchet's Python SDK allows you to inject **_dependencies_** into your tasks, FastAPI style. These dependencies can be either synchronous or asynchronous functions. They are executed before the task is triggered, and their results are injected into the task as parameters.\n\nThis behaves almost identically to [FastAPI's dependency injection](https://fastapi.tiangolo.com/tutorial/dependencies/), and is intended to be used in the same way. Dependencies are useful for sharing logic between tasks that you'd like to avoid repeating, or would like to factor out of the task logic itself (e.g. to make testing easier).\n\n\nSince dependencies are run before tasks are executed, having many dependencies (or any that take a long time to evaluate) can cause tasks to experience significantly delayed start times, as they must wait for all dependencies to finish evaluating."},"330":{"title":"Usage","pageTitle":"Dependency Injection","pageRoute":"hatchet://docs/reference/python/dependency-injection","content":"To add dependencies to your tasks, import `Depends` from the `hatchet_sdk`. 
Then:\n\n```python\nasync def async_dep(input: EmptyModel, ctx: Context) -> str:\n    return ASYNC_DEPENDENCY_VALUE\n\n\ndef sync_dep(input: EmptyModel, ctx: Context) -> str:\n    return SYNC_DEPENDENCY_VALUE\n\n\n@asynccontextmanager\nasync def async_cm_dep(\n    input: EmptyModel, ctx: Context, async_dep: Annotated[str, Depends(async_dep)]\n) -> AsyncGenerator[str, None]:\n    try:\n        yield ASYNC_CM_DEPENDENCY_VALUE + \"_\" + async_dep\n    finally:\n        pass\n\n\n@contextmanager\ndef sync_cm_dep(\n    input: EmptyModel, ctx: Context, sync_dep: Annotated[str, Depends(sync_dep)]\n) -> Generator[str, None, None]:\n    try:\n        yield SYNC_CM_DEPENDENCY_VALUE + \"_\" + sync_dep\n    finally:\n        pass\n\n\n@contextmanager\ndef base_cm_dep(input: EmptyModel, ctx: Context) -> Generator[str, None, None]:\n    try:\n        yield CHAINED_CM_VALUE\n    finally:\n        pass\n\n\ndef chained_dep(\n    input: EmptyModel, ctx: Context, base_cm: Annotated[str, Depends(base_cm_dep)]\n) -> str:\n    return \"chained_\" + base_cm\n\n\n@asynccontextmanager\nasync def base_async_cm_dep(\n    input: EmptyModel, ctx: Context\n) -> AsyncGenerator[str, None]:\n    try:\n        yield CHAINED_ASYNC_CM_VALUE\n    finally:\n        pass\n\n\nasync def chained_async_dep(\n    input: EmptyModel,\n    ctx: Context,\n    base_async_cm: Annotated[str, Depends(base_async_cm_dep)],\n) -> str:\n    return \"chained_\" + base_async_cm\n```\n\nIn this example, we've declared two dependencies: one synchronous and one asynchronous. 
You can do anything you like in your dependencies, such as creating database sessions, managing configuration, sharing instances of service-layer logic, and more.\n\nOnce you've defined your dependency functions, inject them into your tasks as follows:\n\n```python\n@hatchet.task()\nasync def async_task_with_dependencies(\n    _i: EmptyModel,\n    ctx: Context,\n    async_dep: Annotated[str, Depends(async_dep)],\n    sync_dep: Annotated[str, Depends(sync_dep)],\n    async_cm_dep: Annotated[str, Depends(async_cm_dep)],\n    sync_cm_dep: Annotated[str, Depends(sync_cm_dep)],\n    chained_dep: Annotated[str, Depends(chained_dep)],\n    chained_async_dep: Annotated[str, Depends(chained_async_dep)],\n) -> Output:\n    return Output(\n        sync_dep=sync_dep,\n        async_dep=async_dep,\n        async_cm_dep=async_cm_dep,\n        sync_cm_dep=sync_cm_dep,\n        chained_dep=chained_dep,\n        chained_async_dep=chained_async_dep,\n    )\n```\n\n\n  Important note: Your dependency functions must take two positional arguments:\n  the workflow input and the `Context` (the same as any other task).\n\n\nThat's it! Now, whenever your task is triggered, its dependencies will be evaluated, and the results will be injected into the task at runtime for you to use as needed."},"331":{"title":"Dataclass Support","pageTitle":"Dataclass Support","pageRoute":"hatchet://docs/reference/python/dataclasses","content":"# Dataclass Support\n\nThroughout the docs, we use Pydantic models in virtually all of our Python examples for validating task inputs and outputs. This is the recommended path, as it provides lots of safety guarantees as you're writing tasks. With that said, Hatchet also supports using `dataclasses` as both input and output types to tasks. 
**Dataclass support was added in SDK version 1.21.0.**\n\n> **Warning:** Dataclasses do not perform any type validation on instantiation like Pydantic\n>   models do.\n\n### Usage\n\nTo use a dataclass instead of a Pydantic model, you'll need to:\n\n1. Provide an `input_validator` as a parameter to your `workflow` or `task` (in the case of a standalone task with `hatchet.task`).\n2. Add return type hints for your `tasks`.\n\n### Example Usage\n\n`dataclass` validators work exactly like Pydantic models in Hatchet. First, you create the classes:\n\n```python\n@dataclass\nclass Input:\n    name: str\n\n\n@dataclass\nclass Output:\n    message: str\n```\n\nAnd then you provide the classes to your workflow or task:\n\n```python\n@hatchet.task(input_validator=Input)\ndef say_hello(input: Input, ctx: Context) -> Output:\n    return Output(message=f\"Hello, {input.name}!\")\n```\n\nAnd finally, triggering works the same as well - you just provide the dataclass instance as input:\n\n```python\nsay_hello.run(input=Input(name=\"Hatchet\"))\n```"},"332":{"title":"Client","pageTitle":"Client","pageRoute":"hatchet://docs/reference/typescript/client","content":"# Hatchet TypeScript SDK Reference\n\nThis is the TypeScript SDK reference, documenting methods available for interacting with Hatchet resources.\nCheck out the [user guide](https://docs.hatchet.run/home/) for an introduction to getting your first tasks running.\n\n<a id=\"hatchetclient\"></a>\n\n### HatchetClient\n\nHatchetV1 implements the main client interface for interacting with the Hatchet workflow engine.\nIt provides methods for creating and executing workflows, as well as managing workers.\n\nImplements\n\n- `IHatchetClient`\n\nProperties\n\nProperty, Type, Description\n\n<a id=\"property-tenantid\"></a> `tenantId`, `string`, The tenant ID for the Hatchet client\n\nAccessors\n\n<a id=\"api\"></a>\n\n##### `api`\n\nGet Signature\n\nGet the API client for making HTTP requests to the Hatchet API\nNote: This is not 
recommended for general use, but is available for advanced scenarios\n\nReturns\n\n`Api`\\<`unknown`\\>\n\nAn API client instance\n\n<a id=\"crons\"></a>\n\n##### `crons`\n\nGet Signature\n\nGet the cron client for creating and managing cron workflow runs\n\nReturns\n\n[`CronClient`](feature-clients/crons.mdx#cronclient)\n\nA cron client instance\n\nImplementation of\n\n```ts\nIHatchetClient.crons;\n```\n\n<a id=\"events\"></a>\n\n##### `events`\n\nGet Signature\n\nGet the event client for creating and managing event workflow runs\n\nReturns\n\n`EventClient`\n\nAn event client instance\n\nImplementation of\n\n```ts\nIHatchetClient.events;\n```\n\n<a id=\"filters\"></a>\n\n##### `filters`\n\nGet Signature\n\nGet the filters client for creating and managing filters\n\nReturns\n\n[`FiltersClient`](feature-clients/filters.mdx#filtersclient)\n\nA filters client instance\n\nImplementation of\n\n```ts\nIHatchetClient.filters;\n```\n\n<a id=\"logs\"></a>\n\n##### `logs`\n\nGet Signature\n\nGet the logs client for creating and managing logs\n\nReturns\n\n[`LogsClient`](feature-clients/logs.mdx#logsclient)\n\nA logs client instance\n\nImplementation of\n\n```ts\nIHatchetClient.logs;\n```\n\n<a id=\"metrics\"></a>\n\n##### `metrics`\n\nGet Signature\n\nGet the metrics client for creating and managing metrics\n\nReturns\n\n[`MetricsClient`](feature-clients/metrics.mdx#metricsclient)\n\nA metrics client instance\n\nImplementation of\n\n```ts\nIHatchetClient.metrics;\n```\n\n<a id=\"ratelimits\"></a>\n\n##### `ratelimits`\n\nGet Signature\n\nGet the rate limits client for creating and managing rate limits\n\nReturns\n\n[`RatelimitsClient`](feature-clients/ratelimits.mdx#ratelimitsclient)\n\nA rate limits client instance\n\nImplementation of\n\n```ts\nIHatchetClient.ratelimits;\n```\n\n<a id=\"runs\"></a>\n\n##### `runs`\n\nGet Signature\n\nGet the runs client for creating and managing runs\n\nReturns\n\n[`RunsClient`](feature-clients/runs.mdx#runsclient)\n\nA runs client 
instance\n\nImplementation of\n\n```ts\nIHatchetClient.runs;\n```\n\n<a id=\"scheduled\"></a>\n\n##### `scheduled`\n\nGet Signature\n\nGet the schedules client for creating and managing scheduled workflow runs\n\nReturns\n\n[`ScheduleClient`](feature-clients/schedules.mdx#scheduleclient)\n\nA schedules client instance\n\nImplementation of\n\n```ts\nIHatchetClient.scheduled;\n```\n\n<a id=\"schedules\"></a>\n\n##### `schedules`\n\nGet Signature\n\nAlias\n\nscheduled\n\nReturns\n\n[`ScheduleClient`](feature-clients/schedules.mdx#scheduleclient)\n\n<a id=\"tasks\"></a>\n\n##### `tasks`\n\nGet Signature\n\nGet the tasks client for creating and managing tasks\n\nReturns\n\n[`WorkflowsClient`](feature-clients/workflows.mdx#workflowsclient)\n\nA tasks client instance\n\n<a id=\"tenant\"></a>\n\n##### `tenant`\n\nGet Signature\n\nGet the tenant client for managing tenants\n\nReturns\n\n`TenantClient`\n\nA tenant client instance\n\nImplementation of\n\n```ts\nIHatchetClient.tenant;\n```\n\n<a id=\"webhooks\"></a>\n\n##### `webhooks`\n\nGet Signature\n\nGet the webhooks client for creating and managing webhooks\n\nReturns\n\n[`WebhooksClient`](feature-clients/webhooks.mdx#webhooksclient)\n\nA webhooks client instance\n\nImplementation of\n\n```ts\nIHatchetClient.webhooks;\n```\n\n<a id=\"workers\"></a>\n\n##### `workers`\n\nGet Signature\n\nGet the workers client for creating and managing workers\n\nReturns\n\n[`WorkersClient`](feature-clients/workers.mdx#workersclient)\n\nA workers client instance\n\nImplementation of\n\n```ts\nIHatchetClient.workers;\n```\n\n<a id=\"workflows\"></a>\n\n##### `workflows`\n\nGet Signature\n\nGet the workflows client for creating and managing workflows\n\nReturns\n\n[`WorkflowsClient`](feature-clients/workflows.mdx#workflowsclient)\n\nA workflows client instance\n\nImplementation of\n\n```ts\nIHatchetClient.workflows;\n```\n\n#### Methods\n\n<a id=\"durabletask\"></a>\n\n##### `durableTask()`\n\nImplementation of the durableTask 
method.\n\nCreates a new durable task workflow.\nTypes can be explicitly specified as generics or inferred from the function signature.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `CreateDurableTaskWorkflowOpts`\\<`I` & `Resolved`\\<`GlobalInput`, `MiddlewareBefore`\\>, `MergeIfNonEmpty`\\<`O`, `GlobalOutput`\\>\\>, Durable task configuration options\n\nReturns\n\n[`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`I`, `O`, `GlobalInput`, `GlobalOutput`, `MiddlewareBefore`, `MiddlewareAfter`\\>\n\nA TaskWorkflowDeclaration instance for a durable task\n\nCreates a new durable task workflow with types inferred from the function parameter.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `object` & `Omit`\\<`CreateDurableTaskWorkflowOpts`\\<`I`, `O`\\>, `\"fn\"`\\>, Durable task configuration options with function that defines types\n\nReturns\n\n[`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`I`, `O`, `GlobalInput`, `GlobalOutput`, `MiddlewareBefore`, `MiddlewareAfter`\\>\n\nA TaskWorkflowDeclaration instance with inferred types\n\n<a id=\"run\"></a>\n\n##### `run()`\n\nTriggers a workflow run and waits for the result.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `Workflow` \\, `BaseWorkflowDeclaration`\\<`I`, `O`\\>, The workflow to run, either as a Workflow instance or workflow name\n`input`, `I`, The input data for the workflow\n`options`, `RunOpts`, Configuration options for the workflow run\n\nReturns\n\n`Promise`\\<`O`\\>\n\nA promise that resolves with the workflow result\n\n<a id=\"runandwait\"></a>\n\n##### `runAndWait()`\n\nTriggers a workflow run and waits for the result.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `Workflow` \\, `BaseWorkflowDeclaration`\\<`I`, `O`\\>, The workflow to run, either as a Workflow instance or workflow name\n`input`, `I`, The input data for the workflow\n`options`, `RunOpts`, Configuration options for 
the workflow run\n\nReturns\n\n`Promise`\\<`O`\\>\n\nA promise that resolves with the workflow result\n\nAlias\n\nrun\n\n<a id=\"runnowait\"></a>\n\n##### `runNoWait()`\n\nTriggers a workflow run without waiting for completion.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `Workflow` \\, `BaseWorkflowDeclaration`\\<`I`, `O`\\>, The workflow to run, either as a Workflow instance or workflow name\n`input`, `I`, The input data for the workflow\n`options`, `RunOpts`, Configuration options for the workflow run\n\nReturns\n\n`Promise`\\<`WorkflowRunRef`\\<`O`\\>\\>\n\nA WorkflowRunRef containing the run ID and methods to interact with the run\n\n<a id=\"task\"></a>\n\n##### `task()`\n\nImplementation of the task method.\n\nCreates a new task workflow.\nTypes can be explicitly specified as generics or inferred from the function signature.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `CreateTaskWorkflowOpts`\\<`I` & `Resolved`\\<`GlobalInput`, `MiddlewareBefore`\\>, `MergeIfNonEmpty`\\<`O`, `GlobalOutput`\\>\\>, Task configuration options\n\nReturns\n\n[`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`I`, `O`, `GlobalInput`, `GlobalOutput`, `MiddlewareBefore`, `MiddlewareAfter`\\>\n\nA TaskWorkflowDeclaration instance\n\nCreates a new task workflow with types inferred from the function parameter.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `object` & `Omit`\\<`CreateTaskWorkflowOpts`\\<`I`, `O`\\>, `\"fn\"`\\>, Task configuration options with function that defines types\n\nReturns\n\n[`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`I`, `O`, `GlobalInput`, `GlobalOutput`, `MiddlewareBefore`, `MiddlewareAfter`\\>\n\nA TaskWorkflowDeclaration instance with inferred types\n\n<a id=\"worker\"></a>\n\n##### `worker()`\n\nCreates a new worker instance for processing workflow tasks.\n\nParameters\n\nParameter, Type, Description\n\n`name`, `string`, -\n`options?`, `number` \\, 
`CreateWorkerOpts`, Configuration options for creating the worker\n\nReturns\n\n`Promise`\\<`Worker`\\>\n\nA promise that resolves with a new HatchetWorker instance\n\n<a id=\"workflow\"></a>\n\n##### `workflow()`\n\nCreates a new workflow definition.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `CreateWorkflowOpts`, Configuration options for creating the workflow\n\nReturns\n\n[`WorkflowDeclaration`](Runnables.mdx#workflowdeclaration)\\<`I`, `O`, `Resolved`\\<`GlobalInput`, `MiddlewareBefore`\\>\\>\n\nA new Workflow instance\n\nNote\n\nIt is possible to create an orphaned workflow if no client is available using @hatchet/client CreateWorkflow"},"333":{"title":"Context","pageTitle":"Context","pageRoute":"hatchet://docs/reference/typescript/Context","content":"# Context\n\nThe Hatchet Context class provides helper methods and useful data to tasks at runtime. It is passed as the second argument to all tasks and durable tasks.\n\nThere are two types of context classes you'll encounter:\n\n- Context - The standard context for regular tasks with methods for logging, task output retrieval, cancellation, and more.\n- DurableContext - An extended context for durable tasks that includes additional methods for durable execution.\n\n<a id=\"context\"></a>\n\n### Context\n\nExtended by\n\n- [`DurableContext`](#durablecontext)\n\n#### Methods\n\n<a id=\"additionalmetadata\"></a>\n\n##### `additionalMetadata()`\n\nRetrieves additional metadata associated with the current workflow run.\n\nReturns\n\n`Record`\\<`string`, `string`\\>\n\nA record of metadata key-value pairs.\n\n<a id=\"bulkrunchildren\"></a>\n\n##### `bulkRunChildren()`\n\nRuns multiple children workflows in parallel and waits for all results.\n\nParameters\n\nParameter, Type, Description\n\n`children`, `object`[], An array of objects containing the workflow name, input data, and options for each workflow.\n\nReturns\n\n`Promise`\\<`P`[]\\>\n\nA list of results from the children workflows.\n\n<a 
id=\"bulkrunnowaitchildren\"></a>\n\n##### `bulkRunNoWaitChildren()`\n\nRuns multiple children workflows in parallel without waiting for their results.\n\nParameters\n\nParameter, Type, Description\n\n`children`, `object`[], An array of objects containing the workflow name, input data, and options for each workflow.\n\nReturns\n\n`Promise`\\<`WorkflowRunRef`\\<`P`\\>[]\\>\n\nA list of workflow run references to the enqueued runs.\n\n<a id=\"childindex\"></a>\n\n##### `childIndex()`\n\nGets the index of this workflow if it was spawned as part of a bulk operation.\n\nReturns\n\n`number` \\| `undefined`\n\nThe child index number, or undefined if not set.\n\n<a id=\"childkey\"></a>\n\n##### `childKey()`\n\nGets the key associated with this workflow if it was spawned as a child workflow.\n\nReturns\n\n`string` \\| `undefined`\n\nThe child key, or undefined if not set.\n\n<a id=\"errors\"></a>\n\n##### `errors()`\n\nReturns errors from any task runs in the workflow.\n\nReturns\n\n`Record`\\<`string`, `string`\\>\n\nA record mapping task names to error messages.\n\nThrows\n\nA warning if no errors are found (this method should be used in on-failure tasks).\n\n<a id=\"filterpayload\"></a>\n\n##### `filterPayload()`\n\nGets the payload from the filter that matched when triggering the event.\n\nReturns\n\n`Record`\\<`string`, `any`\\>\n\nThe payload.\n\n<a id=\"parentoutput\"></a>\n\n##### `parentOutput()`\n\nRetrieves the output of a parent task.\n\nParameters\n\nParameter, Type, Description\n\n`parentTask`, \\, `string` \\, `CreateWorkflowTaskOpts`\\<`any`, `L`\\> \\, `CreateWorkflowDurableTaskOpts`\\<`any`, `L`\\>, A CreateTaskOpts or string of the parent task name.\n\nReturns\n\n`Promise`\\<`L`\\>\n\nThe output of the specified parent task.\n\nThrows\n\nAn error if the task output is not found.\n\n<a id=\"parentworkflowrunid\"></a>\n\n##### `parentWorkflowRunId()`\n\nGets the ID of the parent workflow run if this workflow was spawned as a 
child.\n\nReturns\n\n`string` \\| `undefined`\n\nThe parent workflow run ID, or undefined if not a child workflow.\n\n<a id=\"putstream\"></a>\n\n##### `putStream()`\n\nStreams data from the current task run.\n\nParameters\n\nParameter, Type, Description\n\n`data`, `string` \\, `Uint8Array`\\<`ArrayBufferLike`\\>, The data to stream (string or binary).\n\nReturns\n\n`Promise`\\<`void`\\>\n\nA promise that resolves when the data has been streamed.\n\n<a id=\"refreshtimeout\"></a>\n\n##### `refreshTimeout()`\n\nRefreshes the timeout for the current task.\n\nParameters\n\nParameter, Type, Description\n\n`incrementBy`, `Duration`, The interval by which to increment the timeout. The interval should be specified in the format of '10s' for 10 seconds, '1m' for 1 minute, or '1d' for 1 day.\n\nReturns\n\n`Promise`\\<`void`\\>\n\n<a id=\"releaseslot\"></a>\n\n##### `releaseSlot()`\n\nReleases a worker slot for a task run such that the worker can pick up another task.\nNote: this is an advanced feature that may lead to unexpected behavior if used incorrectly.\n\nReturns\n\n`Promise`\\<`void`\\>\n\nA promise that resolves when the slot has been released.\n\n<a id=\"rethrowifcancelled\"></a>\n\n##### `rethrowIfCancelled()`\n\nHelper for broad `catch` blocks so cancellation isn't accidentally swallowed.\n\nExample:\n\n```ts\ntry { ... } catch (e) { ctx.rethrowIfCancelled(e); ... 
}\n```\n\nParameters\n\nParameter, Type\n\n`err`, `unknown`\n\nReturns\n\n`void`\n\n<a id=\"retrycount\"></a>\n\n##### `retryCount()`\n\nGets the number of times the current task has been retried.\n\nReturns\n\n`number`\n\nThe retry count.\n\n<a id=\"runchild\"></a>\n\n##### `runChild()`\n\nRuns a new workflow and waits for its result.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, \\, `string` \\, `BaseWorkflowDeclaration`\\<`Q`, `P`\\> \\, [`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`Q`, `P`, \\{ \\}, \\{ \\}, \\{ \\}, \\{ \\}\\>, The workflow to run (name, Workflow instance, or WorkflowV1 instance).\n`input`, `Q`, The input data for the workflow.\n`options?`, `ChildRunOpts`, An options object containing key, sticky, priority, and additionalMetadata.\n\nReturns\n\n`Promise`\\<`P`\\>\n\nThe result of the workflow.\n\n<a id=\"runnowaitchild\"></a>\n\n##### `runNoWaitChild()`\n\nEnqueues a new workflow without waiting for its result.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `BaseWorkflowDeclaration`\\<`Q`, `P`\\>, The workflow to enqueue (name, Workflow instance, or WorkflowV1 instance).\n`input`, `Q`, The input data for the workflow.\n`options?`, `ChildRunOpts`, An options object containing key, sticky, priority, and additionalMetadata.\n\nReturns\n\n`Promise`\\<`WorkflowRunRef`\\<`P`\\>\\>\n\nA reference to the spawned workflow run.\n\n<a id=\"taskname\"></a>\n\n##### `taskName()`\n\nGets the name of the current running task.\n\nReturns\n\n`string`\n\nThe name of the task.\n\n<a id=\"taskrunexternalid\"></a>\n\n##### `taskRunExternalId()`\n\nGets the ID of the current task run.\n\nReturns\n\n`string`\n\nThe task run ID.\n\n<a id=\"triggeredbyevent\"></a>\n\n##### `triggeredByEvent()`\n\nDetermines if the workflow was triggered by an event.\n\nReturns\n\n`boolean`\n\nTrue if the workflow was triggered by an event, otherwise false.\n\n<a id=\"triggers\"></a>\n\n##### `triggers()`\n\nGets the dag 
conditional triggers for the current workflow run.\n\nReturns\n\n`TriggerData`\n\nThe triggers for the current workflow.\n\n<a id=\"userdata\"></a>\n\n##### `userData()`\n\nGets the user data associated with the workflow.\n\nReturns\n\n`K`\n\nThe user data.\n\n<a id=\"workflowid\"></a>\n\n##### `workflowId()`\n\nGets the workflow ID of the currently running workflow.\n\nReturns\n\n`string` \\| `undefined`\n\nThe workflow id.\n\n<a id=\"workflowname\"></a>\n\n##### `workflowName()`\n\nGets the name of the current workflow.\n\nReturns\n\n`string`\n\nThe name of the workflow.\n\n<a id=\"workflowrunid\"></a>\n\n##### `workflowRunId()`\n\nGets the ID of the current workflow run.\n\nReturns\n\n`string`\n\nThe workflow run ID.\n\n<a id=\"workflowversionid\"></a>\n\n##### `workflowVersionId()`\n\nGets the workflow version ID of the currently running workflow.\n\nReturns\n\n`string` \\| `undefined`\n\nThe workflow version ID.\n\n---\n\n<a id=\"contextworker\"></a>\n\n### ContextWorker\n\nContextWorker is a wrapper around the V1Worker class that provides a more user-friendly interface for the worker from the context of a run.\n\n#### Methods\n\n<a id=\"hasworkflow\"></a>\n\n##### `hasWorkflow()`\n\nChecks if the worker has a registered workflow.\n\nParameters\n\nParameter, Type, Description\n\n`workflowName`, `string`, The name of the workflow to check.\n\nReturns\n\n`boolean`\n\nTrue if the workflow is registered, otherwise false.\n\n<a id=\"id\"></a>\n\n##### `id()`\n\nGets the ID of the worker.\n\nReturns\n\n`string` \\| `undefined`\n\nThe ID of the worker.\n\n<a id=\"labels\"></a>\n\n##### `labels()`\n\nGets the current state of the worker labels.\n\nReturns\n\n`WorkerLabels`\n\nThe labels of the worker.\n\n<a id=\"upsertlabels\"></a>\n\n##### `upsertLabels()`\n\nUpserts a set of labels on the worker.\n\nParameters\n\nParameter, Type, Description\n\n`labels`, `WorkerLabels`, The labels to upsert.\n\nReturns\n\n`Promise`\\<`WorkerLabels`\\>\n\nA promise that resolves 
when the labels have been upserted.\n\n---\n\n<a id=\"durablecontext\"></a>\n\n### DurableContext\n\nDurableContext provides helper methods and useful data to durable tasks at runtime.\nIt extends the Context class and includes additional methods for durable execution like sleepFor and waitFor.\n\nExtends\n\n- [`Context`](#context)\\<`T`, `K`\\>\n\nAccessors\n\n<a id=\"invocationcount\"></a>\n\n##### `invocationCount`\n\nGet Signature\n\nThe invocation count for the current durable task. Used for deduplication across replays.\n\nReturns\n\n`number`\n\n#### Methods\n\n<a id=\"now\"></a>\n\n##### `now()`\n\nGet the current timestamp, memoized across replays. Returns the same Date on every replay of the same task run.\n\nReturns\n\n`Promise`\\<`Date`\\>\n\nThe memoized current timestamp.\n\n<a id=\"sleepfor\"></a>\n\n##### `sleepFor()`\n\nPauses execution for the specified duration.\nDuration is \"global\" meaning it will wait in real time regardless of transient failures like worker restarts.\n\nParameters\n\nParameter, Type, Description\n\n`duration`, `Duration`, The duration to sleep for.\n`readableDataKey?`, `string`, -\n\nReturns\n\n`Promise`\\<`SleepResult`\\>\n\nA promise that resolves with a SleepResult when the sleep duration has elapsed.\n\n<a id=\"sleepuntil\"></a>\n\n##### `sleepUntil()`\n\nDurably sleep until a specific timestamp.\nUses the memoized `now()` to compute the remaining duration, then delegates to `sleepFor`.\n\nParameters\n\nParameter, Type, Description\n\n`wakeAt`, `Date`, The timestamp to sleep until.\n\nReturns\n\n`Promise`\\<`SleepResult`\\>\n\nA SleepResult containing the actual duration slept.\n\n<a id=\"spawnchild\"></a>\n\n##### `spawnChild()`\n\nSpawns a child workflow through the durable event log, waits for the child to complete.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, \\, `string` \\, `BaseWorkflowDeclaration`\\<`Q`, `P`\\> \\, [`TaskWorkflowDeclaration`](Runnables.mdx#taskworkflowdeclaration)\\<`Q`, `P`, \\{ 
\\}, \\{ \\}, \\{ \\}, \\{ \\}\\>, The workflow to spawn.\n`input?`, `Q`, The input data for the child workflow.\n`options?`, `ChildRunOpts`, Options for spawning the child workflow.\n\nReturns\n\n`Promise`\\<`P`\\>\n\nThe result of the child workflow.\n\n<a id=\"spawnchildren\"></a>\n\n##### `spawnChildren()`\n\nSpawns multiple child workflows through the durable event log, waits for all to complete.\n\nParameters\n\nParameter, Type, Description\n\n`children`, `object`[], An array of objects containing the workflow, input, and options for each child.\n\nReturns\n\n`Promise`\\<`P`[]\\>\n\nA list of results from the child workflows.\n\n<a id=\"waitfor\"></a>\n\n##### `waitFor()`\n\nPauses execution until the specified conditions are met.\nConditions are \"global\" meaning they will wait in real time regardless of transient failures like worker restarts.\n\nParameters\n\nParameter, Type, Description\n\n`conditions`, `Conditions` \\, `Conditions`[], The conditions to wait for.\n\nReturns\n\n`Promise`\\<`Record`\\<`string`, `any`\\>\\>\n\nA promise that resolves with the event that satisfied the conditions.\n\n<a id=\"waitforevent\"></a>\n\n##### `waitForEvent()`\n\nLightweight wrapper for waiting for a user event. Allows for shorthand usage of\n`ctx.waitFor` when specifying a user event condition.\n\nFor more complicated conditions, use `ctx.waitFor` directly.\n\nParameters\n\nParameter, Type, Description\n\n`key`, `string`, The event key to wait for.\n`expression?`, `string`, An optional CEL expression to filter events.\n`payloadSchema?`, `T`, An optional Zod schema to validate and parse the event payload.\n\nReturns\n\n`Promise`\\<`TypeOf`\\<`T`\\>\\>\n\nThe event payload, validated against the schema if provided.\n\nLightweight wrapper for waiting for a user event. 
Allows for shorthand usage of\n`ctx.waitFor` when specifying a user event condition.\n\nFor more complicated conditions, use `ctx.waitFor` directly.\n\nParameters\n\nParameter, Type, Description\n\n`key`, `string`, The event key to wait for.\n`expression?`, `string`, An optional CEL expression to filter events.\n\nReturns\n\n`Promise`\\<`Record`\\<`string`, `any`\\>\\>\n\nThe event payload, validated against the schema if provided."},"334":{"title":"Crons","pageTitle":"Crons","pageRoute":"hatchet://docs/reference/typescript/feature-clients/crons","content":"<a id=\"cronclient\"></a>\n\n### Cron Client\n\nThe cron client is a client for managing cron workflows within Hatchet.\n\n#### Methods\n\n<a id=\"create\"></a>\n\n##### `create()`\n\nCreates a new Cron workflow.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `BaseWorkflowDeclaration`\\<`any`, `any`\\> \\, `Workflow`, The workflow identifier or Workflow object.\n`cron`, \\{ `additionalMetadata?`: `Record`\\<`string`, `string`\\>; `expression`: `string`; `input?`: `Record`\\<`string`, `any`\\>; `name`: `string`; `priority?`: `number`; \\}, The input data for creating the Cron Trigger.\n`cron.additionalMetadata?`, `Record`\\<`string`, `string`\\>, -\n`cron.expression`, `string`, -\n`cron.input?`, `Record`\\<`string`, `any`\\>, -\n`cron.name`, `string`, -\n`cron.priority?`, `number`, -\n\nReturns\n\n`Promise`\\<`CronWorkflows`\\>\n\nA promise that resolves to the created CronWorkflows object.\n\nThrows\n\nWill throw an error if the input is invalid or the API call fails.\n\n<a id=\"delete\"></a>\n\n##### `delete()`\n\nDeletes an existing Cron Trigger.\n\nParameters\n\nParameter, Type, Description\n\n`cron`, `string` \\, `CronWorkflows`, The Cron Trigger ID as a string or CronWorkflows object.\n\nReturns\n\n`Promise`\\<`void`\\>\n\nA promise that resolves when the Cron Trigger is deleted.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nRetrieves a specific Cron Trigger by its 
ID.\n\nParameters\n\nParameter, Type, Description\n\n`cron`, `string` \\, `CronWorkflows`, The Cron Trigger ID as a string or CronWorkflows object.\n\nReturns\n\n`Promise`\\<`CronWorkflows`\\>\n\nA promise that resolves to the CronWorkflows object.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all Cron Triggers based on the provided query parameters.\n\nParameters\n\nParameter, Type, Description\n\n`query`, `object` & `object`, Query parameters for listing Cron Triggers.\n\nReturns\n\n`Promise`\\<`CronWorkflowsList`\\>\n\nA promise that resolves to a CronWorkflowsList object."},"335":{"title":"Filters","pageTitle":"Filters","pageRoute":"hatchet://docs/reference/typescript/feature-clients/filters","content":"<a id=\"filtersclient\"></a>\n\n### Filters Client\n\nThe filters client is a client for interacting with Hatchet's filters API.\n\n#### Methods\n\n<a id=\"create\"></a>\n\n##### `create()`\n\nCreates a new filter.\n\nParameters\n\nParameter, Type, Description\n\n`opts`, `V1CreateFilterRequest`, The options for the create operation.\n\nReturns\n\n`Promise`\\<`V1Filter`\\>\n\nA promise that resolves to the created filter.\n\n<a id=\"delete\"></a>\n\n##### `delete()`\n\nDeletes a filter by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`filterId`, `string`, The ID of the filter to delete.\n\nReturns\n\n`Promise`\\<`V1Filter`\\>\n\nA promise that resolves to the deleted filter.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nGets a filter by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`filterId`, `string`, The ID of the filter to get.\n\nReturns\n\n`Promise`\\<`V1Filter`\\>\n\nA promise that resolves to the filter.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all filters.\n\nParameters\n\nParameter, Type, Description\n\n`opts?`, \\{ `limit?`: `number`; `offset?`: `number`; `scopes?`: ...[]; `workflowIds?`: ...[]; \\}, The options for the list operation.\n`opts.limit?`, `number`, The number of filters to return.\n`opts.offset?`, `number`, 
The number of filters to skip before returning the result set.\n`opts.scopes?`, ...[], A list of scopes to filter by.\n`opts.workflowIds?`, ...[], A list of workflow IDs to filter by.\n\nReturns\n\n`Promise`\\<`V1FilterList`\\>\n\nA promise that resolves to the list of filters.\n\n<a id=\"update\"></a>\n\n##### `update()`\n\nUpdates a filter by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`filterId`, `string`, The ID of the filter to update.\n`updates`, `V1UpdateFilterRequest`, The updates to apply to the filter.\n\nReturns\n\n`Promise`\\<`V1Filter`\\>\n\nA promise that resolves to the updated filter."},"336":{"title":"Logs","pageTitle":"Logs","pageRoute":"hatchet://docs/reference/typescript/feature-clients/logs","content":"<a id=\"logsclient\"></a>\n\n### Logs Client\n\nThe logs client is a client for interacting with Hatchet's logs API.\n\n#### Methods\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists the logs for a given task run.\n\nParameters\n\nParameter, Type, Description\n\n`taskRunId`, `string`, The ID of the task run to list logs for.\n`opts?`, [`ListLogsOpts`](#listlogsopts), The options filter for the list operation.\n\nReturns\n\n`Promise`\\<`V1LogLineList`\\>\n\nA promise that resolves to the list of logs."},"337":{"title":"Type Aliases","pageTitle":"Logs","pageRoute":"hatchet://docs/reference/typescript/feature-clients/logs","content":"<a id=\"listlogsopts\"></a>\n\n### ListLogsOpts\n\n```ts\ntype ListLogsOpts = object;\n```\n\nThe options for the list logs operation.\n\nProperties\n\nProperty, Type, Description\n\n<a id=\"attempt\"></a> `attempt?`, `number`, Filter logs by attempt number.\n<a id=\"levels\"></a> `levels?`, `V1LogLineLevel`[], Filter logs by log level.\n<a id=\"limit\"></a> `limit?`, `number`, The maximum number of log lines to return.\n<a id=\"orderbydirection\"></a> `orderByDirection?`, `V1LogLineOrderByDirection`, The direction to order the logs by.\n<a id=\"search\"></a> `search?`, `string`, Filter logs by a search 
string.\n<a id=\"since\"></a> `since?`, `Date`, Return only logs after this date.\n<a id=\"until\"></a> `until?`, `Date`, Return only logs before this date."},"338":{"title":"Metrics","pageTitle":"Metrics","pageRoute":"hatchet://docs/reference/typescript/feature-clients/metrics","content":"<a id=\"metricsclient\"></a>\n\n### Metrics Client\n\nThe metrics client is a client for reading metrics out of Hatchet’s metrics API.\n\n#### Methods\n\n<a id=\"getqueuemetrics\"></a>\n\n##### `getQueueMetrics()`\n\nReturns the queue metrics for the current tenant.\n\nParameters\n\nParameter, Type, Description\n\n`opts?`, `RequestParams`, The options for the request.\n\nReturns\n\n`Promise`\\<`TenantStepRunQueueMetrics`\\>\n\nThe queue metrics for the current tenant.\n\n<a id=\"gettaskstats\"></a>\n\n##### `getTaskStats()`\n\nGet task statistics for the tenant.\n\nReturns\n\n`Promise`\\<`TaskStats`\\>\n\nA record mapping task names to their statistics.\n\n<a id=\"gettaskstatusmetrics\"></a>\n\n##### `getTaskStatusMetrics()`\n\nReturns aggregate task run counts grouped by status (queued, running, completed, failed, cancelled)\n\nParameters\n\nParameter, Type, Description\n\n`query`, \\{ `additional_metadata?`: `string`[]; `parent_task_external_id?`: `string`; `since`: `string`; `triggering_event_external_id?`: `string`; `until?`: `string`; `workflow_ids?`: `string`[]; \\}, Filters for the metrics query (e.g. 
`since`, `until`, `workflow_ids`).\n`query.additional_metadata?`, `string`[], Additional metadata k-v pairs to filter by\n`query.parent_task_external_id?`, `string`, The parent task's external id **Format** uuid **Min Length** 36 **Max Length** 36\n`query.since?`, `string`, The start time to get metrics for **Format** date-time\n`query.triggering_event_external_id?`, `string`, The id of the event that triggered the task **Format** uuid **Min Length** 36 **Max Length** 36\n`query.until?`, `string`, The end time to get metrics for **Format** date-time\n`query.workflow_ids?`, `string`[], The workflow id to find runs for\n`requestParams?`, `RequestParams`, Optional request-level overrides (headers, signal, etc.).\n\nReturns\n\n`Promise`\\<`TaskStatusMetrics`\\>\n\nCounts per status for the matched task runs.\n\n<a id=\"scrapeprometheusmetrics\"></a>\n\n##### `scrapePrometheusMetrics()`\n\nScrape Prometheus metrics for the tenant.\n\nReturns\n\n`Promise`\\<`string`\\>\n\nThe metrics in Prometheus text format."},"339":{"title":"Ratelimits","pageTitle":"Ratelimits","pageRoute":"hatchet://docs/reference/typescript/feature-clients/ratelimits","content":"<a id=\"ratelimitsclient\"></a>\n\n### RatelimitsClient\n\nThe rate limits client is a wrapper for Hatchet’s gRPC API that makes it easier to work with rate limits in Hatchet.\n\n#### Methods\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all rate limits for the current tenant.\n\nParameters\n\nParameter, Type, Description\n\n`opts`, \\, \\{ `limit?`: `number`; `offset?`: `number`; `orderByDirection?`: `RateLimitOrderByDirection`; `orderByField?`: `RateLimitOrderByField`; `search?`: `string`; \\} \\, `undefined`, The options for the list operation.\n\nReturns\n\n`Promise`\\<`RateLimitList`\\>\n\nA promise that resolves to the list of rate limits.\n\n<a id=\"upsert\"></a>\n\n##### `upsert()`\n\nUpserts a rate limit for the current tenant.\n\nParameters\n\nParameter, Type, Description\n\n`opts`, `CreateRateLimitOpts`, The 
options for the upsert operation.\n\nReturns\n\n`Promise`\\<`string`\\>\n\nA promise that resolves to the key of the upserted rate limit."},"340":{"title":"Runs","pageTitle":"Runs","pageRoute":"hatchet://docs/reference/typescript/feature-clients/runs","content":"<a id=\"runsclient\"></a>\n\n### Runs Client\n\nThe runs client is a client for interacting with task and workflow runs within Hatchet.\n\n#### Methods\n\n<a id=\"branchdurabletask\"></a>\n\n##### `branchDurableTask()`\n\nFork (reset) a durable task from a specific node, triggering re-execution from that point.\n\nParameters\n\nParameter, Type, Default value, Description\n\n`taskExternalId`, `string`, `undefined`, The external ID of the durable task to reset.\n`nodeId`, `number`, `undefined`, The node ID to replay from.\n`branchId`, `number`, `0`, -\n\nReturns\n\n`Promise`\\<`AxiosResponse`\\<`V1BranchDurableTaskResponse`, `any`, \\{\n\\}\\>\\>\n\n<a id=\"cancel\"></a>\n\n##### `cancel()`\n\nCancels a task or workflow run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`opts`, `CancelRunOpts`, The options for the cancel operation.\n\nReturns\n\n`Promise`\\<`AxiosResponse`\\<`V1CancelledTasks`, `any`, \\{\n\\}\\>\\>\n\nA promise that resolves to the cancelled run.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nGets a task or workflow run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`run`, `string` \\, `WorkflowRunRef`\\<`T`\\>, The ID of the run to get.\n\nReturns\n\n`Promise`\\<`V1WorkflowRunDetails`\\>\n\nA promise that resolves to the run.\n\n<a id=\"get_status\"></a>\n\n##### `get_status()`\n\nGets the status of a task or workflow run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`run`, `string` \\, `WorkflowRunRef`\\<`T`\\>, The ID of the run to get the status of.\n\nReturns\n\n`Promise`\\<`V1TaskStatus`\\>\n\nA promise that resolves to the status of the run.\n\n<a id=\"gettaskexternalid\"></a>\n\n##### `getTaskExternalId()`\n\nResolve the task external ID for a 
workflow run. For runs with multiple tasks,\nreturns the first task's external ID.\n\nParameters\n\nParameter, Type, Description\n\n`workflowRunId`, `string`, The workflow run ID to look up.\n\nReturns\n\n`Promise`\\<`string`\\>\n\nThe task external ID.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all task and workflow runs for the current tenant.\n\nParameters\n\nParameter, Type, Description\n\n`opts?`, `Partial`\\<`ListRunsOpts`\\>, The options for the list operation.\n\nReturns\n\n`Promise`\\<`V1TaskSummaryList`\\>\n\nA promise that resolves to the list of runs.\n\n<a id=\"replay\"></a>\n\n##### `replay()`\n\nReplays a task or workflow run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`opts`, `ReplayRunOpts`, The options for the replay operation.\n\nReturns\n\n`Promise`\\<`AxiosResponse`\\<`V1ReplayedTasks`, `any`, \\{\n\\}\\>\\>\n\nA promise that resolves to the replayed run.\n\n<a id=\"restoretask\"></a>\n\n##### `restoreTask()`\n\nRestore an evicted durable task so it can resume execution.\n\nParameters\n\nParameter, Type, Description\n\n`taskExternalId`, `string`, The external ID of the evicted task.\n\nReturns\n\n`Promise`\\<`AxiosResponse`\\<`V1RestoreTaskResponse`, `any`, \\{\n\\}\\>\\>\n\n<a id=\"subscribetostream\"></a>\n\n##### `subscribeToStream()`\n\nSubscribes to a stream of events for a task or workflow run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`workflowRunId`, `string`, The ID of the run to subscribe to.\n\nReturns\n\n`AsyncIterableIterator`\\<`string`\\>\n\nA promise that resolves to the stream of events."},"341":{"title":"Schedules","pageTitle":"Schedules","pageRoute":"hatchet://docs/reference/typescript/feature-clients/schedules","content":"<a id=\"scheduleclient\"></a>\n\n### ScheduleClient\n\nThe scheduled client is a client for managing scheduled workflows within Hatchet\n\n#### Methods\n\n<a id=\"bulkdelete\"></a>\n\n##### `bulkDelete()`\n\nBulk deletes scheduled runs (by explicit IDs and/or a 
filter).\n\nParameters\n\nParameter, Type, Description\n\n`opts`, \\{ `filter?`: `ScheduledWorkflowsBulkDeleteFilter`; `scheduledRuns?`: (... \\, ...)[]; \\}, Either `scheduledRuns` (ids/objects) and/or a server-side filter.\n`opts.filter?`, `ScheduledWorkflowsBulkDeleteFilter`, -\n`opts.scheduledRuns?`, (... \\, ...)[], -\n\nReturns\n\n`Promise`\\<`ScheduledWorkflowsBulkDeleteResponse`\\>\n\nA promise that resolves to deleted ids + per-id errors.\n\n<a id=\"bulkupdate\"></a>\n\n##### `bulkUpdate()`\n\nBulk updates (reschedules) scheduled runs.\n\nParameters\n\nParameter, Type, Description\n\n`updates`, `object`[], List of id/object + new triggerAt.\n\nReturns\n\n`Promise`\\<`ScheduledWorkflowsBulkUpdateResponse`\\>\n\nA promise that resolves to updated ids + per-id errors.\n\n<a id=\"create\"></a>\n\n##### `create()`\n\nCreates a new Scheduled Run.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `Workflow`, The workflow name or Workflow object.\n`cron`, \\{ `additionalMetadata?`: `Record`\\<`string`, `string`\\>; `input?`: `Record`\\<`string`, `any`\\>; `priority?`: `number`; `triggerAt`: `Date`; \\}, -\n`cron.additionalMetadata?`, `Record`\\<`string`, `string`\\>, -\n`cron.input?`, `Record`\\<`string`, `any`\\>, -\n`cron.priority?`, `number`, -\n`cron.triggerAt`, `Date`, -\n\nReturns\n\n`Promise`\\<`ScheduledWorkflows`\\>\n\nA promise that resolves to the created ScheduledWorkflows object.\n\nThrows\n\nWill throw an error if the input is invalid or the API call fails.\n\n###### Important\n\nThis method is instrumented by HatchetInstrumentor.\\_patchScheduleCreate.\nKeep the signature in sync with the instrumentor wrapper.\n\n<a id=\"delete\"></a>\n\n##### `delete()`\n\nDeletes an existing Scheduled Run.\n\nParameters\n\nParameter, Type, Description\n\n`scheduledRun`, `string` \\, `ScheduledWorkflows`, The Scheduled Run ID as a string or ScheduledWorkflows object.\n\nReturns\n\n`Promise`\\<`void`\\>\n\nA promise that resolves when the 
Scheduled Run is deleted.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nRetrieves a specific Scheduled Run by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`scheduledRun`, `string` \\, `ScheduledWorkflows`, The Scheduled Run ID as a string or ScheduledWorkflows object.\n\nReturns\n\n`Promise`\\<`ScheduledWorkflows`\\>\n\nA promise that resolves to the ScheduledWorkflows object.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all Scheduled Runs based on the provided query parameters.\n\nParameters\n\nParameter, Type, Description\n\n`query`, `object` & `object`, Query parameters for listing Scheduled Runs.\n\nReturns\n\n`Promise`\\<`ScheduledWorkflowsList`\\>\n\nA promise that resolves to a ScheduledWorkflowsList object.\n\n<a id=\"update\"></a>\n\n##### `update()`\n\nUpdates (reschedules) an existing Scheduled Run.\n\nParameters\n\nParameter, Type, Description\n\n`scheduledRun`, `string` \\, `ScheduledWorkflows`, The Scheduled Run ID as a string or ScheduledWorkflows object.\n`update`, \\{ `triggerAt`: `Date`; \\}, The update payload (currently only triggerAt).\n`update.triggerAt`, `Date`, -\n\nReturns\n\n`Promise`\\<`ScheduledWorkflows`\\>\n\nA promise that resolves to the updated ScheduledWorkflows object."},"342":{"title":"Webhooks","pageTitle":"Webhooks","pageRoute":"hatchet://docs/reference/typescript/feature-clients/webhooks","content":"<a id=\"webhooksclient\"></a>\n\n### Webhooks Client\n\nClient for managing incoming webhooks in Hatchet.\n\nWebhooks allow external systems to trigger Hatchet workflows by sending\nHTTP requests to dedicated endpoints. 
This enables real-time integration\nwith third-party services like GitHub, Stripe, Slack, or any system that\ncan send webhook events.\n\n#### Methods\n\n<a id=\"create\"></a>\n\n##### `create()`\n\nCreates a new webhook.\n\nParameters\n\nParameter, Type, Description\n\n`request`, `CreateWebhookOptions`, The request options for the create operation.\n\nReturns\n\n`Promise`\\<`V1Webhook`\\>\n\nA promise that resolves to the created webhook.\n\n<a id=\"delete\"></a>\n\n##### `delete()`\n\nDeletes a webhook by its name.\n\nParameters\n\nParameter, Type, Description\n\n`webhookName`, `string`, The name of the webhook to delete.\n\nReturns\n\n`Promise`\\<`V1Webhook`\\>\n\nA promise that resolves to the deleted webhook.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nGets a webhook by its name.\n\nParameters\n\nParameter, Type, Description\n\n`webhookName`, `string`, The name of the webhook to get.\n\nReturns\n\n`Promise`\\<`V1Webhook`\\>\n\nA promise that resolves to the webhook.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nLists all webhooks for the current tenant.\n\nParameters\n\nParameter, Type, Description\n\n`options?`, \\{ `limit?`: `number`; `offset?`: `number`; `sourceNames?`: ...[]; `webhookNames?`: ...[]; \\}, The options for the list operation.\n`options.limit?`, `number`, -\n`options.offset?`, `number`, -\n`options.sourceNames?`, ...[], -\n`options.webhookNames?`, ...[], -\n\nReturns\n\n`Promise`\\<`V1WebhookList`\\>\n\nA promise that resolves to the list of webhooks.\n\n<a id=\"update\"></a>\n\n##### `update()`\n\nUpdates a webhook by its name.\n\nParameters\n\nParameter, Type, Description\n\n`webhookName`, `string`, The name of the webhook to update.\n`options`, `Partial`\\<`V1UpdateWebhookRequest`\\>, The options for the update operation.\n\nReturns\n\n`Promise`\\<`V1Webhook`\\>\n\nA promise that resolves to the updated 
webhook."},"343":{"title":"Workers","pageTitle":"Workers","pageRoute":"hatchet://docs/reference/typescript/feature-clients/workers","content":"<a id=\"workersclient\"></a>\n\n### Workers Client\n\nThe workers client is a client for managing workers programmatically within Hatchet.\n\n#### Methods\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nGet a worker by its ID.\n\nParameters\n\nParameter, Type, Description\n\n`workerId`, `string`, The ID of the worker to get.\n\nReturns\n\n`Promise`\\<`Worker`\\>\n\nA promise that resolves to the worker.\n\n<a id=\"ispaused\"></a>\n\n##### `isPaused()`\n\nCheck if a worker is paused.\n\nParameters\n\nParameter, Type, Description\n\n`workerId`, `string`, The ID of the worker to check.\n\nReturns\n\n`Promise`\\<`boolean`\\>\n\nA promise that resolves to true if the worker is paused, false otherwise.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nList all workers in the tenant.\n\nReturns\n\n`Promise`\\<`WorkerList`\\>\n\nA promise that resolves to the list of workers.\n\n<a id=\"pause\"></a>\n\n##### `pause()`\n\nPause a worker.\n\nParameters\n\nParameter, Type, Description\n\n`workerId`, `string`, The ID of the worker to pause.\n\nReturns\n\n`Promise`\\<`Worker`\\>\n\nA promise that resolves to the paused worker.\n\n<a id=\"unpause\"></a>\n\n##### `unpause()`\n\nUnpause a worker.\n\nParameters\n\nParameter, Type, Description\n\n`workerId`, `string`, The ID of the worker to unpause.\n\nReturns\n\n`Promise`\\<`Worker`\\>\n\nA promise that resolves to the unpaused worker."},"344":{"title":"Workflows","pageTitle":"Workflows","pageRoute":"hatchet://docs/reference/typescript/feature-clients/workflows","content":"<a id=\"workflowsclient\"></a>\n\n### Workflows Client\n\nThe workflows client is a client for managing workflows programmatically within Hatchet.\n\nNOTE: that workflows are the declaration, not the individual runs. 
If you're looking for runs, use the RunsClient instead.\n\n#### Methods\n\n<a id=\"delete\"></a>\n\n##### `delete()`\n\nDelete a workflow by its name, ID, or object.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `BaseWorkflowDeclaration`\\<`any`, `any`\\> \\, `Workflow`, The workflow name, ID, or object.\n\nReturns\n\n`Promise`\\<`void`\\>\n\nA promise that resolves to the deleted workflow.\n\n<a id=\"get\"></a>\n\n##### `get()`\n\nGet a workflow by its name, ID, or object.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, `string` \\, `BaseWorkflowDeclaration`\\<`any`, `any`\\> \\, `Workflow`, The workflow name, ID, or object.\n\nReturns\n\n`Promise`\\<`Workflow`\\>\n\nA promise that resolves to the workflow.\n\n<a id=\"getworkflowidfromname\"></a>\n\n##### `getWorkflowIdFromName()`\n\nGets the workflow ID from a workflow name, ID, or object.\nIf the input is not a valid UUID, it will look up the workflow by name.\n\nParameters\n\nParameter, Type, Description\n\n`workflow`, \\, `string` \\, `WorkflowDefinition` \\, `BaseWorkflowDeclaration`\\<`any`, `any`\\> \\, `Workflow`, The workflow name, ID, or object.\n\nReturns\n\n`Promise`\\<`string`\\>\n\nThe workflow ID as a string.\n\n<a id=\"list\"></a>\n\n##### `list()`\n\nList all workflows in the tenant.\n\nParameters\n\nParameter, Type, Description\n\n`opts?`, \\{ `limit?`: `number`; `name?`: `string`; `offset?`: `number`; \\}, The options for the list operation.\n`opts.limit?`, `number`, The number to limit by **Format** int **Default** `50`\n`opts.name?`, `string`, Search by name\n`opts.offset?`, `number`, The number to skip **Format** int **Default** `0`\n\nReturns\n\n`Promise`\\<`WorkflowList`\\>\n\nA promise that resolves to the list of workflows."},"345":{"title":"Runnables","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/typescript/Runnables","content":"# Runnables\n\n`Runnables` in the Hatchet TypeScript SDK are things that can be run, namely tasks and 
workflows. The two main types of runnables you'll encounter are:\n\n- `WorkflowDeclaration`, returned by `hatchet.workflow(...)`, which lets you define tasks and call `run()`, `schedule()`, `cron()`, etc.\n- `TaskWorkflowDeclaration`, returned by `hatchet.task(...)`, which is a single standalone task that exposes the same execution helpers as a workflow.\n\n<a id=\"taskworkflowdeclaration\"></a>\n\n### TaskWorkflowDeclaration\n\nA standalone task declaration that can be run like a workflow.\n\n`TaskWorkflowDeclaration` is returned by `hatchet.task(...)` and wraps a single\ntask definition while exposing the same execution helpers as workflows, such as\n`run()`, `runNoWait()`, `schedule()`, and `cron()` (inherited from\n`BaseWorkflowDeclaration`).\n\nExample:\n\n```typescript\nconst greet = hatchet.task<{ name: string }, { message: string }>({\n  name: \"greet\",\n  fn: async (input) => ({ message: `Hello, ${input.name}!` }),\n});\n\nawait greet.run({ name: \"World\" });\nconst ref = await greet.runNoWait({ name: \"World\" });\n```\n\n#### Template\n\nExtra fields added to the task fn input by pre-middleware hooks.\n\n#### Methods\n\n<a id=\"cron\"></a>\n\n##### `cron()`\n\nCreates a cron schedule for the task.\n\nParameters\n\nParameter, Type, Description\n\n`name`, `string`, The name of the cron schedule.\n`expression`, `string`, The cron expression defining the schedule.\n`input`, `I` & `GlobalInput`, The input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`CronWorkflows`\\>\n\nA promise that resolves with the cron workflow details.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.cron;\n```\n\n<a id=\"delay\"></a>\n\n##### `delay()`\n\nSchedules the task to run after a specified delay.\n\nParameters\n\nParameter, Type, Description\n\n`duration`, `number`, The delay in seconds before the task should run.\n`input`, `I` & `GlobalInput`, The input data for the task, 
including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`ScheduledWorkflows`\\>\n\nA promise that resolves with the scheduled workflow details.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.delay;\n```\n\n<a id=\"run\"></a>\n\n##### `run()`\n\nTriggers a task run and waits for the result.\n\nParameters\n\nParameter, Type, Description\n\n`input`, `I` & `GlobalInput`, The input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`O` & `Resolved`\\<`GlobalOutput`, `MiddlewareAfter`\\>\\>\n\nA promise that resolves with the task output merged with post-middleware fields.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.run;\n```\n\n<a id=\"runandwait\"></a>\n\n##### `runAndWait()`\n\nTriggers a task run and waits for the result.\n\nParameters\n\nParameter, Type, Description\n\n`input`, `I` & `GlobalInput`, The input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`O` & `Resolved`\\<`GlobalOutput`, `MiddlewareAfter`\\>\\>\n\nA promise that resolves with the task output merged with post-middleware fields.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.runAndWait;\n```\n\n<a id=\"runnowait\"></a>\n\n##### `runNoWait()`\n\nTriggers a task run without waiting for completion.\n\nParameters\n\nParameter, Type, Description\n\n`input`, `I` & `GlobalInput`, The input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`WorkflowRunRef`\\<`O` & `Resolved`\\<..., ...\\>\\>\\>\n\nA WorkflowRunRef containing the run ID and methods to get results.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.runNoWait;\n```\n\nTriggers a task run without waiting for completion.\n\nParameters\n\nParameter, Type, Description\n\n`input`, `I` & `GlobalInput`[], The 
input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`WorkflowRunRef`\\<... & ...\\>[]\\>\n\nA WorkflowRunRef containing the run ID and methods to get results.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.runNoWait;\n```\n\n<a id=\"schedule\"></a>\n\n##### `schedule()`\n\nSchedules the task to run at a specific date and time.\n\nParameters\n\nParameter, Type, Description\n\n`enqueueAt`, `Date`, The date when the task should be triggered.\n`input`, `I` & `GlobalInput`, The input data for the task, including global input fields.\n`options?`, `RunOpts`, Optional configuration for this task run.\n\nReturns\n\n`Promise`\\<`ScheduledWorkflows`\\>\n\nA promise that resolves with the scheduled workflow details.\n\nOverrides\n\n```ts\nBaseWorkflowDeclaration.schedule;\n```\n\n---\n\n<a id=\"workflowdeclaration\"></a>\n\n### WorkflowDeclaration\n\nA Hatchet workflow, which lets you define tasks and perform actions on the workflow.\n\nWorkflows in Hatchet represent coordinated units of work that can be triggered,\nscheduled, or run on a cron schedule. 
Each workflow can contain multiple tasks\nthat can be arranged in dependencies (DAGs), with customized retry behavior,\ntimeouts, concurrency controls, and more.\n\nExample:\n\n```typescript\nimport { hatchet } from \"./hatchet-client\";\n\ntype MyInput = { name: string };\n\nconst workflow = hatchet.workflow({\n  name: \"my-workflow\",\n});\n\nworkflow.task({\n  name: \"greet\",\n  fn: async (input) => {\n    return { message: `Hello, ${input.name}!` };\n  },\n});\n\n// Run the workflow\nawait workflow.run({ name: \"World\" });\n```\n\nWorkflows support various execution patterns, including:\n\n- One-time execution with `run()` and `runNoWait()`\n- Scheduled execution with `schedule()`\n- Cron-based recurring execution with `cron()`\n- Bulk execution by passing an array input to `run()` and `runNoWait()`\n\nTasks within workflows can be defined with `workflow.task()` or\n`workflow.durableTask()` and arranged into complex dependency patterns.\n\n#### Methods\n\n<a id=\"durabletask\"></a>\n\n##### `durableTask()`\n\nAdds a durable task to the workflow.\nThe return type will be either the property on O that corresponds to the task name,\nor if there is no matching property, the inferred return type of the function.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `Omit`\\<`CreateWorkflowTaskOpts`\\<`I`, `TO`\\>, `\"fn\"`\\> & `object`, The task configuration options.\n\nReturns\n\n`CreateWorkflowDurableTaskOpts`\\<`I`, `TO`\\>\n\nThe task options that were added.\n\n<a id=\"onfailure\"></a>\n\n##### `onFailure()`\n\nAdds an onFailure task to the workflow.\nThis will only run if any task in the workflow fails.\n\nParameters\n\nParameter, Type, Description\n\n`options`, \\, `Omit`\\<`CreateOnFailureTaskOpts`\\<..., ...\\>, `\"fn\"`\\> & `object` \\, [`TaskWorkflowDeclaration`](#taskworkflowdeclaration)\\<`any`, `any`, \\{ \\}, \\{ \\}, \\{ \\}, \\{ \\}\\>, The task configuration options.\n\nReturns\n\n`CreateWorkflowTaskOpts`\\<`I`, `TaskOutputType`\\<`O`, 
`Name`, `L`\\>\\>\n\nThe task options that were added.\n\n<a id=\"onsuccess\"></a>\n\n##### `onSuccess()`\n\nAdds an onSuccess task to the workflow.\nThis will only run if all tasks in the workflow complete successfully.\n\nParameters\n\nParameter, Type, Description\n\n`options`, \\, [`TaskWorkflowDeclaration`](#taskworkflowdeclaration)\\<`any`, `any`, \\{ \\}, \\{ \\}, \\{ \\}, \\{ \\}\\> \\, `Omit`\\<`CreateOnSuccessTaskOpts`\\<..., ...\\>, `\"fn\"`\\> & `object`, The task configuration options.\n\nReturns\n\n`CreateWorkflowTaskOpts`\\<`I`, `TaskOutputType`\\<`O`, `Name`, `L`\\>\\>\n\nThe task options that were added.\n\n<a id=\"task\"></a>\n\n##### `task()`\n\nAdds a task to the workflow.\nThe return type will be either the property on O that corresponds to the task name,\nor if there is no matching property, the inferred return type of the function.\n\nParameters\n\nParameter, Type, Description\n\n`options`, \\, `Omit`\\<`CreateWorkflowTaskOpts`\\<..., ...\\>, `\"fn\"`\\> & `object` \\, [`TaskWorkflowDeclaration`](#taskworkflowdeclaration)\\<`I`, `TO`, \\{ \\}, \\{ \\}, \\{ \\}, \\{ \\}\\>, The task configuration options.\n\nReturns\n\n`CreateWorkflowTaskOpts`\\<`I`, `TO`\\>\n\nThe task options that were added."},"346":{"title":"Functions","pageTitle":"Runnables","pageRoute":"hatchet://docs/reference/typescript/Runnables","content":"<a id=\"createdurabletaskworkflow\"></a>\n\n### `CreateDurableTaskWorkflow()`\n\nCreates a new durable task workflow declaration with types inferred from the function parameter.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `object` & `Omit`\\<`CreateWorkflowDurableTaskOpts`\\<`I`, `O`\\>, `\"fn\"`\\>, The durable task configuration options.\n`client?`, `IHatchetClient`, Optional Hatchet client instance.\n\nReturns\n\n[`TaskWorkflowDeclaration`](#taskworkflowdeclaration)\\<`I`, `O`\\>\n\nA new TaskWorkflowDeclaration with inferred types.\n\n---\n\n<a id=\"createtaskworkflow\"></a>\n\n### 
`CreateTaskWorkflow()`\n\nCreates a new task workflow declaration with types inferred from the function parameter.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `object` & `Omit`\\<`CreateTaskWorkflowOpts`\\<`I`, `O`\\>, `\"fn\"`\\>, The task configuration options.\n`client?`, `IHatchetClient`, Optional Hatchet client instance.\n\nReturns\n\n[`TaskWorkflowDeclaration`](#taskworkflowdeclaration)\\<`I`, `O`\\>\n\nA new TaskWorkflowDeclaration with inferred types.\n\n---\n\n<a id=\"createworkflow\"></a>\n\n### `CreateWorkflow()`\n\nCreates a new workflow instance.\n\nParameters\n\nParameter, Type, Description\n\n`options`, `CreateWorkflowOpts`, The options for creating the workflow. Optionally include a Zod schema via the `input` field to generate a JSON Schema for the backend.\n`client?`, `IHatchetClient`, Optional Hatchet client instance.\n\nReturns\n\n[`WorkflowDeclaration`](#workflowdeclaration)\\<`I`, `O`\\>\n\nA new Workflow instance."},"347":{"title":"Setup & Installation","pageTitle":"Setup & Installation","pageRoute":"hatchet://docs/reference/cli/index","content":"# Hatchet CLI Setup & Installation\n\n> **Warning:** The Hatchet CLI is currently in beta and may have breaking changes in future\n>   releases.\n\nThe Hatchet CLI is a command-line tool with utilities for running workers locally, interacting with a running Hatchet deployment, and running a local Hatchet instance for development."},"348":{"title":"Features","pageTitle":"Setup & Installation","pageRoute":"hatchet://docs/reference/cli/index","content":"- **Quickstarts**: the `hatchet quickstart` command sets up a local Hatchet instance with a sample project to help you get started quickly.\n\n- **Local worker reloading**: the [`hatchet worker dev`](/cli/running-workers-locally) command lets you run a worker locally with automatic reloading when code changes are detected.\n\n- **A full built-in TUI**: the [`hatchet tui`](/cli/tui) command lets you interact with your Hatchet deployment 
through a terminal user interface (TUI) that provides real-time observability into tasks, workflows, workers, and more.\n\n- **Profiles**: the [`hatchet profile`](/cli/profiles) commands allow you to manage multiple Hatchet instances and tenants with named profiles, making it easy to switch between different environments."},"349":{"title":"Installation","pageTitle":"Setup & Installation","pageRoute":"hatchet://docs/reference/cli/index","content":"The recommended way to install the Hatchet CLI is via our install script or Homebrew:\n\n#### Native Install (Recommended)\n\n**MacOS, Linux, WSL**\n\n```sh\n    curl -fsSL https://install.hatchet.run/install.sh | bash\n```\n\n#### Homebrew\n\n**MacOS**\n\n```sh\n    brew install hatchet-dev/hatchet/hatchet --cask\n```"},"350":{"title":"Verifying Installation","pageTitle":"Setup & Installation","pageRoute":"hatchet://docs/reference/cli/index","content":"After installation, verify that the Hatchet CLI is installed correctly by checking its version:\n\n```sh\nhatchet --version\n```"},"351":{"title":"Profiles","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"# Profiles\n\nThe Hatchet CLI supports managing multiple Hatchet instances and tenants using named profiles. This feature makes it easy to switch between different environments, such as development, staging, and production."},"352":{"title":"Creating a Profile","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"You can create a new profile using the `hatchet profile add` command. You will need to provide a Hatchet API token for the profile.\n\n```sh\nhatchet profile add\n```\n\nThis command will prompt you to enter the API token followed by the profile name. 
You can also provide these as flags:\n\n```sh\nhatchet profile add --name [name] --token [token]\n```"},"353":{"title":"Listing Profiles","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"You can list all the profiles you have configured using the `hatchet profile list` command:\n\n```sh\nhatchet profile list\n```\n\nThis will display all configured profiles, with the default profile marked with `(default)` if one is set."},"354":{"title":"Setting a Default Profile","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"You can set a profile as the default using the `hatchet profile set-default` command. The default profile will be automatically used when no profile is specified with the `--profile` flag.\n\n```sh\n# Set default profile interactively (prompts for selection)\nhatchet profile set-default\n\n# Set a specific profile as default\nhatchet profile set-default --name [name]\n```\n\nOnce a default profile is set, you can run commands without specifying the `--profile` flag:\n\n```sh\n# Uses the default profile\nhatchet worker dev\n```\n\nTo unset the default profile:\n\n```sh\nhatchet profile unset-default\n```"},"355":{"title":"Using a Profile","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"To use a specific profile for your Hatchet CLI commands, you can specify the profile name using the `--profile` flag. This overrides the default profile if one is set.\n\n```sh\nhatchet worker dev --profile [name]\n```"},"356":{"title":"Updating a Profile","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"You can update an existing profile using the `hatchet profile update` command. 
This allows you to change the API token associated with a profile.\n\n```sh\nhatchet profile update\n```"},"357":{"title":"Deleting a Profile","pageTitle":"Profiles","pageRoute":"hatchet://docs/reference/cli/profiles","content":"You can delete a profile using the `hatchet profile remove` command:\n\n```sh\nhatchet profile remove\n```\n\nIf you remove a profile that is set as the default, the default profile setting will be automatically cleared."},"358":{"title":"Running Hatchet Locally","pageTitle":"Running Hatchet Locally","pageRoute":"hatchet://docs/reference/cli/running-hatchet-locally","content":"# Running Hatchet Locally\n\nThe Hatchet CLI provides the `hatchet server` commands to run a local instance of Hatchet for development and testing purposes. This local instance relies on Docker to run the necessary services.\n\nWhen `DOCKER_HOST` is not set, Hatchet resolves the Docker host from the current Docker context. This respects `DOCKER_CONTEXT` when it is set, and otherwise uses the active context from your local Docker configuration. If `DOCKER_HOST` is set explicitly, it takes precedence."},"359":{"title":"Prerequisites","pageTitle":"Running Hatchet Locally","pageRoute":"hatchet://docs/reference/cli/running-hatchet-locally","content":"Before running Hatchet locally, you must have Docker installed on your machine. 
You can download Docker from [here](https://www.docker.com/get-started)."},"360":{"title":"Starting Hatchet Locally","pageTitle":"Running Hatchet Locally","pageRoute":"hatchet://docs/reference/cli/running-hatchet-locally","content":"To start a local instance of Hatchet, run the following command in your terminal:\n\n```sh\nhatchet server start\n```"},"361":{"title":"Stopping Hatchet Locally","pageTitle":"Running Hatchet Locally","pageRoute":"hatchet://docs/reference/cli/running-hatchet-locally","content":"To stop the local Hatchet instance, run the following command:\n\n```sh\nhatchet server stop\n```"},"362":{"title":"Reference","pageTitle":"Running Hatchet Locally","pageRoute":"hatchet://docs/reference/cli/running-hatchet-locally","content":"#### `hatchet server start`\n\n```txt\nStart a local Hatchet server environment using Docker containers. This command will start both a PostgreSQL database and a Hatchet server instance, automatically creating a local profile for easy access.\n\nUsage:\n  hatchet server start [flags]\n\nExamples:\n  # Start server with default settings (port 8888)\n  hatchet server start\n\n  # Start server with custom dashboard port\n  hatchet server start --dashboard-port 9000\n\n  # Start server with custom ports and project name\n  hatchet server start --dashboard-port 9000 --grpc-port 8077 --project-name my-hatchet\n\n  # Start server with custom profile name\n  hatchet server start --profile my-local\n\nFlags:\n  -d, --dashboard-port int    Port for the Hatchet dashboard (default: auto-detect starting at 8888)\n  -g, --grpc-port int         Port for the Hatchet gRPC server (default: auto-detect starting at 7077)\n  -h, --help                  help for start\n  -n, --profile string        Name for the local profile (default: local) (default \"local\")\n  -p, --project-name string   Docker project name for containers (default: hatchet-cli)\n\nGlobal Flags:\n  -v, --version   The version of the hatchet cli.\n```\n\n#### `hatchet server 
stop`\n\n```txt\nStop a local Hatchet server environment that was started using Docker containers with the 'hatchet server start' command.\n\nUsage:\n  hatchet server stop [flags]\n\nExamples:\n  # Stop the local Hatchet server\n  hatchet server stop\n\n  # Stop the local Hatchet server with a custom project name\n  hatchet server stop --project-name my-hatchet\n\nFlags:\n  -h, --help                  help for stop\n  -p, --project-name string   Docker project name for containers (default: hatchet-cli)\n\nGlobal Flags:\n  -v, --version   The version of the hatchet cli.\n```"},"363":{"title":"Running Workers Locally","pageTitle":"Running Workers Locally","pageRoute":"hatchet://docs/reference/cli/running-workers-locally","content":"# Running Workers Locally\n\nThe Hatchet CLI provides the `hatchet worker` commands to run Hatchet workers locally for development and testing purposes."},"364":{"title":"Setting up a hatchet.yaml file","pageTitle":"Running Workers Locally","pageRoute":"hatchet://docs/reference/cli/running-workers-locally","content":"> **Info:** If you've set up a project using `hatchet quickstart`, a `hatchet.yaml` file\n>   is already created for you in the project directory.\n\nThe `hatchet worker` commands rely on a `hatchet.yaml` configuration file to define the worker settings. 
You can create a `hatchet.yaml` file in your project directory which resembles the following (you will need to adjust the `preCmds` and `runCmd` fields to match your project's setup):\n\n#### Python\n\n```yaml\ndev:\n  preCmds: [\"poetry install\"]\n  runCmd: \"poetry run python src/worker.py\"\n  files:\n    - \"**/*.py\"\n    - \"!**/__pycache__/**\"\n    - \"!**/.venv/**\"\n  reload: true\n```\n\n#### Typescript\n\n```yaml\ndev:\n  preCmds: [\"pnpm install\"]\n  runCmd: \"pnpm start\"\n  files:\n    - \"**/*.ts\"\n    - \"!**/node_modules/**\"\n  reload: true\n```\n\n#### Go\n\n```yaml\ndev:\n  preCmds: [\"go mod download\"]\n  runCmd: \"go run ./cmd/worker\"\n  files:\n    - \"**/*.go\"\n  reload: true\n```"},"365":{"title":"Running a worker","pageTitle":"Running Workers Locally","pageRoute":"hatchet://docs/reference/cli/running-workers-locally","content":"Once you have a `hatchet.yaml` file set up, you can run a worker locally using the following command:\n\n```sh\nhatchet worker dev\n```\n\nTo run a worker with a specific profile, you can run:\n\n```sh\nhatchet worker dev --profile <profile-name>\n```\n\n### Disabling auto-reload\n\nIf you want to run the worker without auto-reloading on file changes, you can set the `dev.reload` field to `false` in your `hatchet.yaml` file:\n\n```yaml\ndev:\n  reload: false\n```\n\nOr you can pass the `--no-reload` flag when running the worker:\n\n```sh\nhatchet worker dev --no-reload\n```\n\n### Overriding the run command\n\nYou can override the `runCmd` specified in the `hatchet.yaml` file by using the `--run-cmd` flag:\n\n```sh\nhatchet worker dev --run-cmd \"npm run dev\"\n```"},"366":{"title":"Triggering Workflows","pageTitle":"Triggering Workflows","pageRoute":"hatchet://docs/reference/cli/triggering-workflows","content":"# Triggering Workflows\n\nYou can use the `hatchet trigger` command to trigger workflows locally for testing and development purposes. 
This command allows you to set up triggers in your `hatchet.yaml` file that define how to run specific workflows."},"367":{"title":"Example","pageTitle":"Triggering Workflows","pageRoute":"hatchet://docs/reference/cli/triggering-workflows","content":"In your `hatchet.yaml` file, you can define a trigger for a simple workflow like this:\n\n```yaml\ntriggers:\n  - name: \"simple\"\n    command: \"poetry run python src/run.py\"\n    description: \"Trigger a simple workflow\"\n```\n\nThen, you can select this trigger when running the `hatchet trigger` command:\n\n```sh\nhatchet trigger simple\n```\n\nOr just `hatchet trigger`, which will prompt you to select a trigger interactively."},"368":{"title":"Using the Hatchet TUI","pageTitle":"Using the Hatchet TUI","pageRoute":"hatchet://docs/reference/cli/tui","content":"# Using the Hatchet TUI\n\nThe Hatchet CLI includes a built-in terminal user interface (TUI) that you can use to interact with your Hatchet deployment directly from the terminal. The TUI provides real-time observability into tasks, workflows, workers, and more:\n\n```sh\nhatchet tui\n```\n\n# Features\n\n> **Info:** You can access help documentation by pressing the `h` key within the TUI. This\n>   will display a list of available commands and their descriptions."},"369":{"title":"Runs View","pageTitle":"Using the Hatchet TUI","pageRoute":"hatchet://docs/reference/cli/tui","content":"The Runs view provides a similar experience to the Runs page in the Hatchet dashboard. You can filter and list runs based on filters.\n\n<video controls preload=\"auto\">\n  <source src=\"../../../public/cli/runs_view.mp4\" type=\"video/mp4\" />\n  Your browser does not support the video tag\n</video>"},"370":{"title":"Workflows View","pageTitle":"Using the Hatchet TUI","pageRoute":"hatchet://docs/reference/cli/tui","content":"The Workflows view allows you to see the list of workflows defined in your Hatchet deployment. 
You can view some details about each workflow, along with recent runs:\n\n<video controls preload=\"auto\">\n  <source src=\"../../../public/cli/workflows_view.mp4\" type=\"video/mp4\" />\n  Your browser does not support the video tag\n</video>"},"371":{"title":"Workers View","pageTitle":"Using the Hatchet TUI","pageRoute":"hatchet://docs/reference/cli/tui","content":"The Workers view shows the status of workers connected to your Hatchet deployment. You can see which workers are online, their registered workflows, and some other information.\n\n<video controls preload=\"auto\">\n  <source src=\"../../../public/cli/workers_view.mp4\" type=\"video/mp4\" />\n  Your browser does not support the video tag\n</video>"},"372":{"title":"Contributing","pageTitle":"Contributing","pageRoute":"hatchet://docs/contributing/index","content":"# Contributing\n\n> **Note:** this guide### Setup\n\n1. Start the Database and Queue services:\n\n```sh\ntask start-db\n```\n\n2. Install dependencies, run migrations, generate encryption keys, and seed the database:\n\n```sh\ntask setup\n```\n\n### Starting the dev server\n\nStart the Hatchet engine, API server, dashboard, and Prisma studio:\n\n```sh\ntask start-dev # or task start-dev-tmux if you want to use tmux panes\n```\n\n### Creating and testing workflows\n\nTo create and test workflows, run the examples in the `./examples` directory.\n\nYou will need to add the tenant (output from the `task seed-dev` command) to the `.env` file in each example directory. 
An example `.env` file for the `./examples/simple` directory can be generated via:\n\n```sh\nalias get_token='go run ./cmd/hatchet-admin token create --name local --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52'\n\ncat > ./examples/simple/.env <\n\n# optional\nOTEL_EXPORTER_OTLP_HEADERS=<optional-headers>\n\n# optional\nOTEL_EXPORTER_OTLP_ENDPOINT=<collector-url>\n```\n\n### CloudKMS\n\nCloudKMS can be used to generate master encryption keys:\n\n```\ngcloud kms keyrings create \"development\" --location \"global\"\ngcloud kms keys create \"development\" --location \"global\" --keyring \"development\" --purpose \"encryption\"\ngcloud kms keys list --location \"global\" --keyring \"development\"\n```\n\nFrom the last step, copy the Key URI and set the following environment variable:\n\n```\nSERVER_ENCRYPTION_CLOUDKMS_KEY_URI=gcp-kms://projects//locations/global/keyRings/development/cryptoKeys/development\n```\n\nGenerate a service account in GCP which can encrypt/decrypt on CloudKMS, then download a service account JSON file and set it via:\n\n```\nSERVER_ENCRYPTION_CLOUDKMS_CREDENTIALS_JSON='{...}'\n```"},"373":{"title":"Issues","pageTitle":"Contributing","pageRoute":"hatchet://docs/contributing/index","content":"### Query engine leakage\n\nSometimes the spawned query engines from Prisma don't get killed when hot reloading. You can run `task kill-query-engines` on OSX to kill the query engines.\n\nMake sure you call `.Disconnect` on the database config object when writing CLI commands which interact with the database. 
If you don't, and you try to wrap these CLI commands in a new command, it will never exit, for example:\n\n```\nexport HATCHET_CLIENT_TOKEN=\"$(go run ./cmd/hatchet-admin token create --tenant-id <tenant>)\"\n```"},"374":{"title":"Setup","pageTitle":"GitHub App Setup","pageRoute":"hatchet://docs/contributing/github-app-setup","content":"### Using `ngrok`\n\nYou can use `ngrok` to expose a local port to the internet to accept incoming webhooks from Github. To do this, run the following:\n\n```sh\ntask start-ngrok\n```\n\nMake note of the `https` URL as you will need it later.\n\n### Github App Creation\n\nTo create a Github app that can read from your repositories, navigate to your organization settings page (alternately, you can navigate to your personal settings page) and select **Developer Settings** in the sidebar. Go to **Github Apps** and select **New Github App**. You should use the following settings:\n\n- Homepage URL: you can set this as https://hatchet.run, or some other domain for your organization.\n- Callback URL: `<protocol>://<your-domain>/api/v1/users/github-app/callback`\n- The **Request user authorization (OAuth) during installation** checkbox should be checked.\n- Webhook URL: `<protocol>://<your-https-ngrok-address>/api/v1/github/webhook`\n- Webhook secret: generate a random webhook secret for your domain, for example by running `cat /dev/urandom | base64 | head -c 32`. 
**Make note of this secret, as you will need it later**.\n- Permissions:\n  - **Repository:**\n    - **Checks (Read & write)**: required to write Github checks for each commit/PR.\n    - **Contents (Read):** required for Hatchet to read files from the repository.\n    - **Metadata (Read-only):** mandatory, required for Github apps that integrate with repositories.\n    - **Pull Requests (Read & write):** required for Hatchet to add comments to Github PRs, and to create PRs.\n    - **Webhooks (Read & write):** required for Hatchet to create a Github repository webhooks that notify the Hatchet instance when PRs are updated.\n  - **Account:**\n    - **Email addresses (read-only)**: required for Hatchet to read your Github email address for authentication.\n\n### Creating a Secret and Private Key\n\nAfter creating the Github App, create the following:\n\n- In the \"Client secrets\" section, select **Generate a new client secret**. You will need this secret in the following section.\n- In the \"Private keys\" section, download a new private key for your app. 
You will need this private key in the following section.\n\n### Private Keys and Environment Variables\n\nAfter creating the private key, you can place it somewhere in your filesystem and set the `SERVER_VCS_GITHUB_APP_SECRET_PATH` environment variable to the path of the private key.\n\nMake sure the following environment variables are set:\n\n```txt\nSERVER_VCS_KIND=github\nSERVER_VCS_GITHUB_ENABLED=true\nSERVER_VCS_GITHUB_APP_CLIENT_ID=<client-id>\nSERVER_VCS_GITHUB_APP_CLIENT_SECRET=<client-secret>\nSERVER_VCS_GITHUB_APP_NAME=<app-name>\nSERVER_VCS_GITHUB_APP_WEBHOOK_SECRET=<webhook-secret>\nSERVER_VCS_GITHUB_APP_WEBHOOK_URL=<webhook-url>\nSERVER_VCS_GITHUB_APP_ID=<app-id>\nSERVER_VCS_GITHUB_APP_SECRET_PATH=<path-to-pem-file>\n```"},"375":{"title":"SDKs","pageTitle":"SDKs","pageRoute":"hatchet://docs/contributing/sdks","content":"# SDKs\n\nThis document tracks the feature support of the various SDKs, and aims to consolidate the expected behavior around environment variables and configuration loading."},"376":{"title":"Environment Variables","pageTitle":"SDKs","pageRoute":"hatchet://docs/contributing/sdks","content":"Each SDK should support the following environment variables:\n\nVariable, Description, Required, Default\n\n`HATCHET_CLIENT_TOKEN`, The tenant-scoped API token to use., Yes, N/A\n`HATCHET_CLIENT_HOST_PORT`, The host and port of the Hatchet server to connect to, in `host:port` format. SDKs should handle schemes and trailing slashes, i.e. `https://host:port, No, Automatically detected in new tokens.\n`HATCHET_CLIENT_TLS_STRATEGY`, The TLS strategy to use. 
Valid values are `none`, `tls`, and `mtls`., No, `tls`\n`HATCHET_CLIENT_TLS_CERT_FILE`, The path to the TLS client certificate file to use., Only if strategy is set to `mtls`, N/A\n`HATCHET_CLIENT_TLS_CERT`, The TLS client key file to use., Only if strategy is set to `mtls`, N/A\n`HATCHET_CLIENT_TLS_KEY_FILE`, The path to the TLS client key file to use., Only if strategy is set to `mtls`, N/A\n`HATCHET_CLIENT_TLS_KEY`, The TLS client key to use., Only if strategy is set to `mtls`, N/A\n`HATCHET_CLIENT_TLS_ROOT_CA_FILE`, The path to the TLS root CA file to use., Only if the server certificate is not signed by a public authority that's available to your environment, N/A\n`HATCHET_CLIENT_TLS_ROOT_CA`, The TLS root CA to use., Only if the server certificate is not signed by a public authority that's available to your environment, N/A\n`HATCHET_CLIENT_TLS_SERVER_NAME`, The TLS server name to use., No, Defaults to the `host` of the `host:port`\n\nThe following environment variables are deprecated:\n\nVariable, Description, Explanation\n\n`HATCHET_CLIENT_TENANT_ID`, The tenant ID to use., This is now part of the token."},"377":{"title":"Compatibility Matrices","pageTitle":"SDKs","pageRoute":"hatchet://docs/contributing/sdks","content":"### DAGs\n\nWhether the SDKs support full DAG-style execution.\n\nSDK, DAGs?, Notes\n\nGo SDK, Yes\nPython SDK, Yes\nTypescript SDK, Yes\n\n### Timeouts\n\nWhether the SDKs support setting timeouts and cancelling after timeouts.\n\nSDK, Timeouts?, Step cancellation?, Notes\n\nGo SDK, Yes, Yes\nPython SDK, Yes, Yes, If thread is blocking, this won't be respected\nTypescript SDK, Yes, Unknown\n\n### Middleware\n\nWhether the SDKs support setting middleware.\n\nSDK, Middleware?, Notes\n\nGo SDK, Yes\nPython SDK, No\nTypescript SDK, No\n\n### Separately Registering and Calling Actions\n\nWhether the SDKs support separately registering and calling actions, instead of defining them inline in the workflows.\n\nSDK, Supported?, Notes\n\nGo SDK, 
Yes\nPython SDK, No\nTypescript SDK, No\n\n### Custom Services\n\nWhether the SDKs support defining services to logically separate workflows and actions.\n\nSDK, Supported?, Notes\n\nGo SDK, Yes\nPython SDK, No\nTypescript SDK, No\n\n### Scheduled Workflows\n\nWhether the SDKs support defining scheduled workflows.\n\nSDK, Supported?, Notes\n\nGo SDK, Yes\nPython SDK, No\nTypescript SDK, No"}},"dirtCount":0,"index":[["~5",{"1":{"288":1}}],["~48",{"1":{"288":1}}],["~40",{"1":{"288":1}}],["~220",{"1":{"288":1}}],["~2",{"1":{"288":2}}],["~25ms",{"1":{"177":1}}],["qos=200",{"1":{"295":1}}],["qos",{"1":{"261":1}}],["qos`",{"1":{"261":1}}],["quantile",{"1":{"268":1}}],["query",{"1":{"146":2,"174":1,"196":1,"294":2,"314":4,"328":2,"334":2,"338":1,"341":2,"373":4},"2":{"328":1}}],["querying",{"1":{"128":1,"173":1}}],["queries`",{"1":{"255":1}}],["queries",{"1":{"79":1,"130":1,"231":1,"255":1,"268":4,"294":1,"298":1,"300":1}}],["questions",{"0":{"208":1},"1":{"38":1,"208":2}}],["queuer",{"1":{"268":3}}],["queuesteprunbuffer",{"1":{"294":4}}],["queues",{"0":{"73":1},"1":{"30":1,"82":1,"87":1,"123":1,"129":2,"210":1}}],["queueing",{"1":{"29":1,"288":1}}],["queue",{"0":{"261":1},"1":{"18":1,"22":1,"49":1,"64":1,"65":1,"73":5,"75":1,"83":1,"124":1,"128":1,"129":2,"130":1,"144":2,"169":1,"175":1,"223":1,"225":2,"255":4,"261":3,"263":4,"268":12,"294":2,"295":2,"297":1,"314":10,"338":2,"372":1}}],["queued",{"0":{"169":1},"1":{"17":1,"75":1,"104":1,"109":1,"112":2,"123":2,"124":7,"129":3,"130":4,"174":1,"268":10,"314":4,"338":1},"2":{"130":3}}],["queuing",{"1":{"0":2,"74":1,"175":1}}],["quick",{"0":{"166":1,"180":1},"1":{"90":1,"104":1,"178":1,"216":1}}],["quickstarts",{"1":{"348":1}}],["quickstart`",{"1":{"7":1,"348":1,"364":1}}],["quickstart",{"0":{"7":1,"227":1,"232":1,"234":1,"238":1},"1":{"6":2,"7":8,"8":1,"178":1,"184":2,"218":1,"224":2,"227":1,"232":1},"3":{"7":1,"8":1}}],["quickly",{"1":{"4":1,"6":1,"150":1,"225":1,"226":1,"348":1}}],["⚠️",{"1":{"254":1,"258":1,"260":4}}],["↔
",{"1":{"177":1}}],["❌",{"1":{"153":1}}],["✅",{"1":{"153":1}}],["71",{"1":{"283":2,"285":2}}],["78",{"1":{"278":3}}],["7",{"1":{"268":2,"288":1}}],["7070`",{"1":{"234":1,"238":1,"254":1}}],["7070",{"1":{"227":2,"234":1,"238":1,"241":2,"243":2}}],["7077",{"1":{"225":9,"227":2,"230":1,"294":1,"362":1}}],["707d0855",{"1":{"129":1,"148":1,"228":1,"243":1,"268":1,"372":1}}],["7200",{"1":{"35":1}}],["youtube",{"1":{"325":1}}],["yourdomain",{"1":{"304":3},"2":{"304":1}}],["yourself",{"1":{"204":1}}],["yml",{"1":{"225":4,"227":3,"231":1},"2":{"227":1}}],["yml`",{"1":{"225":2,"226":1,"227":1,"278":1,"285":1}}],["ymir",{"1":{"5":1}}],["yields",{"1":{"328":1}}],["yield",{"1":{"153":4,"160":2,"328":1,"330":4}}],["yarn",{"1":{"127":7},"2":{"127":1}}],["yaml`",{"1":{"241":1,"243":1,"245":1,"364":3,"365":3,"366":1,"367":1}}],["yaml",{"0":{"364":1},"1":{"127":2,"238":2,"268":1,"278":3},"2":{"127":1,"238":1,"278":1}}],["y",{"1":{"127":2}}],["yesterday",{"1":{"266":4}}],["yes",{"1":{"62":4,"154":3,"158":3,"376":1,"377":12}}],["<tenant>",{"1":{"373":1}}],["<video",{"1":{"369":1,"370":1,"371":1}}],["<profile",{"1":{"365":1}}],["<password>",{"1":{"245":2}}],["<`globaloutput`",{"1":{"345":2}}],["<`globalinput`",{"1":{"332":3}}],["<`boolean`",{"1":{"343":1}}],["<`listrunsopts`",{"1":{"340":1}}],["<`l`",{"1":{"333":1}}],["<`ratelimitlist`",{"1":{"339":1}}],["<`record`",{"1":{"333":2}}],["<`v1updatewebhookrequest`",{"1":{"342":1}}],["<`v1webhooklist`",{"1":{"342":1}}],["<`v1webhook`",{"1":{"342":4}}],["<`v1workflowrundetails`",{"1":{"340":1}}],["<`v1restoretaskresponse`",{"1":{"340":1}}],["<`v1replayedtasks`",{"1":{"340":1}}],["<`v1tasksummarylist`",{"1":{"340":1}}],["<`v1taskstatus`",{"1":{"340":1}}],["<`v1cancelledtasks`",{"1":{"340":1}}],["<`v1branchdurabletaskresponse`",{"1":{"340":1}}],["<`v1loglinelist`",{"1":{"336":1}}],["<`v1filterlist`",{"1":{"335":1}}],["<`v1filter`",{"1":{"335":4}}],["<`void`",{"1":{"333":3,"334":1,"341":1,"344":1}}],["<`cronworkflowslist`",{"1":{"334":1}}],["<`c
ronworkflows`",{"1":{"334":2,"345":1}}],["<`createworkflowdurabletaskopts`",{"1":{"346":1}}],["<`createworkflowtaskopts`",{"1":{"345":2}}],["<`createonsuccesstaskopts`",{"1":{"345":1}}],["<`createonfailuretaskopts`",{"1":{"345":1}}],["<`createtaskworkflowopts`",{"1":{"332":1,"346":1}}],["<`createdurabletaskworkflowopts`",{"1":{"332":1}}],["<`taskstatusmetrics`",{"1":{"338":1}}],["<`taskstats`",{"1":{"338":1}}],["<`tenantsteprunqueuemetrics`",{"1":{"338":1}}],["<`typeof`",{"1":{"333":1}}],["<`t`",{"1":{"333":2,"340":2}}],["<`scheduledworkflowslist`",{"1":{"341":1}}],["<`scheduledworkflows`",{"1":{"341":3,"345":2}}],["<`scheduledworkflowsbulkupdateresponse`",{"1":{"341":1}}],["<`scheduledworkflowsbulkdeleteresponse`",{"1":{"341":1}}],["<`sleepresult`",{"1":{"333":2}}],["<`string`",{"1":{"333":5,"334":4,"338":1,"339":1,"340":2,"341":4,"344":1}}],["<`date`",{"1":{"333":1}}],["<`q`",{"1":{"333":5}}],["<`axiosresponse`",{"1":{"340":4}}],["<`arraybufferlike`",{"1":{"333":1}}],["<`any`",{"1":{"333":2,"334":1,"344":3,"345":2}}],["<`p`",{"1":{"333":6}}],["<`workflowlist`",{"1":{"344":1}}],["<`workflow`",{"1":{"344":1}}],["<`workflowrunref`",{"1":{"332":1,"333":2,"345":2}}],["<`workerlist`",{"1":{"343":1}}],["<`workerlabels`",{"1":{"333":1}}],["<`worker`",{"1":{"332":1,"343":3}}],["<`o`",{"1":{"332":5,"345":5}}],["<`i`",{"1":{"332":12,"345":6,"346":5}}],["<`unknown`",{"1":{"332":1}}],["<a",{"1":{"332":24,"333":40,"334":5,"335":6,"336":2,"337":8,"338":5,"339":3,"340":10,"341":8,"342":6,"343":6,"344":5,"345":12,"346":3}}],["<migration",{"1":{"279":1}}],["<source",{"1":{"369":1,"370":1,"371":1}}],["<secret2>",{"1":{"252":1}}],["<secret1>",{"1":{"252":1}}],["<script",{"1":{"127":1}}],["<credentials",{"1":{"252":1}}],["<base64",{"1":{"252":3}}],["<db",{"1":{"245":2}}],["<host>",{"1":{"245":2}}],["<user>",{"1":{"245":2}}],["<<~text",{"1":{"160":1}}],["<=",{"1":{"117":4}}],["<",{"1":{"59":4,"60":2,"68":2,"104":2,"105":3,"135":1,"136":1,"142":2,"160":2,"173":4,"238":4,"294":1,"345":5,
"369":1,"370":1,"371":1,"372":1}}],["<your",{"1":{"21":1,"374":2}}],["zac",{"1":{"325":1}}],["zero",{"1":{"57":1,"61":1,"109":1}}],["zones",{"1":{"248":1}}],["zone",{"1":{"35":1,"37":2,"42":2}}],["zod",{"1":{"20":1,"333":1,"346":1}}],["xyz",{"1":{"136":1}}],["x",{"1":{"46":2,"216":3}}],["8af4",{"1":{"305":1}}],["8aa0fc74b3e5",{"1":{"46":1}}],["8aa0fc74b3e5`",{"1":{"46":1}}],["8gb",{"1":{"292":1}}],["8361e398",{"1":{"305":1}}],["83",{"1":{"287":1}}],["8xlarge`",{"1":{"286":1}}],["8888",{"1":{"225":8,"362":2}}],["8",{"1":{"115":1,"225":2,"227":1,"268":2,"286":1}}],["8077",{"1":{"362":1}}],["80",{"1":{"227":1}}],["8080`",{"1":{"234":1,"254":1}}],["8080",{"1":{"227":4,"234":1,"241":2,"243":4,"253":1}}],["80ab",{"1":{"129":1,"148":1,"228":1,"243":1,"268":1,"372":1}}],["8000",{"1":{"162":2}}],["8001",{"1":{"146":4}}],["800",{"1":{"74":1,"78":1,"287":1}}],["8023",{"1":{"52":1}}],["8s",{"1":{"60":3}}],["8601",{"1":{"196":2}}],["86",{"1":{"35":1,"72":1}}],["$server",{"1":{"241":10}}],["$stdout",{"1":{"142":1}}],["$1",{"1":{"238":1}}],["$container",{"1":{"234":2,"238":1}}],["$pod",{"1":{"234":4,"238":2}}],["$namespace",{"1":{"234":6,"238":3}}],["$",{"1":{"43":1,"59":1,"84":1,"142":4,"157":1,"214":2,"216":3,"219":3,"220":2,"221":1,"238":3,"241":1,"294":1,"345":2,"373":1}}],["│",{"1":{"38":15}}],["┌─────────────",{"1":{"38":6}}],["6c6c4ac95353",{"1":{"305":1}}],["69",{"1":{"238":1}}],["64",{"1":{"231":1}}],["65",{"1":{"50":1}}],["6",{"1":{"38":5,"225":2,"227":1,"231":1,"268":2,"288":1}}],["600",{"1":{"65":1,"287":1}}],["60s",{"1":{"64":1,"289":4}}],["60",{"1":{"35":6,"84":7,"113":1,"118":1,"241":3,"268":1,"287":1,"289":1}}],["4e97",{"1":{"305":1}}],["4e1f",{"1":{"129":1,"148":1,"228":1,"243":1,"268":1,"372":1}}],["4xlarge`",{"1":{"290":2}}],["4xx",{"1":{"61":1}}],["443",{"1":{"240":2,"241":2}}],["4mb",{"1":{"167":1}}],["4907",{"1":{"146":1}}],["456",{"1":{"129":1,"154":1}}],["451b",{"1":{"52":1}}],["47",{"1":{"109":1}}],["42",{"1":{"325":4}}],["422",{"1":{"62":1}}],["429`",{"1"
:{"62":2}}],["429=false",{"1":{"62":1}}],["429",{"1":{"62":3}}],["409",{"1":{"62":1}}],["403",{"1":{"62":1}}],["401",{"1":{"62":1}}],["404",{"1":{"62":2}}],["400",{"1":{"35":1,"62":1,"72":1,"287":1}}],["4s",{"1":{"60":3}}],["4d4c",{"1":{"46":2}}],["4",{"1":{"37":1,"42":1,"57":1,"65":2,"66":2,"69":1,"74":1,"75":1,"76":1,"81":1,"82":2,"84":3,"107":1,"123":1,"124":1,"127":1,"132":1,"133":1,"136":2,"149":2,"153":1,"164":1,"166":1,"210":1,"216":1,"238":1,"249":1,"251":1,"268":2,"276":1,"278":1,"284":1,"292":1}}],["05",{"1":{"296":1}}],["0kb",{"1":{"289":1}}],["0d47a1",{"1":{"173":1}}],["0`",{"1":{"146":1,"258":1,"265":1,"272":1,"283":2,"291":1}}],["00z",{"1":{"129":2}}],["000+",{"1":{"175":1}}],["000",{"1":{"5":1}}],["01",{"1":{"129":2}}],["0",{"1":{"35":3,"38":6,"39":1,"40":7,"43":2,"50":2,"55":2,"57":1,"60":2,"62":1,"66":2,"68":1,"72":2,"84":6,"89":2,"104":4,"105":6,"115":1,"116":2,"117":9,"135":1,"136":1,"141":1,"142":5,"145":1,"146":21,"149":4,"160":5,"221":3,"225":8,"227":8,"234":6,"238":3,"240":8,"241":9,"253":4,"254":4,"255":6,"266":2,"268":1,"283":1,"285":2,"289":1,"294":4,"298":2,"328":1,"331":1}}],["9000",{"1":{"362":2}}],["9090",{"1":{"268":1}}],["9090`",{"1":{"264":1}}],["95",{"1":{"268":1}}],["95th",{"1":{"268":1}}],["9999",{"1":{"268":1}}],["92ec",{"1":{"46":2}}],["9",{"1":{"35":1,"38":2,"268":2}}],["2k",{"1":{"289":1}}],["2xlarge`",{"1":{"286":1,"290":1}}],["2px",{"1":{"173":3}}],["2`",{"1":{"167":1,"323":4,"324":4}}],["27",{"1":{"146":1}}],["21",{"1":{"127":2,"331":1}}],["22",{"1":{"127":3}}],["22c55e",{"1":{"101":3}}],["250",{"1":{"209":1}}],["256",{"1":{"136":1,"157":3}}],["25",{"1":{"84":2,"127":2,"129":2}}],["2023",{"1":{"325":1}}],["20241023223039",{"1":{"284":1}}],["2024",{"1":{"129":2}}],["2025",{"1":{"35":1}}],["20",{"1":{"78":1,"160":2,"167":1}}],["2000",{"1":{"160":1,"287":1,"288":2,"289":2,"290":1}}],["200",{"1":{"112":1,"142":2,"146":1,"160":2}}],["200+rand",{"1":{"74":1,"78":1}}],["200ms",{"1":{"74":1,"78":1}}],["2s",{"1":{"60":3}}],["23",{"1
":{"38":1,"88":1,"89":1,"109":1,"266":1}}],["244",{"1":{"146":1}}],["24",{"1":{"35":1,"88":1,"89":1,"109":2,"123":1,"124":1,"196":1}}],["26`",{"1":{"286":1,"290":1}}],["26",{"1":{"35":1,"278":3}}],["2",{"0":{"278":1},"1":{"32":1,"35":1,"37":1,"38":1,"39":3,"42":1,"46":2,"49":1,"54":1,"57":1,"59":4,"60":3,"61":1,"62":1,"64":1,"65":2,"66":1,"68":2,"69":1,"72":1,"74":2,"75":1,"76":1,"78":1,"80":1,"81":7,"82":6,"83":1,"84":3,"94":1,"95":2,"101":1,"102":1,"105":3,"107":1,"118":2,"123":1,"124":1,"126":1,"127":5,"132":2,"133":2,"136":2,"142":1,"145":2,"146":5,"149":2,"153":1,"160":3,"164":2,"166":1,"167":1,"168":1,"169":1,"170":1,"186":1,"198":3,"209":2,"216":1,"218":1,"224":1,"228":1,"240":2,"249":2,"253":1,"268":3,"276":1,"278":1,"279":1,"281":1,"284":1,"290":2,"294":1,"323":2,"324":2,"326":2,"331":1,"372":1}}],["11",{"1":{"268":2}}],["1e3",{"1":{"268":1}}],["19",{"1":{"251":1}}],["197+",{"1":{"250":1}}],["1976d2",{"1":{"173":1}}],["172",{"1":{"251":1}}],["1h",{"1":{"241":3,"268":2}}],["1d",{"1":{"241":1,"333":1}}],["1gi",{"1":{"289":2}}],["1g",{"1":{"231":1}}],["1b5e20",{"1":{"173":1}}],["16",{"1":{"157":1,"238":2}}],["185833",{"1":{"305":1}}],["18782",{"1":{"146":1}}],["18",{"1":{"127":3}}],["13",{"1":{"127":2,"268":1}}],["1s",{"1":{"121":1}}],["1mb",{"1":{"177":1}}],["1m",{"1":{"113":1,"118":1,"333":1}}],["1`",{"1":{"38":1,"253":1,"254":1,"255":3,"290":1,"323":2,"324":2}}],["127",{"1":{"253":1,"294":1,"298":1}}],["123",{"1":{"129":1,"153":2,"154":1}}],["1234",{"1":{"112":3,"149":2}}],["12",{"1":{"35":1,"38":1,"40":3,"141":1,"142":3,"268":1}}],["15673",{"1":{"227":1}}],["15672",{"1":{"225":2,"227":1}}],["15t10",{"1":{"129":2}}],["150",{"1":{"129":1}}],["15000",{"1":{"65":1,"66":1}}],["15s",{"1":{"66":2}}],["15",{"1":{"35":1,"38":2,"39":2,"65":1,"66":1,"129":1,"146":2,"225":2,"227":1,"231":1,"287":1}}],["14",{"1":{"35":1}}],["1",{"0":{"277":1},"1":{"32":1,"35":1,"37":1,"38":4,"42":1,"43":2,"46":2,"49":1,"54":1,"57":1,"61":5,"62":1,"64":1,"65":2,"66":1,"68":3,"69":1,"72"
:1,"74":5,"75":1,"76":1,"78":3,"80":1,"81":5,"82":9,"83":4,"84":6,"88":1,"89":2,"94":6,"95":2,"96":1,"101":1,"102":1,"104":5,"105":6,"107":1,"109":1,"110":4,"113":9,"116":1,"117":12,"118":7,"120":3,"121":1,"123":1,"124":1,"126":1,"127":6,"130":1,"132":1,"133":3,"141":1,"142":3,"145":2,"146":7,"149":2,"155":1,"160":1,"164":2,"166":1,"167":1,"168":1,"169":1,"170":1,"198":2,"209":2,"214":1,"216":1,"218":1,"224":2,"228":1,"238":1,"240":2,"241":1,"251":3,"253":2,"254":1,"268":3,"276":1,"278":1,"279":2,"281":1,"284":1,"289":1,"294":3,"296":1,"298":1,"301":1,"305":1,"322":2,"323":4,"324":6,"326":1,"331":2,"333":2,"372":1}}],["1024",{"1":{"157":1}}],["10m",{"1":{"109":1,"112":1,"116":1}}],["10",{"1":{"60":5,"65":4,"66":3,"68":5,"81":3,"82":3,"95":1,"110":2,"129":1,"130":1,"135":2,"136":3,"138":3,"142":2,"146":2,"160":4,"162":1,"175":1,"209":1,"268":3,"289":1,"294":3,"301":1,"333":1}}],["10s",{"1":{"60":9,"65":2,"66":3,"110":1,"113":1,"225":7,"227":5,"231":3,"289":3,"333":1}}],["10d451db11d3",{"1":{"52":1}}],["10k",{"1":{"5":1,"286":1}}],["1000ms",{"1":{"74":1,"78":1}}],["1000",{"1":{"35":3,"68":1,"84":3,"105":1,"142":1,"268":1,"294":2,"313":2}}],["100",{"1":{"0":1,"4":1,"20":1,"22":1,"94":3,"95":1,"96":1,"105":2,"110":4,"113":8,"117":12,"118":4,"129":1,"130":1,"187":1,"209":2,"287":1,"288":2,"289":7,"326":1}}],["jwt",{"1":{"238":2,"241":8,"252":8,"253":2,"259":8}}],["jwts",{"1":{"158":1}}],["juice",{"1":{"209":1}}],["judiciously",{"1":{"63":1}}],["js",{"1":{"127":3},"2":{"127":1}}],["jsonserializablemapping",{"1":{"323":2,"324":2}}],["jsonserializablemapping`",{"1":{"309":1}}],["json>",{"1":{"252":1}}],["json=",{"1":{"252":1,"372":1}}],["jsonobject",{"1":{"142":6}}],["jsonpath=",{"1":{"234":4,"238":2}}],["jsonpath",{"1":{"130":1}}],["json`",{"1":{"21":1,"157":1,"216":2,"259":1}}],["json",{"1":{"15":1,"46":1,"127":6,"129":1,"157":6,"216":1,"217":1,"220":1,"221":1,"259":4,"309":1,"346":1,"372":1},"2":{"127":1,"157":3}}],["javascript",{"1":{"127":3,"158":1}}],["jitter`",{"1":{
"265":1}}],["jitter",{"1":{"62":1,"265":1}}],["joined",{"1":{"219":1}}],["joining",{"1":{"219":1}}],["join",{"1":{"43":1,"74":1,"95":1,"96":1,"152":1,"153":1,"154":1,"155":1,"156":1,"177":1}}],["john",{"1":{"40":3}}],["job=",{"1":{"146":1}}],["job",{"1":{"38":1,"41":2,"132":1,"146":1,"268":1,"322":14,"324":14}}],["jobs",{"1":{"25":1,"26":1,"38":1,"41":1,"175":1}}],["587`",{"1":{"305":4}}],["587",{"1":{"304":1}}],["56",{"1":{"289":1}}],["5673",{"1":{"227":1}}],["5672",{"1":{"225":3,"227":2}}],["55",{"1":{"286":1,"290":1}}],["5431",{"1":{"253":1,"328":1}}],["5435",{"1":{"227":1,"231":1}}],["5432",{"1":{"225":2,"227":5,"231":1,"245":2,"298":1}}],["512",{"1":{"135":3,"136":2}}],["5s",{"1":{"109":3,"146":1}}],["5m",{"1":{"64":1,"105":1,"268":36,"309":1}}],["5xx",{"1":{"62":1}}],["5a86",{"1":{"52":1}}],["59",{"1":{"38":2,"266":1}}],["5",{"1":{"22":1,"38":3,"39":1,"62":1,"65":1,"66":5,"69":1,"74":1,"75":1,"76":1,"89":1,"94":2,"101":10,"107":1,"109":1,"123":1,"162":1,"166":1,"225":3,"227":2,"231":1,"268":5,"288":1,"325":4,"328":1}}],["50m",{"1":{"241":1}}],["50ms",{"1":{"177":1}}],["500m",{"1":{"289":1}}],["500",{"1":{"162":1,"287":1,"288":2,"296":1}}],["503",{"1":{"146":2,"272":1}}],["50",{"1":{"5":2,"117":10,"118":1,"129":1,"130":2,"135":2,"136":2,"278":1,"293":1}}],["k",{"1":{"338":1}}],["kms",{"1":{"252":2,"259":3,"372":4}}],["kubectl",{"1":{"234":6,"238":3,"278":1,"279":1}}],["kubernetes",{"1":{"130":1,"189":1,"224":2,"232":1,"233":1,"234":1,"235":1,"236":1,"237":1,"238":2,"239":1,"240":1,"241":13,"246":3,"249":1,"278":2,"279":1,"289":2,"290":1},"2":{"234":1,"238":1,"241":2}}],["karenina",{"1":{"160":4},"2":{"160":1}}],["kafka",{"1":{"30":1,"73":1}}],["kb",{"1":{"157":1}}],["kind=github",{"1":{"374":1}}],["kind=smtp",{"1":{"304":1}}],["kind=postgres",{"1":{"226":1}}],["kind`",{"1":{"261":1,"265":1}}],["kind",{"1":{"130":3,"145":1,"225":1,"261":1,"265":1,"289":1}}],["killed",{"1":{"170":1,"373":1}}],["kill",{"1":{"68":2,"170":1,"171":1,"210":1,"373":2}}],["kick",{"1":{"
49":1}}],["kicking",{"1":{"25":1}}],["kept",{"1":{"290":1}}],["keda",{"0":{"130":1},"1":{"130":5},"2":{"130":1}}],["keyring",{"1":{"372":2}}],["keyrings",{"1":{"372":2}}],["keyed",{"1":{"129":1}}],["keyexpr",{"1":{"81":1}}],["key=f",{"1":{"105":1,"294":1,"326":1}}],["key=rate",{"1":{"82":1}}],["key=",{"1":{"81":1,"113":2,"118":1,"136":2,"294":1}}],["key=event",{"1":{"52":2,"89":1,"116":1}}],["key`",{"1":{"54":1,"81":1,"145":1,"258":2,"262":2,"265":1,"310":1,"376":1}}],["keyset`",{"1":{"259":3}}],["keyset>",{"1":{"252":1}}],["keyset=",{"1":{"252":3}}],["keyset=$",{"1":{"238":3}}],["keyset",{"1":{"238":1,"241":14,"252":6,"253":4,"259":9},"2":{"252":3}}],["keysets",{"1":{"238":2}}],["keys",{"1":{"46":2,"47":1,"62":1,"80":1,"81":1,"89":2,"116":2,"129":1,"136":2,"144":1,"150":1,"158":1,"214":1,"216":1,"238":12,"252":3,"253":6,"311":2,"372":4,"374":2},"2":{"89":1,"116":1}}],["key",{"1":{"46":15,"48":1,"50":13,"52":10,"54":1,"72":3,"74":2,"76":1,"78":1,"81":6,"82":16,"84":6,"88":2,"89":7,"103":1,"105":1,"111":2,"112":6,"113":3,"115":1,"116":7,"118":1,"130":1,"135":1,"142":2,"145":1,"149":5,"150":1,"157":10,"164":1,"190":1,"191":1,"193":2,"214":4,"216":2,"219":1,"220":2,"221":1,"238":4,"246":3,"252":2,"253":7,"258":2,"259":2,"262":6,"265":1,"273":2,"294":4,"304":2,"305":1,"310":1,"315":6,"317":4,"318":10,"322":1,"324":1,"333":7,"339":1,"368":1,"372":2,"374":5,"376":4},"2":{"157":1,"238":3,"253":3}}],["keywords",{"1":{"0":2,"7":2,"9":2}}],["keeps",{"0":{"170":1},"1":{"153":1}}],["keeping",{"1":{"73":1}}],["keep",{"1":{"33":1,"37":1,"38":1,"42":1,"55":1,"75":1,"153":1,"157":1,"158":1,"162":2,"176":1,"189":1,"208":1,"341":1}}],["knowing",{"1":{"309":1}}],["know",{"1":{"74":1,"85":2,"100":1,"154":1,"294":1,"326":1}}],["known",{"1":{"27":1,"40":1,"80":1,"81":1,"82":1,"98":1,"100":2,"106":1}}],["knows",{"1":{"20":1,"92":1}}],["👀",{"1":{"20":2,"24":1,"35":2,"39":1,"43":1,"50":1,"52":1,"60":4,"65":1,"84":2,"104":1,"105":1,"121":5,"132":1,"138":1,"160":1,"294":2}}],["+=",{"1":{"105"
:1,"117":3,"121":1,"160":2}}],["+",{"1":{"14":1,"35":8,"46":4,"84":4,"95":3,"96":2,"104":1,"105":3,"109":1,"110":2,"112":1,"113":4,"116":1,"117":23,"118":6,"121":4,"153":6,"157":1,"160":3,"181":2,"214":1,"216":4,"219":1,"220":1,"221":1,"225":1,"268":2,"294":2,"326":1,"330":6,"341":3}}],["\tuser",{"1":{"221":1}}],["\tusername",{"1":{"220":1}}],["\tpullrequest",{"1":{"216":1}}],["\tpriorityinput",{"1":{"84":1}}],["\tactions",{"1":{"221":1}}],["\taction",{"1":{"216":1}}],["\tadditionalmetadata",{"1":{"40":2}}],["\tdata",{"1":{"214":1}}],["\tduration",{"1":{"82":1}}],["\thttp",{"1":{"162":1}}],["\thatchet",{"1":{"39":3,"55":1,"65":1,"66":3,"74":1,"78":1,"81":1,"82":1,"84":2,"110":2,"113":5,"117":4,"118":2,"135":3,"149":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["\trightbranch",{"1":{"117":1}}],["\trepository",{"1":{"216":1}}],["\tretrycount",{"1":{"59":1}}],["\treturn",{"1":{"14":1,"24":1,"25":3,"39":1,"40":3,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"68":1,"84":3,"94":1,"95":1,"104":1,"105":3,"109":1,"110":1,"112":3,"113":3,"116":1,"117":4,"118":1,"121":1,"132":1,"133":1,"138":1,"160":2}}],["\tresponseurl",{"1":{"220":1}}],["\trest",{"1":{"35":1,"52":1}}],["\tresult",{"1":{"14":1,"55":1,"105":1}}],["\twaitforevent",{"1":{"117":1}}],["\twaitforsleep",{"1":{"117":1}}],["\tworkflowrun",{"1":{"161":1}}],["\tworkflow",{"1":{"84":3}}],["\tgo",{"1":{"104":1,"105":1}}],["\tvalue",{"1":{"104":1,"105":2}}],["\tvar",{"1":{"95":1,"117":3,"121":2,"160":1}}],["\tv0client",{"1":{"52":2,"149":1}}],["\tleftbranch",{"1":{"117":1}}],["\tlimit",{"1":{"82":1}}],["\tlog",{"1":{"20":2,"35":3,"39":1,"43":1,"68":2,"82":1,"109":2,"112":2,"116":2,"121":1,"149":2}}],["\tkey",{"1":{"82":1}}],["\tid",{"1":{"161":1}}],["\tif",{"1":{"55":2,"59":1,"95":1,"109":1,"112":1,"116":1,"117":6,"121":1,"138":1,"161":2,"162":2}}],["\tinputs",{"1":{"43":1}}],["\tinput",{"1":{"40":1,"149":2}}],["\tctx",{"1":{"161":1}}],["\tchunks",{"1":{"160":1}}],["\tcommand",{"1":{"220":1}}],["\tcount",{
"1":{"105":1}}],["\tcontext",{"1":{"35":3,"48":1,"52":3,"84":3,"112":1,"113":1,"149":2}}],["\tclient",{"1":{"55":1,"161":1,"162":1}}],["\tok",{"1":{"55":1}}],["\tfor",{"1":{"68":1,"121":1,"160":2,"161":1}}],["\tfunc",{"1":{"65":1,"66":1,"74":1,"78":1,"81":1,"82":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["\tfmt",{"1":{"53":1,"55":1,"59":1,"104":1,"105":1,"120":1,"138":2,"161":1}}],["\tfeatures",{"1":{"35":1,"84":2}}],["\ttext",{"1":{"220":1}}],["\ttype",{"1":{"214":1,"221":1}}],["\ttotal",{"1":{"117":1}}],["\ttask",{"1":{"55":1}}],["\ttriggerpayload",{"1":{"52":1}}],["\ttime",{"1":{"39":1,"138":1,"160":1}}],["\tserver",{"1":{"162":1}}],["\tstream",{"1":{"161":1}}],["\tstreamingworkflow",{"1":{"161":1,"162":1}}],["\tsticky",{"1":{"133":1}}],["\tstickydag",{"1":{"132":1}}],["\tsteperrors",{"1":{"121":1}}],["\tstart",{"1":{"117":1}}],["\tskiponevent",{"1":{"117":1}}],["\tskippayload",{"1":{"52":1}}],["\tsum",{"1":{"105":1}}],["\tscheduledrun",{"1":{"35":1}}],["\tevent",{"1":{"219":1}}],["\teventinput",{"1":{"48":1,"112":1,"113":1}}],["\terr",{"1":{"117":3,"136":2}}],["\texpression",{"1":{"40":1}}],["\tname",{"1":{"40":1}}],["\t\thandled",{"1":{"219":1}}],["\t\thatchet",{"1":{"50":1,"52":2,"113":2,"118":2,"132":1}}],["\t\tusername",{"1":{"221":1}}],["\t\tuser",{"1":{"219":1}}],["\t\tunits",{"1":{"81":1,"82":1}}],["\t\tnumber",{"1":{"216":1}}],["\t\tnextrun",{"1":{"39":1}}],["\t\taction",{"1":{"221":2}}],["\t\tactionid",{"1":{"221":1}}],["\t\targs",{"1":{"220":1}}],["\t\tamount",{"1":{"214":1}}],["\t\taddr",{"1":{"162":1}}],["\t\tobject",{"1":{"214":1}}],["\t\toriginalinput",{"1":{"121":1}}],["\t\twritetimeout",{"1":{"162":1}}],["\t\tworkflowrun",{"1":{"162":1}}],["\t\tworkflowid",{"1":{"52":1}}],["\t\tw",{"1":{"162":3}}],["\t\tfullname",{"1":{"216":1}}],["\t\tfunc",{"1":{"105":2,"132":2,"133":1}}],["\t\tfor",{"1":{"162":1}}],["\t\tflusher",{"1":{"162":1}}],["\t\tfmt",{"1":{"161":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["\t\tfailurehandled",{"1":{"121":1}}],["\t\
tpr",{"1":{"216":1}}],["\t\tprocessedat",{"1":{"109":1,"112":1,"116":1}}],["\t\tpriority",{"1":{"84":2}}],["\t\tvar",{"1":{"104":1,"105":1,"117":3}}],["\t\tdefer",{"1":{"104":1,"105":1}}],["\t\tdefault",{"1":{"68":1}}],["\t\tduration",{"1":{"81":1}}],["\t\tkey",{"1":{"81":1,"82":1}}],["\t\tlimitvalueexpr",{"1":{"81":1}}],["\t\tlimitstrategy",{"1":{"74":1}}],["\t\tlog",{"1":{"55":2,"65":2,"66":4,"81":1,"82":1,"121":2,"161":2,"162":2}}],["\t\tmap",{"1":{"149":2}}],["\t\tmaxruns",{"1":{"74":1}}],["\t\tmu",{"1":{"104":2,"105":2}}],["\t\tmessage",{"1":{"48":1,"59":1,"109":1,"112":2,"113":1,"116":1,"160":1}}],["\t\tcommand",{"1":{"220":1}}],["\t\tcompleted",{"1":{"68":1}}],["\t\tchannel",{"1":{"219":1}}],["\t\tchunks",{"1":{"160":1}}],["\t\tcustomer",{"1":{"214":1}}],["\t\tctx",{"1":{"160":1,"162":1}}],["\t\tcase",{"1":{"68":1}}],["\t\tstream",{"1":{"162":1}}],["\t\tstep",{"1":{"94":1,"95":1}}],["\t\tstatus",{"1":{"68":1}}],["\t\tselect",{"1":{"68":1}}],["\t\tscope",{"1":{"52":1}}],["\t\tif",{"1":{"66":1,"104":2,"105":2,"117":3,"160":1,"162":1}}],["\t\tinput",{"1":{"35":1,"43":1}}],["\t\tend",{"1":{"160":1}}],["\t\terrordetails",{"1":{"121":2}}],["\t\terr",{"1":{"66":1,"104":1,"105":1,"117":3}}],["\t\texpression",{"1":{"52":1,"74":1}}],["\t\texecutedat",{"1":{"39":1}}],["\t\trepo",{"1":{"216":1}}],["\t\treadtimeout",{"1":{"162":1}}],["\t\treturn",{"1":{"55":1,"59":1,"65":1,"66":1,"74":1,"78":1,"81":1,"82":1,"95":1,"109":1,"112":1,"116":1,"117":3,"138":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["\t\tresults",{"1":{"104":1,"105":1}}],["\t\tresult",{"1":{"14":1,"94":1,"95":1,"104":1,"105":1}}],["\t\t\taction",{"1":{"221":1}}],["\t\t\targs",{"1":{"220":2}}],["\t\t\tamount",{"1":{"214":3}}],["\t\t\thandled",{"1":{"219":1}}],["\t\t\thttp",{"1":{"162":1}}],["\t\t\tpr",{"1":{"216":2}}],["\t\t\tpayload",{"1":{"52":1}}],["\t\t\trepo",{"1":{"216":2}}],["\t\t\treturn",{"1":{"50":1,"66":1,"68":1,"104":2,"105":4,"132":2,"133":1,"162":1}}],["\t\t\tfmt",{"1":{"162":1}}],["\t\t\tfor",{
"1":{"105":1}}],["\t\t\tdescription",{"1":{"136":2}}],["\t\t\tname",{"1":{"136":2}}],["\t\t\ton",{"1":{"136":2}}],["\t\t\tok",{"1":{"55":1}}],["\t\t\tend",{"1":{"160":1}}],["\t\t\terr",{"1":{"133":1}}],["\t\t\texpression",{"1":{"52":1,"78":2}}],["\t\t\tvar",{"1":{"133":1}}],["\t\t\tif",{"1":{"133":2,"162":1}}],["\t\t\tcommand",{"1":{"220":2}}],["\t\t\tcompleted",{"1":{"65":1,"66":1}}],["\t\t\tcustomer",{"1":{"214":3}}],["\t\t\tchildresult",{"1":{"133":1}}],["\t\t\tchildworkflow",{"1":{"133":1}}],["\t\t\tworkerid",{"1":{"132":2}}],["\t\t\tlimitstrategy",{"1":{"78":2}}],["\t\t\tlog",{"1":{"66":1,"68":2,"105":2}}],["\t\t\tmaxruns",{"1":{"78":2}}],["\t\t\ttotal",{"1":{"117":3}}],["\t\t\ttransformedmessage",{"1":{"74":1,"78":1}}],["\t\t\ttime",{"1":{"68":1}}],["\t\t\tsteps",{"1":{"136":2}}],["\t\t\tstatus",{"1":{"65":1,"66":1}}],["\t\t\tsum",{"1":{"105":1}}],["\t\t\tscope",{"1":{"52":1}}],["\t\t\t\tflusher",{"1":{"162":1}}],["\t\t\t\tmodel",{"1":{"136":1}}],["\t\t\t\tworker",{"1":{"136":2}}],["\t\t\t\treturn",{"1":{"133":2,"136":1}}],["\t\t\t\tresult",{"1":{"105":1,"132":2,"133":1}}],["\t\t\t\tsetdesiredlabels",{"1":{"136":1}}],["\t\t\t\tsetname",{"1":{"136":1}}],["\t\t\t\tsum",{"1":{"105":2}}],["\t\t\t\tstatus",{"1":{"66":1,"68":1}}],["\t\t\t\terr",{"1":{"105":1}}],["\t\t\t\tvar",{"1":{"105":1}}],["\t\t\t\t\tmessage",{"1":{"136":1}}],["\t\t\t\t\tctx",{"1":{"136":2}}],["\t\t\t\t\t\tcomparator",{"1":{"136":1}}],["\t\t\t\t\t\trequired",{"1":{"136":1}}],["\t\t\t\t\t\tweight",{"1":{"136":1}}],["\t\t\t\t\t\tvalue",{"1":{"136":2}}],["\t\t\t\t\t\t\tcomparator",{"1":{"136":1}}],["\t\t\t\t\t\t\trequired",{"1":{"136":1}}],["\t\t\t\t\t\t\tweight",{"1":{"136":1}}],["\t\t\t\t\t\t\tvalue",{"1":{"136":2}}],["\t\t\t\t\t\t",{"1":{"136":6}}],["\t\t\t\t\t\tmessage",{"1":{"136":1}}],["\t\t\t\t\tsetdesiredlabels",{"1":{"136":1}}],["\t\t\t\t\tsetname",{"1":{"136":1}}],["\t\t\t\t\t",{"1":{"136":9}}],["\t\t\t\t\treturn",{"1":{"105":2,"136":1}}],["\t\t\t\t\tvalue",{"1":{"105":1}}],["\t\t\t\tif",
{"1":{"105":2,"136":1}}],["\t\t\t\tchildresult",{"1":{"105":1}}],["\t\t\t\tcompleted",{"1":{"66":1,"68":1}}],["\t\t\t\tlog",{"1":{"105":2}}],["\t\t\t\t",{"1":{"52":3,"105":4,"136":4}}],["\t\t\t\ttransformedmessage",{"1":{"50":1}}],["\t\t\t",{"1":{"50":1,"66":1,"68":1,"105":4,"132":2,"133":4,"136":2,"162":1}}],["\t\t",{"1":{"40":2,"50":2,"52":2,"55":1,"65":3,"66":4,"68":1,"74":2,"78":4,"104":2,"105":4,"117":3,"132":2,"133":1,"135":2,"136":4,"160":1,"162":2,"214":3,"216":2,"219":1,"220":2,"221":1}}],["\t\ttext",{"1":{"219":1}}],["\t\ttype",{"1":{"219":1}}],["\t\ttypes",{"1":{"78":1}}],["\t\ttitle",{"1":{"216":1}}],["\t\ttime",{"1":{"65":1,"66":1,"74":1,"78":1,"160":1}}],["\t\ttimestamp",{"1":{"39":1}}],["\t\ttransformedmessage",{"1":{"53":1}}],["\t\ttriggerat",{"1":{"35":1}}],["\t\tjobname",{"1":{"39":1}}],["\t",{"1":{"14":1,"35":2,"39":2,"40":2,"43":1,"48":2,"50":1,"52":6,"53":1,"55":8,"59":3,"65":1,"66":1,"68":3,"74":2,"78":2,"81":2,"82":2,"84":3,"94":1,"95":3,"104":3,"105":5,"109":2,"112":4,"113":3,"116":2,"117":6,"118":1,"120":2,"121":4,"132":5,"133":1,"135":1,"136":2,"138":3,"149":4,"160":3,"161":3,"162":4,"214":4,"216":5,"219":4,"220":3,"221":5}}],["\tmessage",{"1":{"14":1,"55":1}}],["32`",{"1":{"374":1}}],["36",{"1":{"283":1,"291":1,"338":4}}],["3600",{"1":{"35":1,"157":1}}],["388e3c",{"1":{"173":1}}],["3392ff",{"1":{"101":6}}],["30s",{"1":{"225":1}}],["308",{"1":{"146":1}}],["300",{"1":{"105":1,"209":1}}],["30",{"1":{"38":1,"65":2,"68":1,"105":1,"113":2,"118":1,"129":1,"194":2,"210":1,"291":4,"296":1}}],["31",{"1":{"38":1}}],["3",{"0":{"279":1},"1":{"14":1,"32":1,"35":1,"37":1,"38":1,"42":1,"46":1,"54":1,"57":1,"58":3,"59":3,"60":3,"61":2,"65":4,"66":3,"68":2,"69":1,"74":1,"75":1,"76":1,"81":3,"82":4,"83":3,"84":5,"89":1,"107":1,"123":1,"124":1,"126":1,"127":4,"132":2,"133":2,"136":2,"138":1,"142":1,"146":2,"149":2,"153":1,"164":1,"166":1,"167":1,"169":1,"170":1,"216":1,"218":1,"225":3,"227":2,"240":1,"248":1,"251":2,"268":3,"276":1,"278":1,"284":1,"322":2,"32
4":2,"325":1}}],["==",{"1":{"52":3,"112":4,"117":3,"121":1}}],["===",{"1":{"20":1,"142":1,"156":1}}],["=>",{"1":{"14":2,"24":1,"25":1,"35":1,"39":1,"40":1,"43":2,"48":1,"52":8,"53":1,"58":1,"59":2,"60":2,"61":1,"65":2,"66":2,"68":4,"72":1,"81":1,"82":1,"84":4,"94":3,"95":3,"96":3,"104":8,"105":17,"109":1,"110":2,"112":2,"113":5,"116":1,"117":8,"118":2,"120":3,"121":3,"132":3,"133":3,"135":3,"136":7,"138":1,"142":7,"149":4,"153":8,"154":1,"155":27,"157":4,"160":1,"214":3,"216":3,"219":2,"220":3,"221":2,"294":1,"345":2}}],["=",{"1":{"14":4,"20":8,"24":5,"25":15,"35":14,"39":5,"40":16,"43":13,"48":3,"50":10,"52":13,"55":11,"58":2,"59":4,"60":2,"61":2,"62":1,"65":4,"66":6,"68":5,"72":19,"74":5,"78":5,"81":5,"82":10,"84":27,"89":10,"93":4,"94":4,"95":8,"96":7,"97":6,"104":22,"105":49,"109":7,"110":3,"112":17,"113":9,"115":1,"116":12,"117":38,"118":3,"120":4,"121":6,"132":11,"133":12,"135":7,"136":13,"138":5,"141":6,"142":13,"144":3,"149":6,"153":13,"154":2,"155":8,"156":1,"157":24,"160":15,"161":13,"162":19,"214":9,"216":13,"219":6,"220":4,"221":8,"294":14,"322":3,"325":1,"326":9,"328":6,"337":1,"345":4}}],["|chunk|",{"1":{"160":1,"161":1}}],["||",{"1":{"105":1,"117":3}}],["|i|",{"1":{"104":1,"105":2,"142":2}}],["|input",{"1":{"14":1,"39":1,"52":1,"53":1,"58":1,"59":1,"60":1,"61":3,"65":1,"66":1,"68":2,"74":1,"78":2,"81":1,"82":1,"84":1,"94":2,"95":2,"96":2,"105":3,"109":1,"110":1,"112":2,"113":2,"116":2,"117":4,"118":1,"121":2,"132":2,"133":2,"136":1,"138":1,"142":2,"155":5,"160":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["|run|",{"1":{"72":1}}],["|greeting|",{"1":{"43":1}}],["|",{"1":{"7":1,"11":18,"89":3,"116":2,"124":1,"127":1,"132":2,"133":2,"136":1,"153":2,"171":1,"238":2,"241":1,"278":1,"333":6,"349":1,"374":2}}],["ngrok",{"1":{"374":2}}],["nginx",{"1":{"241":14},"2":{"241":1}}],["num",{"1":{"328":2}}],["number`",{"1":{"309":1}}],["numbers",{"1":{"135":1}}],["number=random",{"1":{"94":1,"95":1,"96":1,"110":1,"113":2,"117":3,"118":1}}],["number",{"1":{"16":1,"20
":1,"22":1,"23":1,"28":1,"56":1,"57":1,"60":3,"61":1,"62":1,"63":1,"74":4,"79":1,"82":2,"87":1,"90":1,"94":3,"95":4,"96":4,"104":1,"105":1,"107":1,"110":1,"113":2,"117":21,"118":1,"130":1,"137":1,"146":1,"153":3,"155":2,"159":1,"160":1,"196":2,"208":1,"209":2,"210":1,"214":1,"216":15,"268":27,"271":2,"289":2,"294":3,"295":1,"297":1,"299":2,"307":4,"309":4,"311":2,"312":4,"313":2,"316":10,"317":2,"318":4,"320":2,"322":10,"323":2,"324":4,"327":1,"333":2,"335":2,"337":2,"344":2},"2":{"117":1,"216":2}}],["null",{"1":{"238":1}}],["n=+",{"1":{"238":1}}],["nstreaming",{"1":{"161":1}}],["n",{"1":{"59":1,"85":1,"86":1,"87":1,"101":1,"104":17,"105":20,"120":2,"132":1,"133":2,"160":9,"162":1,"198":1,"214":1,"216":1,"219":1,"220":1,"221":1,"278":1,"279":1,"294":1,"326":2,"362":1,"376":7},"2":{"104":2,"105":2,"294":1,"326":1}}],["npm",{"1":{"21":1,"127":6,"144":1,"365":1}}],["noisy",{"1":{"168":1}}],["nolint",{"1":{"110":1,"113":2,"117":3,"118":1}}],["normally",{"1":{"316":4}}],["normalizing",{"1":{"152":1}}],["normal",{"1":{"57":1,"88":1,"118":2}}],["nonretryableresult",{"1":{"61":1}}],["nonretryableinput",{"1":{"61":1}}],["nonretryableerror",{"1":{"61":3}}],["nonretryableexception",{"1":{"61":1}}],["nonretryableworkflow",{"1":{"61":1},"2":{"61":1}}],["non",{"1":{"46":1,"57":1,"61":7,"62":2,"89":1,"102":1,"137":1,"138":4,"243":1,"255":2,"280":1},"2":{"61":1}}],["none`",{"1":{"307":42,"309":5,"311":18,"312":10,"313":4,"314":10,"316":44,"317":28,"318":18,"320":8,"322":53,"323":8,"324":31}}],["none",{"1":{"20":1,"53":1,"61":1,"81":1,"82":1,"89":1,"94":1,"101":1,"109":1,"112":1,"121":1,"132":2,"133":2,"136":1,"153":6,"160":3,"273":1,"309":10,"311":2,"315":2,"316":8,"317":2,"320":4,"325":1,"326":1,"328":1,"330":6}}],["noon",{"1":{"35":1,"40":1}}],["notation",{"1":{"130":1}}],["notify",{"1":{"120":1,"121":2,"219":1,"374":1}}],["notifications",{"1":{"119":1,"121":2}}],["notification",{"1":{"34":1,"109":1}}],["notes",{"1":{"377":6}}],["note",{"0":{"73":1},"1":{"7":1,"20":1,"24":1,"25":
1,"26":1,"35":1,"39":1,"40":2,"46":1,"50":1,"52":2,"55":1,"58":1,"68":2,"72":1,"73":1,"81":4,"121":1,"214":1,"216":1,"218":1,"230":1,"245":1,"251":1,"268":1,"286":1,"287":1,"293":1,"295":1,"307":4,"309":3,"314":2,"317":1,"320":1,"322":2,"326":2,"330":1,"332":2,"333":1,"344":1,"372":1,"374":2}}],["nodes",{"1":{"101":1,"290":1}}],["node",{"1":{"21":1,"91":1,"100":1,"109":1,"127":12,"340":2,"364":1}}],["nil",{"1":{"14":1,"20":2,"24":1,"25":3,"35":3,"39":1,"40":3,"43":1,"48":1,"50":1,"52":3,"53":1,"55":3,"58":1,"59":2,"60":1,"61":1,"65":1,"66":2,"68":2,"74":1,"78":1,"81":1,"82":2,"84":3,"94":1,"95":2,"96":2,"104":4,"105":8,"109":2,"110":1,"112":4,"113":3,"116":2,"117":10,"118":1,"120":1,"121":2,"132":2,"133":5,"136":3,"138":3,"144":1,"149":2,"160":1,"161":2,"162":4,"214":1,"216":1,"219":1,"220":1,"221":1,"294":3}}],["nature",{"1":{"294":1}}],["naturally",{"1":{"98":1}}],["natively",{"1":{"26":1}}],["native",{"1":{"7":1,"120":1,"127":1,"237":1,"349":1}}],["navigate",{"1":{"36":1,"41":1,"72":1,"166":1,"228":1,"234":2,"238":1,"374":2}}],["namely",{"1":{"321":1,"345":1}}],["name>",{"1":{"245":2,"365":1,"374":1}}],["name>`",{"1":{"127":1}}],["named",{"1":{"154":1,"348":1,"351":1}}],["name`",{"1":{"145":4,"255":3,"260":1,"262":2,"264":1,"265":2,"273":1,"311":8,"316":2,"317":2,"318":8,"320":2,"322":2,"324":2,"334":1,"376":1}}],["names`",{"1":{"318":4}}],["namespace`",{"1":{"270":1,"289":1}}],["namespace=loadtest2",{"1":{"289":1}}],["namespace=loadtest1",{"1":{"289":1}}],["namespace=hatchet",{"1":{"238":1}}],["namespace=default",{"1":{"234":2}}],["namespace",{"1":{"234":8,"238":4,"241":1,"270":1,"278":2,"289":2,"307":1},"2":{"241":1}}],["namespaced",{"1":{"214":1,"322":1}}],["namespaces",{"1":{"164":1}}],["names",{"1":{"145":1,"309":1,"314":2,"318":4,"333":1,"338":1}}],["name=<app",{"1":{"374":1}}],["name=hatchet",{"1":{"238":1}}],["name=engine",{"1":{"234":1}}],["name=$",{"1":{"234":2,"238":1}}],["name=",{"1":{"14":1,"25":2,"39":2,"40":4,"50":1,"52":1,"55":2,"74":1,"78":1,"84"
:2,"89":1,"93":1,"105":2,"109":1,"112":1,"121":1,"132":1,"133":1,"136":1,"138":1,"141":1,"304":1,"322":1,"326":2,"328":1,"331":1}}],["name",{"1":{"14":3,"20":1,"25":1,"39":2,"40":2,"43":1,"46":6,"48":1,"50":2,"52":2,"53":1,"55":6,"58":1,"59":1,"60":1,"61":1,"62":1,"65":2,"66":1,"68":2,"74":2,"78":6,"81":1,"82":1,"84":2,"93":3,"94":3,"95":1,"96":1,"104":3,"105":10,"109":2,"110":1,"112":3,"113":2,"116":3,"117":4,"118":1,"120":1,"121":4,"129":3,"130":9,"132":2,"133":2,"135":2,"136":5,"138":1,"141":1,"142":5,"144":1,"145":3,"146":1,"155":6,"160":2,"164":1,"169":1,"214":3,"216":9,"218":1,"219":3,"220":9,"221":3,"225":2,"234":6,"238":3,"241":1,"243":2,"245":1,"246":2,"253":1,"255":4,"260":1,"262":2,"264":1,"265":2,"268":6,"271":1,"273":1,"289":4,"294":4,"304":1,"307":11,"309":7,"310":2,"311":17,"312":11,"313":3,"314":3,"315":3,"316":28,"317":16,"318":35,"319":5,"320":11,"322":36,"323":3,"324":25,"331":2,"332":3,"333":10,"341":1,"342":6,"344":9,"345":13,"352":3,"354":2,"355":2,"362":10,"367":1,"372":1,"376":1},"2":{"78":1,"216":1,"220":1,"234":1,"238":1,"322":1,"331":1,"345":1}}],["neither",{"1":{"317":2}}],["net",{"1":{"305":1}}],["networks",{"1":{"189":2}}],["networking",{"0":{"239":1},"1":{"180":1,"183":1,"188":1,"239":1,"243":1}}],["network",{"1":{"43":1,"57":1,"67":1,"69":1,"158":1,"166":1,"167":3,"170":1,"176":1,"177":1,"182":1,"189":2,"210":2,"268":1}}],["nested",{"1":{"221":1}}],["negligible",{"1":{"158":1}}],["never",{"1":{"123":1,"124":3,"157":2,"158":1,"169":1,"289":1,"373":1}}],["necessary",{"1":{"46":1,"69":1,"243":1,"358":1}}],["newinstrumentor",{"1":{"144":1},"2":{"144":1}}],["newinterruptcontext",{"1":{"20":1},"2":{"20":1}}],["newstandalonedurabletask",{"1":{"109":1,"112":1,"116":1},"2":{"109":1,"112":1,"116":1}}],["newstandalonetask",{"1":{"14":1,"39":1,"50":1,"52":1,"55":1,"58":1,"59":1,"60":1,"61":1,"74":1,"78":1,"81":1,"82":1,"105":2,"133":1,"214":1,"216":1,"219":1,"220":1,"221":1},"2":{"14":1,"39":1,"50":1,"52":1,"55":1,"58":1,"59":1,"60":1,"61":1,"74"
:1,"78":1,"81":1,"82":1,"105":1,"133":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["newly",{"1":{"77":1}}],["newer",{"1":{"76":1}}],["newest",{"0":{"77":1},"1":{"74":2}}],["newest`",{"1":{"74":1,"77":3}}],["newworkflow",{"1":{"66":1,"84":1,"93":1,"132":1},"2":{"66":1,"84":1,"93":1,"132":1}}],["newworker",{"1":{"20":1,"135":1},"2":{"20":1,"135":1}}],["newtask",{"1":{"65":1,"66":1,"68":1,"94":1,"95":1,"110":1,"113":2,"117":4,"118":1,"132":2,"138":1},"2":{"65":1,"66":1,"68":1,"94":1,"95":1,"110":1,"113":1,"117":1,"118":1,"132":1,"138":1}}],["newnonretryableerror",{"1":{"61":1},"2":{"61":1}}],["newclient",{"1":{"55":1,"161":1,"162":1},"2":{"55":1,"161":1,"162":1}}],["new",{"1":{"10":1,"19":1,"27":1,"29":1,"30":1,"35":4,"37":1,"42":1,"49":1,"52":1,"55":1,"58":2,"59":2,"60":2,"61":2,"65":1,"66":3,"67":1,"68":1,"72":2,"74":5,"75":3,"76":5,"77":1,"78":2,"81":1,"82":1,"83":1,"84":5,"105":1,"106":1,"107":1,"110":2,"112":2,"113":8,"116":1,"117":4,"118":4,"121":1,"133":1,"136":5,"142":3,"144":2,"149":1,"157":3,"162":2,"164":2,"167":1,"169":1,"183":1,"192":1,"199":1,"214":2,"216":2,"218":1,"220":1,"234":1,"237":1,"238":1,"247":1,"254":2,"276":1,"294":1,"305":1,"311":4,"312":4,"316":4,"317":4,"318":4,"322":4,"324":4,"332":8,"333":2,"334":1,"335":1,"341":2,"342":1,"346":6,"352":1,"373":1,"374":3,"376":1},"2":{"52":1,"58":1,"59":1,"60":1,"61":1,"72":1,"74":1,"78":1,"81":1,"82":1,"84":1,"105":1,"110":1,"112":1,"113":2,"116":1,"117":1,"118":2,"133":1,"136":1,"142":2,"149":1,"294":1}}],["nextjs",{"1":{"162":1}}],["next",{"0":{"8":1,"178":1,"184":1},"1":{"8":1,"39":1,"42":2,"55":1,"60":1,"75":1,"85":1,"86":1,"87":2,"90":1,"94":1,"101":1,"154":2,"158":1,"164":1,"214":1}}],["nextra",{"1":{"0":1,"7":1,"211":1}}],["nearly",{"1":{"5":1}}],["needing",{"1":{"30":1,"32":1,"68":2,"72":1,"307":3,"323":2,"324":2,"326":1}}],["needed",{"0":{"124":1,"279":1},"1":{"21":1,"66":1,"71":1,"100":1,"127":1,"155":1,"156":2,"189":1,"231":1,"330":1}}],["need",{"1":{"4":2,"7":1,"21":1,"25":3,"35":1,"46":6,"49
":1,"50":1,"52":1,"55":1,"66":1,"72":1,"81":1,"100":1,"102":1,"105":1,"114":1,"119":1,"123":4,"124":1,"131":1,"133":1,"139":1,"154":1,"166":1,"174":1,"180":1,"189":1,"190":1,"199":1,"214":2,"218":2,"228":1,"238":1,"240":1,"283":1,"293":2,"296":2,"309":1,"325":1,"326":1,"331":1,"352":1,"364":1,"372":1,"374":4}}],["needs",{"1":{"2":1,"14":1,"20":1,"23":1,"71":1,"74":1,"100":1,"101":1,"118":1,"122":1,"173":1,"174":1,"209":1,"238":1,"243":1,"295":1}}],["`query",{"1":{"338":6}}],["`query`",{"1":{"334":1,"338":1,"341":1}}],["`queued`",{"1":{"129":1,"169":1}}],["`q`",{"1":{"333":3}}],["`yield`",{"1":{"328":3}}],["`60s`",{"1":{"266":2}}],["`750`",{"1":{"257":2}}],["`720h`",{"1":{"257":1}}],["`7070`",{"1":{"254":1}}],["`20250813183355`",{"1":{"283":1}}],["`20250813183355",{"1":{"283":1}}],["`2000`",{"1":{"257":1}}],["`20`",{"1":{"254":1}}],["`24h`",{"1":{"257":2}}],["`2`",{"1":{"83":1,"255":1,"257":2,"265":1}}],["`8733`",{"1":{"254":1}}],["`8080`",{"1":{"254":1}}],["`8000`",{"1":{"162":1}}],["`8001`",{"1":{"146":1,"272":1}}],["`k`",{"1":{"333":2}}],["`kubectl`",{"1":{"233":2,"236":2}}],["`key`",{"1":{"74":1,"75":1,"76":1,"315":2,"322":1,"324":1,"333":2}}],["`x",{"1":{"216":1}}],["`+`",{"1":{"164":1}}],["`version`",{"1":{"307":3,"320":2}}],["`v0",{"1":{"283":2,"286":1,"290":1}}],["`v1loglineorderbydirection`",{"1":{"337":1}}],["`v1loglinelevel`",{"1":{"337":1}}],["`v1loglinelist`",{"1":{"313":2}}],["`v1createfilterrequest`",{"1":{"335":1}}],["`v1webhooklist`",{"1":{"318":2}}],["`v1webhook`",{"1":{"318":8}}],["`v1webhookbasicauth",{"1":{"318":2}}],["`v1webhooksourcename`",{"1":{"318":2}}],["`v1workflowrundetails`",{"1":{"316":4}}],["`v1workflowruncreate`",{"1":{"192":1}}],["`v1tasksummary`",{"1":{"316":2,"322":2,"324":2}}],["`v1tasksummarylist`",{"1":{"316":2}}],["`v1taskstatus`",{"1":{"316":2}}],["`v1updatefilterrequest`",{"1":{"312":2,"335":1}}],["`v1filterlist`",{"1":{"312":2}}],["`v1filter`",{"1":{"312":8,"322":2,"324":2}}],["`v1`",{"1":{"254":1}}],["`void`",{"1":{"154":1,
"333":1}}],["`valueerror`",{"1":{"309":1,"310":1,"317":2,"322":1}}],["`values",{"1":{"241":1,"243":1,"245":1}}],["`value`",{"1":{"136":1}}],["`valuelocation`",{"1":{"130":1}}],["`ngrok`",{"1":{"374":2}}],["`nginx",{"0":{"241":1}}],["`number`",{"1":{"332":1,"333":3,"334":2,"335":4,"337":2,"339":2,"340":2,"341":2,"342":4,"344":4,"345":1}}],["`n`",{"1":{"307":3,"322":4}}],["`name",{"1":{"344":1}}],["`namespace`",{"1":{"307":1}}],["`name`",{"1":{"271":1,"307":4,"318":2,"322":5,"332":1,"334":1,"345":3}}],["`next",{"1":{"158":1}}],["`newinstrumentor`",{"1":{"144":1}}],["`npm",{"1":{"127":2}}],["`nodeid`",{"1":{"340":1}}],["`now",{"1":{"333":2}}],["`none`",{"1":{"307":42,"309":6,"311":20,"312":10,"313":4,"314":10,"315":2,"316":52,"317":30,"318":18,"320":10,"322":53,"323":10,"324":33,"376":1}}],["`nonretryable`",{"1":{"61":1}}],["`not",{"1":{"62":1,"136":1}}],["`5m`",{"1":{"266":1}}],["`5431`",{"1":{"255":1}}],["`5s`",{"1":{"254":2,"255":1}}],["`500`",{"1":{"316":4}}],["`500ms`",{"1":{"254":1}}],["`50`",{"1":{"117":1,"255":3,"266":1,"344":1}}],["`5",{"1":{"146":1,"272":1}}],["`5`",{"1":{"62":1,"257":1,"266":1}}],["`offset",{"1":{"335":1,"339":1,"342":1,"344":1}}],["`offset`",{"1":{"196":1,"311":2,"312":2,"316":2,"317":2,"318":2,"320":2,"322":2,"324":2}}],["`omit`",{"1":{"332":2,"345":4,"346":2}}],["`o`",{"1":{"332":10,"346":5}}],["`opts",{"1":{"335":5,"336":1,"338":1,"340":1,"341":2,"344":4}}],["`opts`",{"1":{"316":4,"319":2,"335":1,"339":2,"340":2,"341":1}}],["`options",{"1":{"332":1,"333":3,"342":5,"345":7}}],["`options`",{"1":{"322":7,"324":7,"332":8,"342":1,"345":4,"346":3}}],["`openssl`",{"1":{"236":1,"238":1}}],["`opened`",{"1":{"216":1}}],["`opentelemetry`",{"1":{"144":1}}],["`object`",{"1":{"144":1,"332":2,"333":3,"334":2,"341":3,"345":4,"346":2}}],["`otel`",{"1":{"144":1}}],["`orderbyfield",{"1":{"339":1}}],["`orderbydirection",{"1":{"337":1,"339":1}}],["`order",{"1":{"311":4,"317":4}}],["`or`",{"1":{"118":1}}],["`or",{"1":{"116":1,"118":2}}],["`onsuccess",{"1":{"3
45":1}}],["`onfailure",{"1":{"345":1}}],["`onfailure`",{"1":{"121":1}}],["`only",{"1":{"316":2,"322":2,"324":2}}],["`on",{"1":{"39":3,"46":1,"50":1,"214":1,"307":6,"322":6}}],["`ispaused",{"1":{"343":1}}],["`issues`",{"1":{"216":1}}],["`id",{"1":{"333":1}}],["`id`",{"1":{"322":1}}],["`i`",{"1":{"332":3,"345":7}}],["`ihatchetclient`",{"1":{"332":1,"346":3}}],["`if`",{"1":{"115":1}}],["`invocationcount`",{"1":{"333":1}}],["`invalid",{"1":{"62":1}}],["`include",{"1":{"316":2}}],["`includetasknameinspanname`",{"1":{"144":1}}],["`incrementby`",{"1":{"333":1}}],["`increment",{"1":{"309":1}}],["`init",{"1":{"154":1}}],["`instrumentor`",{"1":{"145":1}}],["`instrument",{"1":{"144":1}}],["`int",{"1":{"307":7,"309":1,"311":6,"312":4,"316":6,"317":4,"318":4,"320":4,"322":12,"324":4}}],["`int`",{"1":{"62":1,"144":3,"145":2,"307":5,"309":2,"313":2,"315":2,"316":8,"322":6,"323":2,"324":4}}],["`internal`",{"1":{"62":1,"145":1}}],["`input",{"1":{"46":3,"214":1,"220":2,"307":3,"326":2,"331":1,"333":1,"334":1,"341":1}}],["`input`",{"1":{"24":1,"25":1,"46":1,"54":1,"80":1,"81":2,"94":2,"155":1,"158":2,"307":1,"309":1,"311":2,"316":2,"317":2,"322":9,"323":2,"324":11,"326":2,"332":3,"333":2,"345":7,"346":1}}],["`l`",{"1":{"333":2,"345":2}}],["`labels",{"1":{"333":1}}],["`labels`",{"1":{"307":1,"333":1}}],["`latest`",{"1":{"229":1}}],["`levels",{"1":{"337":1}}],["`letsencrypt",{"1":{"241":1}}],["`less",{"1":{"136":2}}],["`loop",{"1":{"325":2}}],["`localhost",{"1":{"234":1,"238":1}}],["`logsclient`",{"1":{"332":1}}],["`logs`",{"1":{"307":1,"332":1}}],["`log`",{"1":{"309":2}}],["`logger`",{"1":{"142":1}}],["`logging`",{"0":{"141":1},"1":{"140":1}}],["`log",{"1":{"142":2}}],["`low`",{"1":{"83":1}}],["`line`",{"1":{"309":1}}],["`lifespanfn",{"1":{"307":1}}],["`lifespan`",{"1":{"307":1,"309":1,"323":2,"324":2,"328":1}}],["`limit",{"1":{"335":1,"337":1,"339":1,"342":1,"344":1}}],["`limit`",{"1":{"81":1,"196":1,"311":2,"312":2,"313":2,"315":2,"316":2,"317":2,"318":2,"320":2,"322":2,"324":2}}],["
`limits`",{"1":{"81":1}}],["`listlogsopts`",{"1":{"336":1}}],["`list",{"1":{"62":1,"307":12,"312":4,"314":2,"316":12,"317":7,"318":4,"322":26,"324":14,"334":1,"335":1,"336":1,"339":1,"340":1,"341":1,"342":1,"343":1,"344":1}}],["`list`",{"1":{"35":1,"311":2,"312":2,"313":2,"316":2,"317":2,"318":2,"319":2,"320":2}}],["`3h`",{"1":{"266":2}}],["`3`",{"1":{"83":1,"316":4}}],["`3000`",{"1":{"257":1}}],["`30s`",{"1":{"66":1,"265":1}}],["`30",{"1":{"38":1}}],["`4mb`",{"1":{"274":2}}],["`4m`",{"1":{"65":1}}],["`4000`",{"1":{"257":1}}],["`4`",{"1":{"257":1}}],["`4194304`",{"1":{"254":1}}],["`45s`",{"1":{"66":1}}],["`err`",{"1":{"333":1}}],["`errors",{"1":{"333":1}}],["`emptymodel`",{"1":{"307":4,"326":3}}],["`email",{"1":{"243":1,"305":1}}],["`enqueueat`",{"1":{"345":1}}],["`envfrom`",{"1":{"246":1}}],["`env`",{"1":{"243":1}}],["`engine",{"1":{"241":1,"278":1}}],["`engine`",{"1":{"240":1}}],["`enablehatchetcollector`",{"1":{"144":1}}],["`enablehatchetcollector",{"1":{"144":1}}],["`enable",{"1":{"144":1}}],["`equal`",{"1":{"136":2}}],["`expression",{"1":{"333":2}}],["`expression`",{"1":{"311":2,"312":2,"322":4,"324":4,"334":1,"345":1}}],["`extra=",{"1":{"326":1}}],["`extra`",{"1":{"307":1}}],["`excludedattributes`",{"1":{"144":1}}],["`except`",{"1":{"120":1}}],["`execution",{"1":{"65":2,"307":2,"322":4}}],["`else`",{"1":{"115":1}}],["`eventclient`",{"1":{"332":1}}],["`event`",{"1":{"307":1}}],["`events`",{"1":{"149":1,"332":1}}],["`event",{"1":{"54":1,"318":4}}],["`1ms`",{"1":{"266":1}}],["`1m`",{"1":{"255":1}}],["`1",{"1":{"258":1,"265":1}}],["`1500`",{"1":{"257":1}}],["`15m`",{"1":{"255":1}}],["`127",{"1":{"253":1,"254":2,"255":3}}],["`10ms`",{"1":{"266":1}}],["`10`",{"1":{"254":1,"255":3,"261":1}}],["`1000`",{"1":{"196":1,"254":1,"257":2,"267":2,"271":1,"313":2}}],["`100`",{"1":{"136":1,"254":1,"261":4,"266":1,"271":1,"295":1,"322":2,"324":2}}],["`10s`",{"1":{"65":1}}],["`1`",{"1":{"83":1,"255":1,"307":3}}],["`1h`",{"1":{"65":1}}],["`<protocol>",{"1":{"374":2}}],["`<unit>`"
,{"1":{"65":1}}],["`<number>`",{"1":{"65":1}}],["`<number><unit>`",{"1":{"65":1}}],["`<link",{"1":{"11":1,"12":1}}],["`filterid`",{"1":{"335":3}}],["`filterpayload",{"1":{"333":1}}],["`filtersclient`",{"1":{"332":1}}],["`filters`",{"1":{"52":1,"332":1}}],["`filter",{"1":{"312":6,"341":1}}],["`fetch",{"1":{"309":2}}],["`float",{"1":{"307":2,"322":4}}],["`frontend",{"1":{"278":1}}],["`frontend`",{"1":{"240":1,"249":1}}],["`fn`",{"1":{"94":1}}],["`foo`",{"1":{"72":1}}],["`false`",{"1":{"62":4,"136":1,"144":1,"245":1,"254":4,"255":3,"259":1,"260":4,"264":2,"265":1,"268":1,"272":1,"309":1,"316":2,"322":4,"324":4,"365":1}}],["`branchid`",{"1":{"340":1}}],["`branchdurabletask",{"1":{"340":1}}],["`bulkupdate",{"1":{"341":1}}],["`bulkdelete",{"1":{"341":1}}],["`bulkrunnowaitchildren",{"1":{"333":1}}],["`bulkrunchildren",{"1":{"333":1}}],["`bulkrunchildrennowait`",{"1":{"43":1}}],["`bulkrunchildren`",{"1":{"43":1}}],["`bulkcancelreplayopts`",{"1":{"316":4}}],["`bulk",{"1":{"316":8,"317":5}}],["`before",{"1":{"153":1}}],["`before`",{"1":{"152":1,"153":4,"154":5,"155":1,"157":2,"158":7}}],["`baseworkflowdeclaration`",{"1":{"332":3,"333":3,"334":1,"344":3,"345":1}}],["`baseworkflow",{"1":{"322":1,"324":1}}],["`baserestclient`",{"1":{"311":1,"312":1,"313":1,"314":1,"315":1,"316":1,"317":1,"318":1,"319":1,"320":1}}],["`backoff",{"1":{"307":4,"322":8}}],["`batchspanprocessor`",{"1":{"144":1}}],["`bar`",{"1":{"72":1}}],["`boolean`",{"1":{"144":2,"333":2}}],["`bool`",{"1":{"62":2,"144":1,"309":4,"316":4,"322":4,"324":4}}],["`uint8array`",{"1":{"333":1}}],["`us",{"1":{"167":1,"290":1}}],["`userdata",{"1":{"333":1}}],["`userid`",{"1":{"154":1}}],["`usereventcondition`",{"1":{"116":1,"118":3}}],["`user",{"1":{"49":1,"54":1,"112":1}}],["`upsert",{"1":{"339":1}}],["`upsertlabels",{"1":{"333":1}}],["`upsertlabels`",{"1":{"136":1}}],["`update",{"1":{"335":1,"341":2,"342":1}}],["`updateworkerrequest`",{"1":{"319":2}}],["`updates`",{"1":{"312":2,"317":1,"335":1,"341":1}}],["`update`",{"1":{"3
12":2,"317":2,"318":2,"319":2,"341":1}}],["`uploadtos3`",{"1":{"157":1}}],["`unpause",{"1":{"343":1}}],["`until",{"1":{"337":1,"338":1}}],["`until`",{"1":{"196":1,"313":2,"314":2,"316":6,"322":2,"324":2,"338":1}}],["`undefined`",{"1":{"154":2,"333":6,"339":1,"340":2}}],["`units`",{"1":{"81":2}}],["`unimplemented`",{"1":{"62":1}}],["`unauthenticated`",{"1":{"62":1}}],["`unavailable`",{"1":{"62":1}}],["`unknown`",{"1":{"62":1,"333":1}}],["`mtls`",{"1":{"376":5}}],["`middlewareafter`",{"1":{"332":4,"345":2}}],["`middlewarebefore`",{"1":{"332":7}}],["`mock",{"1":{"323":6,"324":4}}],["`m7g",{"1":{"286":2,"290":1}}],["`maintenance",{"1":{"296":1}}],["`manual`",{"1":{"151":1}}],["`match`",{"1":{"115":1}}],["`maxruns`",{"1":{"76":1}}],["`max",{"1":{"62":2,"144":2,"293":1}}],["`mergeifnonempty`",{"1":{"332":2}}],["`metricsclient`",{"1":{"332":1}}],["`metrics`",{"1":{"307":1,"332":1}}],["`mentioned",{"1":{"219":1}}],["`member",{"1":{"219":1}}],["`memo",{"1":{"88":1}}],["`memo`",{"1":{"87":1}}],["`message",{"1":{"219":1}}],["`medium`",{"1":{"83":1}}],["`m`",{"1":{"65":1}}],["`my",{"1":{"46":1,"130":1,"243":1}}],["`ghcr",{"1":{"289":1}}],["`glasskube",{"1":{"237":1}}],["`glasskube`",{"1":{"236":1}}],["`globalinput`",{"1":{"332":4,"345":7}}],["`globalinputtype`",{"1":{"155":1}}],["`globaloutput`",{"1":{"332":6}}],["`globaloutputtype`",{"1":{"155":1}}],["`generic",{"1":{"323":1,"324":1}}],["`generate",{"1":{"234":1,"238":1}}],["`getworkflowidfromname",{"1":{"344":1}}],["`gettaskexternalid",{"1":{"340":1}}],["`gettaskstatusmetrics",{"1":{"338":1}}],["`gettaskstats",{"1":{"338":1}}],["`getqueuemetrics",{"1":{"338":1}}],["`get",{"1":{"309":2,"314":6,"316":8,"320":2,"324":4,"334":1,"335":1,"340":2,"341":1,"342":1,"343":1,"344":1}}],["`get`",{"1":{"62":1,"311":2,"312":2,"316":2,"317":2,"318":2,"319":2,"320":2}}],["`grpc`",{"1":{"243":2}}],["`greater",{"1":{"136":2}}],["`group",{"1":{"74":1,"75":4,"307":3,"322":4}}],["`grant",{"1":{"49":1}}],["`github`",{"1":{"216":1}}],["`github",{"1"
:{"46":1,"216":1}}],["`host`",{"1":{"376":1}}],["`host",{"1":{"376":2}}],["`http",{"1":{"234":1,"254":1}}],["`https`",{"1":{"374":1}}],["`https",{"1":{"46":1,"256":1,"376":1}}],["`hello",{"1":{"345":2}}],["`helm`",{"1":{"233":1}}],["`headers",{"1":{"46":1}}],["`headers`",{"1":{"46":1}}],["`hasworkflow",{"1":{"333":1}}],["`hard`",{"1":{"132":1,"136":1}}],["`hatchetclient`",{"1":{"158":1}}],["`hatchetclient",{"1":{"154":1,"158":1}}],["`hatchetattributespanprocessor`",{"1":{"145":1}}],["`hatchetotel",{"1":{"144":3}}],["`hatchetotel`",{"1":{"144":1}}],["`hatchetinstrumentor`",{"1":{"144":2}}],["`hatchet`",{"1":{"141":3,"247":1,"255":3,"260":1}}],["`hatchet",{"1":{"7":4,"9":2,"21":1,"62":3,"72":2,"118":3,"144":2,"145":31,"146":4,"157":1,"166":1,"201":1,"206":4,"225":1,"226":1,"227":2,"230":1,"234":1,"240":5,"241":1,"243":2,"245":2,"249":1,"251":1,"265":2,"268":30,"270":5,"272":3,"273":5,"274":4,"279":1,"282":1,"284":2,"289":3,"321":1,"330":1,"331":1,"345":3,"348":4,"352":1,"353":1,"354":1,"356":1,"357":1,"358":1,"362":2,"363":1,"364":5,"365":3,"366":2,"367":3,"376":11}}],["`high`",{"1":{"83":1}}],["`h`",{"1":{"65":1,"368":1}}],["`$",{"1":{"43":1,"142":2,"220":1,"221":1,"294":1}}],["`childrunopts`",{"1":{"333":3}}],["`children`",{"1":{"333":3}}],["`childkey",{"1":{"333":1}}],["`childindex",{"1":{"333":1}}],["`child",{"1":{"326":1}}],["`chunk",{"1":{"316":4}}],["`createtaskworkflow",{"1":{"346":1}}],["`createtaskworkflowopts`",{"1":{"332":1}}],["`createdurabletaskworkflow",{"1":{"346":1}}],["`createdurabletaskworkflowopts`",{"1":{"332":1}}],["`createwebhookoptions`",{"1":{"342":1}}],["`createworkflow",{"1":{"346":1}}],["`createworkflowdurabletaskopts`",{"1":{"333":1,"345":1}}],["`createworkflowtaskopts`",{"1":{"333":1,"345":3}}],["`createworkflowopts`",{"1":{"332":1,"346":1}}],["`createworkeropts`",{"1":{"332":1}}],["`createratelimitopts`",{"1":{"339":1}}],["`create",{"1":{"322":7,"324":6,"334":1,"335":1,"341":1,"342":1}}],["`create`",{"1":{"311":2,"312":2,"316":2,"317":2,
"318":2}}],["`cronclient`",{"1":{"332":1}}],["`crons`",{"1":{"332":1}}],["`cronworkflowslist`",{"1":{"311":2}}],["`cronworkflowsorderbyfield",{"1":{"311":2}}],["`cronworkflows`",{"1":{"311":4,"322":4,"324":4,"334":2}}],["`cron`",{"1":{"307":1,"334":3,"341":1}}],["`cron",{"1":{"39":2,"149":1,"311":8,"322":2,"324":2,"334":5,"341":4,"345":4}}],["`c7i",{"1":{"290":2}}],["`curl",{"1":{"162":1}}],["`conditions`",{"1":{"333":3}}],["`config",{"1":{"323":2,"324":2}}],["`concurrency`",{"1":{"307":3,"322":4}}],["`console`",{"1":{"274":1}}],["`consumer`",{"1":{"145":2}}],["`controllers`",{"1":{"268":1}}],["`context",{"0":{"142":1},"1":{"140":1,"142":1,"328":2}}],["`context`",{"1":{"43":1,"53":1,"88":2,"89":1,"94":1,"142":2,"154":3,"155":1,"160":1,"308":1,"310":1,"330":1,"333":1}}],["`comparator`",{"1":{"136":1}}],["`cmd",{"1":{"127":1}}],["`cat",{"1":{"374":1}}],["`catch`",{"1":{"120":1,"333":1}}],["`cast",{"1":{"322":9,"324":9}}],["`callable",{"1":{"307":2,"322":4}}],["`cached`",{"1":{"322":1}}],["`cache",{"1":{"251":1,"255":1}}],["`caddy`",{"1":{"234":1}}],["`cancelrunopts`",{"1":{"340":1}}],["`cancel`",{"1":{"309":2,"316":2}}],["`cancel",{"1":{"67":1,"74":2,"76":2,"77":4,"113":2,"117":2,"322":2,"340":1}}],["`ctx`",{"1":{"94":2,"154":1}}],["`ctx",{"1":{"89":1,"117":1,"142":1,"152":1,"154":4,"155":1,"310":3,"323":4,"324":4,"333":4}}],["`clusterissuer`",{"1":{"241":1}}],["`closed`",{"1":{"216":1}}],["`cloud",{"1":{"201":1,"268":1}}],["`client",{"1":{"141":1,"158":2,"346":3}}],["`clientconfig`",{"1":{"62":1,"144":1}}],["`claude`",{"1":{"11":1}}],["`claude",{"1":{"10":1}}],["`0`",{"1":{"196":1,"265":1,"266":1,"307":2,"322":4,"323":2,"324":2,"340":1,"344":1}}],["`0",{"1":{"38":3,"291":1}}],["`to`",{"1":{"345":4}}],["`tworkflowinput",{"1":{"323":2,"324":2}}],["`tworkflowinput`",{"1":{"322":9,"324":9}}],["`typeerror`",{"1":{"323":2}}],["`type",{"1":{"307":3}}],["`type`",{"1":{"46":1,"214":1}}],["`tls`",{"1":{"273":1,"376":2}}],["`tenantclient`",{"1":{"332":1}}],["`tenant`",{"1":{"19
6":1,"332":1}}],["`tenant",{"1":{"192":2,"307":1}}],["`tenantinviteaccept`",{"1":{"192":1}}],["`tenantid`",{"1":{"154":1,"332":1}}],["`tenantmemberdelete`",{"1":{"191":1,"192":1}}],["`tenacityconfig`",{"1":{"62":2}}],["`t`",{"1":{"154":1,"333":1}}],["`timedelta",{"1":{"307":4,"322":8}}],["`time",{"1":{"109":2}}],["`taskoutputtype`",{"1":{"345":2}}],["`taskoutput`",{"1":{"155":1}}],["`taskexternalid`",{"1":{"340":2}}],["`taskrunid`",{"1":{"336":1}}],["`taskrunexternalid",{"1":{"333":1}}],["`taskrunref`",{"1":{"324":3}}],["`taskrunref",{"1":{"324":3}}],["`taskname",{"1":{"333":1}}],["`taskworkflowdeclaration`",{"1":{"332":4,"333":2,"345":5,"346":2}}],["`taskmetrics`",{"1":{"314":2}}],["`taskdefaults",{"1":{"307":1}}],["`taskdefaults`",{"1":{"307":2}}],["`task",{"1":{"307":1,"309":6,"313":2,"316":2,"332":1,"345":1,"372":1,"373":1}}],["`taskinput`",{"1":{"155":1}}],["`tasks`",{"1":{"74":1,"322":1,"326":1,"331":1,"332":1}}],["`task`s",{"1":{"307":2}}],["`task`",{"1":{"24":4,"25":4,"43":1,"88":1,"94":3,"307":4,"309":3,"322":7,"331":1}}],["`traceid`",{"1":{"158":1}}],["`traceparent`",{"1":{"145":1}}],["`tracer",{"1":{"144":1}}],["`tracerprovider`",{"1":{"144":4}}],["`try`",{"1":{"120":2}}],["`true`",{"1":{"62":2,"113":1,"116":1,"118":1,"133":1,"136":3,"144":2,"146":1,"243":2,"254":7,"256":1,"260":1,"262":1,"263":1,"265":1,"299":1,"316":2,"322":2,"324":2}}],["`triggerdata`",{"1":{"333":1}}],["`triggers",{"1":{"333":1}}],["`triggeredbyevent",{"1":{"333":1}}],["`triggerworkflowoptions",{"1":{"322":5,"324":5}}],["`triggerworkflowoptions`",{"1":{"322":5,"324":5}}],["`triggering",{"1":{"314":2,"316":2,"322":2,"324":2,"338":1}}],["`triggerat`",{"1":{"35":1,"341":2}}],["`trigger",{"1":{"28":1,"317":4}}],["`wakeat`",{"1":{"333":1}}],["`was",{"1":{"309":3}}],["`warn`",{"1":{"274":1}}],["`wait",{"1":{"110":1,"113":2,"322":2}}],["`waitfor",{"1":{"88":1,"333":1}}],["`waitforevent",{"1":{"88":1,"333":1}}],["`waitforevent`",{"1":{"86":1,"87":1,"88":1,"111":1,"112":1,"113":1,"123":2}}],["
`waitfor`",{"1":{"87":1,"102":1,"112":1,"123":2}}],["`whsec",{"1":{"214":2}}],["`while`",{"1":{"19":1}}],["`withmiddleware`",{"1":{"152":1,"158":1}}],["`webhooknames",{"1":{"342":1}}],["`webhookname`",{"1":{"342":3}}],["`webhooksclient`",{"1":{"332":1}}],["`webhooks`",{"1":{"332":1}}],["`webhook",{"1":{"318":8}}],["`webhook`",{"1":{"46":1,"151":1}}],["`weight`",{"1":{"136":1}}],["`work",{"1":{"296":1}}],["`workerid`",{"1":{"343":4}}],["`workerlabels`",{"1":{"333":2}}],["`workerlist`",{"1":{"319":2}}],["`workersclient`",{"1":{"332":1}}],["`workers`",{"1":{"307":1,"332":1}}],["`worker",{"1":{"126":2,"316":2,"319":4,"322":2,"324":2,"332":1}}],["`worker`",{"1":{"74":1,"307":4,"319":4,"328":1}}],["`workflowdefinition`",{"1":{"344":1}}],["`workflowdeclaration`",{"1":{"332":1,"345":1,"346":1}}],["`workflowversionid",{"1":{"333":1}}],["`workflowversion`",{"1":{"320":2,"322":4,"324":4}}],["`workflowids",{"1":{"335":1}}],["`workflowid",{"1":{"333":1}}],["`workflowlist`",{"1":{"320":2}}],["`workflowrunid`",{"1":{"340":2}}],["`workflowrunid",{"1":{"333":1}}],["`workflowruntriggerconfig`",{"1":{"322":6,"324":6}}],["`workflowrunorderbydirection",{"1":{"311":2,"317":2}}],["`workflowrunref`",{"1":{"25":4,"316":1,"322":6,"324":2,"340":2}}],["`workflowname`",{"1":{"333":1}}],["`workflowname",{"1":{"145":1,"333":1}}],["`workflowsclient`",{"1":{"332":2}}],["`workflows`",{"1":{"74":1,"307":2,"322":4,"324":4,"332":1}}],["`workflows",{"1":{"72":1,"141":1}}],["`workflow",{"1":{"43":1,"56":1,"191":1,"192":1,"307":1,"309":4,"311":6,"312":4,"314":2,"316":15,"317":9,"320":8,"332":1,"338":2,"345":2}}],["`workflow`",{"1":{"24":1,"25":1,"43":2,"93":1,"307":3,"320":2,"321":1,"326":1,"331":1,"332":6,"333":3,"334":2,"341":2,"344":6}}],["`asynciterableiterator`",{"1":{"340":1}}],["`asyncio",{"1":{"325":2}}],["`asyncio`",{"1":{"325":5}}],["`any`",{"1":{"323":2,"324":2,"333":3,"334":3,"340":4,"341":2,"344":3,"345":2}}],["`and`",{"1":{"118":1}}],["`auth`",{"1":{"318":2}}],["`attempt",{"1":{"309":1,"337"
:1}}],["`app",{"1":{"219":1}}],["`application",{"1":{"216":3}}],["`apikey`",{"1":{"305":1}}],["`apitokenupdaterevoke`",{"1":{"192":1}}],["`apitokencreate`",{"1":{"191":1,"192":1}}],["`api",{"1":{"191":1,"192":2,"234":1,"238":1}}],["`api`",{"1":{"151":1,"240":1,"249":1,"297":1,"332":2}}],["`action`",{"1":{"216":3}}],["`after",{"1":{"153":1}}],["`after`",{"1":{"152":1,"153":4,"154":3,"155":1,"157":1,"158":6}}],["`array`",{"1":{"144":1}}],["`a",{"1":{"118":2}}],["`agentic",{"1":{"101":1}}],["`agents",{"1":{"10":2}}],["`admin123",{"1":{"255":1}}],["`admin",{"1":{"251":1,"255":4}}],["`admin`",{"1":{"82":2,"255":1}}],["`additionalmetadata",{"1":{"333":1,"334":1,"341":1}}],["`additionalmetadata`",{"1":{"145":1}}],["`additional",{"1":{"54":2,"72":1,"80":1,"81":2,"309":1,"311":4,"316":8,"317":7,"322":4,"323":2,"324":6,"338":1}}],["`await",{"1":{"72":3}}],["`await`",{"1":{"24":1,"158":1}}],["`abortsignal`",{"1":{"69":1}}],["`aborted`",{"1":{"62":1}}],["`already",{"1":{"62":1}}],["`aio",{"1":{"24":3,"25":1,"43":3,"308":2,"309":2,"310":4,"311":8,"312":10,"313":2,"314":8,"315":2,"316":24,"317":14,"318":10,"319":6,"320":8,"322":17,"323":8,"324":26}}],["`r`",{"1":{"309":1,"323":2,"324":6}}],["`raise",{"1":{"309":1}}],["`rabbitmq`",{"1":{"249":1,"261":1}}],["`ratelimitorderbyfield`",{"1":{"339":1}}],["`ratelimitorderbydirection`",{"1":{"339":1}}],["`ratelimitsclient`",{"1":{"332":1}}],["`ratelimits`",{"1":{"332":1}}],["`ratelimitduration`",{"1":{"315":2}}],["`rate",{"1":{"79":1,"81":2,"82":2,"307":3,"322":4}}],["`replayrunopts`",{"1":{"340":1}}],["`replay",{"1":{"340":1}}],["`replay`",{"1":{"316":2}}],["`record`",{"1":{"333":3,"334":4,"341":4}}],["`rethrowifcancelled",{"1":{"333":1}}],["`return",{"1":{"322":2,"324":2}}],["`retrycount",{"1":{"333":1}}],["`retry",{"1":{"59":1,"62":6,"309":1,"323":2,"324":2}}],["`retries`",{"1":{"57":2,"58":2,"61":1,"307":2,"322":4}}],["`register",{"1":{"307":2}}],["`region`",{"1":{"151":1}}],["`readabledatakey",{"1":{"333":1}}],["`read",{"1":{"251":1
,"255":4,"299":4}}],["`request`",{"1":{"342":1}}],["`requestparams",{"1":{"338":1}}],["`requestparams`",{"1":{"338":2}}],["`requestid`",{"1":{"153":1}}],["`required`",{"1":{"136":2}}],["`releaseslot",{"1":{"333":1}}],["`release",{"1":{"138":2,"309":2}}],["`restoretask",{"1":{"340":1}}],["`resolved`",{"1":{"332":3,"345":3}}],["`resource",{"1":{"62":1}}],["`response",{"1":{"220":1}}],["`result",{"1":{"55":1}}],["`reward",{"1":{"49":1}}],["`refresh",{"1":{"309":2}}],["`refreshtimeout",{"1":{"66":1,"333":1}}],["`refreshtimeout`",{"1":{"66":1}}],["`ref",{"1":{"25":1}}],["`references",{"1":{"10":5}}],["`runcmd`",{"1":{"364":1,"365":1}}],["`runchild",{"1":{"88":1,"333":1}}],["`runchild`",{"1":{"87":1,"102":1}}],["`runandwait",{"1":{"332":1,"345":1}}],["`runopts`",{"1":{"332":3,"345":7}}],["`runnables`",{"1":{"321":1,"345":1}}],["`running`",{"1":{"169":1}}],["`runnowaitchild",{"1":{"333":1}}],["`runnowait",{"1":{"145":1,"332":1,"345":4}}],["`runnowait`",{"1":{"25":1,"43":1}}],["`runsclient`",{"1":{"307":1,"320":1,"332":1}}],["`runs`",{"1":{"149":1,"307":1,"332":1}}],["`runs",{"1":{"72":2}}],["`runmany`",{"1":{"43":1}}],["`run",{"1":{"25":3,"43":2,"145":1,"158":1,"316":6,"322":13,"324":14,"332":1,"345":5}}],["`run`",{"1":{"24":4,"43":2,"133":1,"307":2,"322":2,"324":2,"340":2}}],["`p`",{"1":{"333":5}}],["`ping`",{"1":{"216":1}}],["`ps",{"1":{"171":1}}],["`postmark`",{"1":{"265":1}}],["`postgres",{"1":{"245":1}}],["`postgres`",{"1":{"225":2,"231":1,"249":1,"253":1}}],["`post`",{"1":{"62":1}}],["`poetry",{"1":{"127":2}}],["`precmds`",{"1":{"364":1}}],["`priority",{"1":{"334":1,"341":1}}],["`priority`",{"1":{"83":2,"151":1,"309":1,"311":2,"316":2,"322":2,"324":2}}],["`pr",{"1":{"216":1}}],["`prometheus",{"1":{"268":6}}],["`promise`",{"1":{"154":1,"158":2,"332":4,"333":17,"334":4,"335":5,"336":1,"338":4,"339":2,"340":8,"341":7,"342":5,"343":5,"344":4,"345":7}}],["`product`",{"1":{"151":1}}],["`production`",{"1":{"7":1,"151":1}}],["`producer`",{"1":{"145":5}}],["`pull",{"1":{"216"
:1}}],["`push",{"1":{"145":1}}],["`push`",{"1":{"48":1}}],["`publish",{"1":{"101":1}}],["`putstream",{"1":{"333":1}}],["`put",{"1":{"82":2,"160":1,"309":2}}],["`put`",{"1":{"62":1,"315":2}}],["`permission",{"1":{"62":1}}],["`pause",{"1":{"343":1}}],["`partial`",{"1":{"340":1,"342":1}}],["`parentworkflowrunid",{"1":{"333":1}}],["`parenttask`",{"1":{"333":1}}],["`parentoutput",{"1":{"333":1}}],["`parent`",{"1":{"323":2,"324":2}}],["`parents`",{"1":{"322":2}}],["`parent",{"1":{"309":1,"314":2,"316":2,"317":8,"322":2,"323":4,"324":6,"338":1}}],["`parentcondition`",{"1":{"117":1}}],["`payloadschema",{"1":{"333":1}}],["`payload`",{"1":{"54":1,"221":1,"312":2,"322":2,"324":2}}],["`payment",{"1":{"118":1,"214":3}}],["`patch`",{"1":{"62":1}}],["`package",{"1":{"21":1}}],["`jsonserializablemapping`",{"1":{"309":2,"311":4,"316":4,"317":4}}],["`jsonserializablemapping",{"1":{"307":3,"311":2,"312":2,"316":2,"317":4,"322":4,"323":2,"324":6}}],["`json",{"1":{"14":2,"55":2,"105":4,"214":9,"216":10,"219":7,"220":8,"221":7}}],["`done`",{"1":{"309":2}}],["`docker`",{"1":{"236":1,"238":1}}],["`docker",{"1":{"225":4,"226":1,"227":1,"230":1,"278":1,"285":1,"358":3}}],["`docs",{"1":{"11":1,"12":1}}],["`duration`",{"1":{"307":4,"315":2,"322":8,"333":3,"345":1}}],["`durabletask",{"1":{"332":1,"345":1}}],["`durablecontext`",{"1":{"88":2,"89":1,"102":1,"109":1,"112":1,"308":1,"333":1}}],["`durable",{"1":{"88":1,"89":1,"271":1,"307":3,"322":2}}],["`dict",{"1":{"307":3,"309":1,"310":1,"314":4,"316":6,"318":4,"322":6,"323":4,"324":6}}],["`disable`",{"1":{"255":1}}],["`date`",{"1":{"333":1,"337":2,"341":4,"345":1}}],["`datetime`",{"1":{"317":4,"322":2,"324":2}}],["`datetime",{"1":{"65":1,"313":4,"314":4,"316":12,"322":4,"324":4}}],["`dataclass`",{"1":{"331":1}}],["`dataclasses`",{"1":{"331":1}}],["`data`",{"1":{"309":1,"333":1}}],["`database",{"1":{"225":2,"245":2,"246":1,"251":1,"255":22,"284":1,"293":1,"295":1}}],["`daily",{"1":{"84":1}}],["`dynamic`",{"1":{"266":1}}],["`dynamickey`",{"1":{"81"
:1}}],["`dynamic",{"1":{"81":1}}],["`d60181b7",{"1":{"46":1}}],["`delay",{"1":{"345":1}}],["`delete",{"1":{"334":1,"335":1,"341":1,"342":1,"344":1}}],["`delete`",{"1":{"35":1,"62":1,"311":2,"312":2,"317":2,"318":2,"320":2,"324":2}}],["`description`",{"1":{"307":3}}],["`desired",{"1":{"136":1,"307":2,"322":2}}],["`dependencies=",{"1":{"323":2,"324":2}}],["`dependencies`",{"1":{"323":2,"324":2}}],["`depends`",{"1":{"152":1,"155":1,"323":4,"324":4,"330":1}}],["`deploymentenvfrom`",{"1":{"246":1}}],["`deps`",{"1":{"152":1,"154":1,"155":1}}],["`dev",{"1":{"365":1}}],["`development`",{"1":{"151":1,"258":1}}],["`dev`",{"1":{"7":1}}],["`default`",{"1":{"255":2}}],["`default",{"1":{"251":1,"255":3,"307":9}}],["`def",{"1":{"94":1}}],["`deadline",{"1":{"62":1}}],["`",{"1":{"10":2,"14":2,"24":1,"25":1,"38":7,"43":1,"46":12,"50":3,"55":2,"59":1,"62":3,"66":1,"84":1,"88":5,"94":1,"105":4,"116":1,"118":3,"126":1,"127":3,"138":1,"142":7,"144":8,"145":9,"146":10,"153":6,"154":6,"157":2,"158":5,"160":4,"214":14,"216":14,"219":9,"220":12,"221":9,"231":1,"240":1,"243":22,"245":1,"251":9,"254":2,"255":1,"260":4,"264":2,"265":2,"268":4,"272":3,"283":2,"284":1,"294":1,"307":10,"309":4,"310":4,"314":4,"317":3,"322":50,"323":11,"324":41,"326":1,"332":12,"333":45,"334":10,"335":14,"336":2,"337":7,"338":17,"339":7,"340":10,"341":17,"342":14,"343":5,"344":11,"345":45,"346":10,"353":2,"354":2,"355":1,"365":2,"372":4,"373":1}}],["`spawnchildren",{"1":{"333":1}}],["`spawnchild",{"1":{"333":1}}],["`smtp",{"1":{"305":3}}],["`since",{"1":{"337":1}}],["`since`",{"1":{"196":1,"313":2,"314":2,"316":6,"322":2,"324":2,"338":2}}],["`signal",{"1":{"310":1}}],["`sigterm`",{"1":{"170":1}}],["`simpletask`",{"1":{"72":1}}],["`sourcenames",{"1":{"342":1}}],["`source",{"1":{"318":4}}],["`source`",{"1":{"150":1}}],["`soft`",{"1":{"132":2}}],["`skip",{"1":{"113":2,"117":2,"322":2}}],["`skills",{"1":{"10":2}}],["`search",{"1":{"337":1,"339":1}}],["`second`",{"1":{"315":2}}],["`seed",{"1":{"251":1,"255":1}}],["`sett
ings`",{"1":{"234":1,"238":1}}],["`settimeout`",{"1":{"109":1}}],["`setup",{"1":{"226":1,"230":1}}],["`server",{"1":{"167":1,"230":1,"251":1,"254":25,"256":2,"257":15,"258":12,"259":9,"260":15,"261":7,"262":18,"263":7,"264":12,"265":25,"266":12,"267":2,"268":6,"374":1}}],["`send",{"1":{"49":1}}],["`slack",{"1":{"219":1,"220":2,"221":1}}],["`sleepuntil",{"1":{"333":1}}],["`sleep",{"1":{"316":4}}],["`sleepcondition",{"1":{"310":1}}],["`sleepcondition`",{"1":{"116":1,"118":3}}],["`sleepfor",{"1":{"88":1,"333":1}}],["`sleepfor`",{"1":{"86":1,"87":1,"88":1,"102":2,"108":1,"109":1,"123":2,"333":1}}],["`slots`",{"1":{"22":2,"74":1,"75":1,"271":1,"307":1}}],["`scrapeprometheusmetrics",{"1":{"338":1}}],["`scrape",{"1":{"314":2}}],["`scopes",{"1":{"335":1}}],["`scopes`",{"1":{"312":2}}],["`scope",{"1":{"318":4}}],["`scope`",{"1":{"52":1,"312":2,"322":2,"324":2}}],["`scaledobject`",{"1":{"130":1}}],["`schedules`",{"1":{"332":1}}],["`scheduleclient`",{"1":{"332":2}}],["`scheduletriggerworkflowoptions",{"1":{"322":2,"324":2}}],["`scheduletriggerworkflowoptions`",{"1":{"322":2,"324":2}}],["`schedule`",{"1":{"322":2,"324":2}}],["`schedule`d",{"1":{"307":2}}],["`scheduler`",{"1":{"268":1}}],["`scheduler",{"1":{"251":1,"254":4}}],["`scheduledrun`",{"1":{"341":3}}],["`scheduledruns`",{"1":{"341":1}}],["`scheduledruns",{"1":{"341":1}}],["`scheduledworkflowsbulkdeletefilter`",{"1":{"341":2}}],["`scheduledworkflowsbulkdeleteresponse`",{"1":{"317":2}}],["`scheduledworkflowsbulkupdateresponse`",{"1":{"317":1}}],["`scheduledworkflowsbulkupdateitem`",{"1":{"317":1}}],["`scheduledworkflowslist`",{"1":{"317":2}}],["`scheduledworkflowsorderbyfield",{"1":{"317":2}}],["`scheduledworkflows`",{"1":{"317":6,"341":3}}],["`scheduledworkflowruncreate`",{"1":{"192":1}}],["`scheduled`",{"1":{"307":1,"332":1}}],["`scheduled",{"1":{"149":1,"192":1,"317":11}}],["`schedule",{"1":{"65":2,"144":1,"145":1,"307":2,"322":5,"345":4}}],["`s`",{"1":{"65":1}}],["`subscribetostream",{"1":{"340":1}}],["`subscribe",{"1
":{"316":2}}],["`subscription",{"1":{"50":3}}],["`subcription",{"1":{"50":1}}],["`sharedconfig",{"1":{"243":9,"278":2}}],["`sharedconfig`",{"1":{"243":2}}],["`sha1`",{"1":{"46":1}}],["`sha256`",{"1":{"46":1}}],["`step",{"1":{"323":8,"324":8}}],["`step1`",{"1":{"121":1}}],["`stickystrategy",{"1":{"307":3}}],["`sticky`",{"1":{"132":2,"133":2,"307":3}}],["`str",{"1":{"307":8,"309":7,"311":6,"314":4,"316":6,"317":10,"318":6,"320":4,"322":11,"324":7}}],["`str`",{"1":{"307":2,"309":1,"310":1,"311":10,"312":12,"313":2,"314":2,"315":2,"316":15,"317":8,"318":10,"319":4,"320":6,"322":9,"324":11}}],["`string`",{"1":{"145":12,"332":5,"333":22,"334":9,"335":3,"336":1,"337":1,"338":12,"339":1,"340":6,"341":6,"342":3,"343":4,"344":5,"345":2}}],["`stripe`",{"1":{"214":1}}],["`stripe",{"1":{"46":1,"214":2}}],["`static",{"1":{"318":4}}],["`statuses`",{"1":{"316":6,"317":5,"322":2,"324":2}}],["`start`",{"1":{"307":1}}],["`startedat`",{"1":{"158":1}}],["`standalone`",{"1":{"307":2,"321":1}}],["`standalone",{"1":{"56":1}}],["`standalonetask`",{"1":{"43":1}}],["`staging`",{"1":{"7":1,"151":1}}],["```txt",{"1":{"362":2,"374":1}}],["```ts",{"1":{"332":12,"333":1,"337":1,"345":7}}],["```typescript",{"1":{"14":1,"20":1,"24":1,"25":1,"35":6,"39":1,"40":3,"43":2,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":2,"74":1,"78":1,"81":1,"82":2,"84":3,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":4,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":1,"121":1,"132":1,"133":1,"135":1,"136":2,"142":3,"144":1,"149":2,"153":3,"154":1,"155":1,"156":1,"157":2,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2,"345":2}}],["```promql",{"1":{"268":27}}],["```python",{"1":{"14":1,"20":1,"24":2,"25":4,"35":6,"39":2,"40":6,"43":1,"48":1,"50":1,"52":4,"53":1,"55":4,"58":1,"59":1,"60":1,"61":1,"62":1,"65":1,"66":1,"68":2,"72":8,"74":1,"78":1,"81":1,"82":2,"84":3,"89":3,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":5,"109":1,"110":1,"112":3,"11
3":3,"115":1,"116":1,"117":3,"118":1,"120":1,"121":1,"132":1,"133":1,"135":1,"136":2,"138":1,"141":3,"142":1,"144":2,"149":2,"153":1,"155":1,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2,"322":1,"325":4,"326":4,"328":2,"330":2,"331":3}}],["```sql",{"1":{"247":1}}],["```sh",{"1":{"7":8,"224":1,"225":1,"226":1,"228":1,"234":3,"238":2,"249":1,"289":1,"291":1,"294":2,"349":2,"350":1,"352":2,"353":1,"354":3,"355":1,"356":1,"357":1,"360":1,"361":1,"365":4,"367":1,"368":1,"372":4,"374":1}}],["````",{"1":{"243":1}}],["````yaml",{"1":{"243":1}}],["```yaml",{"1":{"130":2,"146":1,"225":2,"227":1,"230":2,"231":1,"240":1,"241":1,"243":3,"245":1,"246":1,"249":1,"268":1,"278":2,"285":1,"289":1,"364":3,"365":1,"367":1}}],["```dockerfile",{"1":{"127":7}}],["```mermaid",{"1":{"87":1,"101":3,"123":1,"124":1,"173":1}}],["```json",{"1":{"21":1,"129":1}}],["```ruby",{"1":{"14":1,"20":1,"24":1,"25":2,"35":6,"39":1,"40":3,"43":1,"48":1,"50":1,"52":4,"53":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":2,"72":4,"74":1,"78":1,"81":1,"82":2,"84":3,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":5,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":1,"121":1,"132":1,"133":1,"135":1,"136":2,"138":1,"142":2,"149":2,"153":1,"155":1,"160":1,"161":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["```go",{"1":{"14":1,"20":1,"24":1,"25":2,"35":3,"39":1,"40":3,"43":1,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":1,"74":1,"78":1,"81":1,"82":2,"84":3,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":4,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":1,"121":1,"132":1,"133":1,"135":1,"136":2,"138":1,"144":3,"149":2,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2}}],["```bash",{"1":{"10":2,"11":1,"21":7,"129":1,"144":2,"146":2,"148":1,"227":1,"229":1,"252":6,"253":2,"268":1,"278":3,"279":1,"284":1,"285":1,"289":2,"298":1,"301":1,"304":1}}],["```",{"1":{"7":8,"10":2,"11":1,"14":4,"20":4,"21":10,"24":5
,"25":9,"35":21,"38":2,"39":5,"40":15,"43":5,"48":4,"50":4,"52":16,"53":4,"55":6,"58":4,"59":4,"60":4,"61":4,"62":1,"65":4,"66":4,"68":7,"72":12,"74":4,"78":4,"81":4,"82":8,"84":12,"87":1,"89":3,"93":4,"94":4,"95":4,"96":4,"97":4,"101":3,"104":12,"105":18,"109":4,"110":4,"112":12,"113":12,"115":1,"116":4,"117":12,"118":4,"120":4,"121":4,"123":1,"124":1,"127":8,"129":6,"130":2,"132":4,"133":4,"135":4,"136":8,"138":3,"141":5,"142":6,"144":8,"146":5,"148":5,"149":8,"153":5,"154":1,"155":3,"156":1,"157":2,"160":4,"161":6,"162":3,"173":1,"196":2,"214":4,"216":4,"219":4,"220":4,"221":4,"224":1,"225":5,"226":1,"227":6,"228":1,"229":1,"230":2,"231":1,"234":5,"238":2,"240":1,"241":1,"243":2,"245":1,"246":1,"247":1,"249":2,"252":6,"253":2,"268":29,"278":5,"279":1,"283":2,"284":1,"285":2,"289":4,"291":1,"294":8,"295":2,"296":6,"298":1,"301":1,"304":1,"322":1,"325":4,"326":4,"328":2,"330":2,"331":3,"332":12,"333":1,"337":1,"345":9,"349":2,"350":1,"352":2,"353":1,"354":3,"355":1,"356":1,"357":1,"360":1,"361":1,"362":2,"364":3,"365":5,"367":2,"368":1,"372":10,"373":2,"374":2}}],["><",{"1":{"332":24,"333":40,"334":5,"335":6,"336":2,"337":8,"338":5,"339":3,"340":10,"341":8,"342":6,"343":6,"344":5,"345":12,"346":3}}],[">events",{"1":{"173":1}}],[">|grpc|",{"1":{"173":1}}],[">|completes|",{"1":{"124":1}}],[">|",{"1":{"124":1}}],[">|wait",{"1":{"123":1}}],[">|hits",{"1":{"123":1}}],[">|assigned",{"1":{"123":1,"124":1}}],[">|result|",{"1":{"101":3}}],[">|spawns|",{"1":{"101":9}}],[">>w",{"1":{"87":1}}],[">>p",{"1":{"87":3}}],[">>h",{"1":{"87":3}}],[">`",{"1":{"11":1,"12":1}}],[">",{"1":{"5":4,"7":3,"9":3,"14":1,"20":1,"21":4,"34":4,"35":5,"36":4,"38":6,"39":2,"41":1,"43":2,"46":8,"48":2,"49":3,"50":5,"52":5,"53":1,"55":1,"56":2,"58":1,"59":2,"60":1,"61":1,"62":10,"65":6,"66":1,"68":2,"72":13,"74":2,"81":6,"82":1,"85":4,"89":6,"90":2,"94":5,"95":2,"96":2,"100":3,"101":2,"104":4,"105":4,"109":3,"110":1,"112":4,"113":4,"115":7,"116":1,"117":10,"118":1,"121":2,"124":4,"127":5,"130":2,"131"
:3,"132":7,"133":2,"134":1,"136":6,"137":5,"138":3,"141":1,"142":1,"144":1,"147":2,"148":1,"149":3,"152":2,"153":14,"154":2,"155":2,"156":1,"157":4,"160":3,"162":1,"173":5,"177":4,"190":3,"209":3,"214":1,"216":4,"217":4,"219":4,"220":4,"221":2,"225":6,"230":2,"238":2,"245":1,"253":4,"255":4,"268":3,"278":5,"279":4,"280":5,"284":3,"294":3,"296":1,"305":3,"325":4,"326":6,"328":2,"330":9,"331":3,"332":25,"333":34,"334":9,"335":5,"336":1,"338":4,"339":2,"340":16,"341":11,"342":6,"343":5,"344":7,"345":30,"346":7,"347":2,"364":2,"368":2,"369":2,"370":2,"371":2,"372":2}}],["vcs",{"1":{"374":10}}],["v=frpub6oebcc",{"1":{"325":1}}],["v0",{"1":{"278":3,"285":2,"289":1}}],["v0client",{"2":{"52":1,"149":1}}],["volumes",{"1":{"225":7,"227":6,"231":1}}],["volume",{"1":{"225":1,"268":1,"301":1}}],["void",{"1":{"142":6,"160":1}}],["vpc",{"1":{"189":1}}],["vpns",{"1":{"166":1}}],["v2",{"1":{"135":3,"136":7}}],["vs",{"0":{"80":1,"179":1},"1":{"154":1,"179":1,"268":3}}],["video>",{"1":{"369":1,"370":1,"371":1}}],["video",{"1":{"369":2,"370":2,"371":2}}],["vice",{"1":{"301":1}}],["virtually",{"1":{"331":1}}],["virtualenvs",{"1":{"127":1},"2":{"127":1}}],["virginia",{"1":{"198":1}}],["visit",{"1":{"283":1}}],["visibility",{"1":{"143":1,"190":1,"203":1,"204":1}}],["visible",{"0":{"168":1},"1":{"98":1,"117":1,"154":1,"168":1}}],["vision",{"1":{"136":4}}],["via",{"1":{"21":1,"25":2,"35":4,"36":1,"39":1,"40":2,"62":2,"72":3,"83":1,"103":1,"148":1,"152":1,"154":1,"155":1,"158":1,"167":1,"171":1,"188":1,"192":1,"193":1,"196":1,"207":1,"225":2,"234":1,"235":1,"238":1,"243":4,"246":1,"250":1,"268":2,"269":1,"293":1,"301":1,"317":2,"346":1,"349":1,"372":2}}],["viewing",{"0":{"195":1}}],["views",{"1":{"150":1}}],["viewed",{"1":{"79":1}}],["view",{"0":{"369":1,"370":1,"371":1},"1":{"11":1,"12":1,"36":1,"41":1,"72":1,"140":1,"171":1,"195":1,"234":1,"248":1,"307":1,"369":2,"370":3,"371":2}}],["v",{"1":{"20":2,"35":3,"43":1,"55":2,"66":1,"82":1,"104":1,"105":1,"120":1,"144":1,"149":2,"161":2,"162":1,
"238":2,"338":1,"362":2}}],["vms",{"1":{"174":1}}],["vm",{"1":{"19":1}}],["vacuum",{"1":{"34":1,"296":4}}],["var`",{"1":{"243":1}}],["vary",{"1":{"37":1,"42":1}}],["var",{"1":{"25":1,"74":1,"78":1,"96":1,"104":2,"105":2,"127":2,"225":3,"226":1,"227":2,"231":1,"243":1}}],["varies",{"1":{"177":1}}],["variants",{"1":{"43":1}}],["variables",{"0":{"246":1,"252":1,"376":1},"1":{"62":2,"126":1,"225":2,"240":1,"243":5,"246":3,"250":1,"251":6,"252":1,"254":1,"268":3,"269":1,"272":1,"294":1,"298":1,"301":1,"304":1,"374":2,"375":1,"376":2}}],["variable",{"0":{"251":1},"1":{"21":1,"62":1,"81":1,"146":2,"167":1,"230":1,"243":1,"246":1,"251":4,"254":1,"255":1,"256":1,"257":1,"258":1,"259":1,"260":1,"261":1,"262":1,"263":1,"264":1,"265":1,"266":1,"267":1,"270":1,"271":1,"272":1,"273":1,"274":1,"284":1,"289":1,"291":1,"293":1,"295":1,"372":1,"374":1,"376":2}}],["various",{"1":{"5":1,"23":1,"161":1,"307":2,"322":3,"324":2,"345":1,"375":1}}],["valuable",{"1":{"159":1}}],["value=256",{"1":{"136":1}}],["value=",{"1":{"136":1}}],["valuelocation",{"1":{"130":3}}],["valueerror",{"1":{"68":1}}],["values",{"1":{"16":1,"38":2,"46":2,"83":2,"135":1,"136":1,"148":1,"152":1,"153":1,"155":1,"158":1,"241":2,"243":3,"268":1,"278":5,"294":5,"305":1,"307":3,"376":1},"2":{"278":1}}],["value",{"1":{"15":1,"32":1,"38":1,"46":2,"57":1,"72":3,"94":1,"104":5,"105":7,"109":1,"112":1,"116":1,"117":2,"120":2,"130":1,"135":1,"136":17,"149":5,"150":1,"153":18,"154":3,"155":2,"214":1,"216":1,"219":1,"220":1,"221":1,"243":2,"246":1,"254":1,"255":1,"256":1,"257":1,"258":1,"259":1,"260":1,"261":1,"262":1,"263":1,"264":2,"265":1,"266":1,"267":1,"270":1,"271":1,"272":1,"273":1,"274":1,"278":1,"289":2,"291":1,"293":4,"295":4,"296":1,"317":4,"323":2,"324":2,"330":6,"333":1,"340":1},"2":{"94":1,"104":2,"105":3,"120":1}}],["valid",{"1":{"94":1,"129":1,"148":1,"241":1,"243":3,"344":1,"376":1}}],["validation",{"1":{"326":3,"331":1}}],["validating",{"1":{"46":1,"183":1,"331":1}}],["validates",{"1":{"106":1}}],["validate",{
"1":{"46":1,"158":1,"333":1}}],["validated",{"1":{"15":1,"309":1,"326":1,"333":2}}],["validator",{"1":{"307":6}}],["validator`",{"1":{"307":3,"326":2,"331":1}}],["validator=input",{"1":{"331":1}}],["validator=githubprinput",{"1":{"216":1}}],["validator=slackinteractioninput",{"1":{"221":1}}],["validator=slackcommandinput",{"1":{"220":1}}],["validator=slackeventinput",{"1":{"219":1}}],["validator=stripepaymentinput",{"1":{"214":1}}],["validator=simpleinput",{"1":{"14":1}}],["validator=childinput",{"1":{"105":1,"326":1}}],["validator=parentinput",{"1":{"105":1,"326":1}}],["validator=awaitedevent",{"1":{"89":1}}],["validator=workflowinput",{"1":{"74":1,"78":1}}],["validator=taskoutput",{"1":{"55":1}}],["validator=taskinput",{"1":{"55":2}}],["validator=eventworkflowinput",{"1":{"50":1,"52":1}}],["validators",{"1":{"15":1,"309":1,"331":1}}],["venv",{"1":{"364":1}}],["verbosely",{"1":{"325":1}}],["versa",{"1":{"301":1}}],["version>`",{"1":{"279":1}}],["version`",{"1":{"254":1,"320":4}}],["version=",{"1":{"146":1}}],["version=1",{"1":{"127":1}}],["versions",{"0":{"278":1},"1":{"72":1,"279":2}}],["version",{"0":{"283":1,"285":1},"1":{"2":1,"10":1,"11":1,"12":1,"50":1,"145":2,"149":2,"166":3,"225":4,"227":2,"229":1,"254":3,"276":1,"277":1,"278":2,"279":5,"280":1,"281":1,"282":3,"283":7,"284":4,"285":1,"286":1,"290":1,"291":1,"307":3,"309":3,"320":16,"331":1,"333":2,"350":2,"362":4}}],["verified=true",{"1":{"253":1}}],["verified`",{"1":{"243":1,"260":1}}],["verified",{"1":{"219":1,"225":2,"243":1,"260":1}}],["verification",{"1":{"218":1,"243":1,"278":1}}],["verifying",{"0":{"350":1},"1":{"218":1}}],["verify",{"1":{"158":1,"166":1,"214":1,"216":1,"219":1,"241":2,"276":1,"278":2,"279":1,"282":1,"350":1}}],["vector",{"1":{"146":1}}],["ve",{"1":{"5":1,"8":1,"23":1,"47":2,"150":2,"169":1,"214":1,"216":1,"326":1,"330":2,"364":1}}],["v1worker",{"1":{"333":1}}],["v1webhooksourcename",{"1":{"318":2}}],["v1webhookhmacauth`",{"1":{"318":2}}],["v1webhookapikeyauth",{"1":{"318":2}}],["v1t
asksummary",{"1":{"322":2,"324":2}}],["v1taskstatus",{"1":{"72":4,"316":6,"322":2,"324":2},"2":{"72":1}}],["v1alpha1",{"1":{"130":2}}],["v1createfilterrequest",{"1":{"52":1},"2":{"52":1}}],["v1",{"1":{"0":1,"2":3,"3":2,"6":1,"7":1,"8":1,"13":2,"16":3,"17":6,"18":1,"19":1,"23":2,"25":2,"26":1,"27":1,"31":4,"33":1,"34":4,"37":1,"38":5,"42":1,"46":1,"47":1,"48":1,"49":2,"72":1,"85":3,"87":2,"89":1,"90":1,"92":2,"93":4,"100":1,"102":1,"103":1,"104":1,"106":2,"109":1,"110":1,"111":1,"112":2,"113":2,"115":1,"118":2,"123":1,"124":1,"129":2,"130":4,"148":3,"153":1,"157":1,"161":1,"169":3,"176":3,"178":1,"182":1,"184":2,"196":1,"207":2,"225":2,"227":1,"268":3,"283":1,"286":1,"289":1,"309":2,"326":1,"328":1,"374":2}}],["ec256",{"1":{"238":2,"253":2},"2":{"238":2,"253":2}}],["echo",{"1":{"238":1}}],["eu",{"1":{"198":1}}],["editor",{"1":{"207":1}}],["edition",{"1":{"190":1}}],["edges",{"1":{"91":1}}],["e65100",{"1":{"173":1}}],["e3f2fd",{"1":{"173":1}}],["equivalent",{"1":{"160":1}}],["equally",{"1":{"187":1}}],["equal`",{"1":{"136":3}}],["equal",{"1":{"117":1,"136":4}}],["e2e",{"1":{"158":1}}],["e2eencryption",{"1":{"157":2}}],["escape",{"1":{"316":2,"317":2}}],["estimated",{"1":{"287":1}}],["essential",{"1":{"127":1}}],["especially",{"1":{"22":1,"32":1,"75":1,"81":1,"123":1,"209":2}}],["elapsed",{"1":{"124":2,"333":1}}],["elsewhere",{"1":{"94":1,"160":1}}],["else",{"1":{"68":1,"115":1,"117":3}}],["early",{"1":{"326":1}}],["earth",{"1":{"105":2,"294":1,"326":1}}],["east",{"1":{"198":1,"305":1}}],["easier",{"1":{"307":1,"315":1,"329":1,"339":1}}],["easiest",{"1":{"70":1,"164":1,"225":1}}],["easily",{"1":{"161":1}}],["easy",{"1":{"2":1,"46":1,"115":1,"142":1,"211":1,"316":4,"348":1,"351":1,"362":1}}],["etl",{"1":{"98":1}}],["etc",{"1":{"19":1,"46":1,"76":1,"83":1,"93":1,"94":1,"148":1,"173":1,"174":1,"183":1,"201":1,"209":1,"217":1,"225":1,"227":1,"302":1,"321":2,"325":1,"338":1,"345":1}}],["ephemeral",{"1":{"49":2,"89":1}}],["evaluating",{"1":{"329":1}}],["evaluated",{"1":{"113
":1,"152":1,"156":1,"330":1}}],["evaluate",{"1":{"48":1,"49":1,"312":2,"322":2,"324":2,"329":1}}],["evaluates",{"1":{"46":1,"116":1,"118":1,"174":1}}],["evictmodel",{"1":{"136":1}}],["evict",{"1":{"136":2}}],["evictable",{"1":{"104":1,"109":1,"112":1}}],["evicts",{"1":{"87":1,"123":1}}],["eviction",{"0":{"123":1,"124":1},"1":{"87":2,"102":1,"103":2,"104":1,"109":1,"112":1,"123":6,"124":2}}],["evicted",{"1":{"87":2,"88":1,"90":1,"102":1,"104":3,"115":1,"123":4,"340":2}}],["even",{"1":{"13":1,"61":1,"76":1,"100":1,"112":1,"123":1,"174":1,"220":1,"301":1}}],["eventbuffer",{"1":{"294":4}}],["eventwithmetadata",{"1":{"294":1},"2":{"294":1}}],["eventworkflow",{"1":{"50":2,"52":2}}],["eventworkflowinput",{"1":{"50":1,"53":1}}],["eventfanout",{"1":{"289":2}}],["event`",{"1":{"145":2,"216":1,"309":1}}],["eventinput",{"1":{"50":1,"53":1}}],["events`",{"1":{"50":1,"214":1,"219":1,"307":3}}],["events=",{"1":{"46":1,"50":1,"52":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":1}}],["events",{"0":{"48":1,"111":1,"112":1,"160":1},"1":{"16":2,"25":6,"31":1,"44":1,"46":2,"47":1,"48":4,"50":2,"51":2,"52":7,"74":1,"75":1,"76":1,"86":1,"101":2,"111":6,"112":8,"113":7,"115":1,"118":1,"124":3,"136":2,"145":1,"149":5,"150":4,"151":4,"161":3,"162":3,"164":1,"174":1,"212":3,"213":2,"214":3,"215":1,"216":10,"219":4,"220":1,"221":1,"288":3,"289":8,"290":1,"291":2,"292":1,"294":13,"297":1,"301":2,"307":4,"318":3,"327":1,"332":2,"333":2,"340":2,"342":1},"2":{"48":2,"52":2,"112":2,"113":2,"136":1,"149":2,"294":2,"332":1}}],["event",{"0":{"49":1,"50":1,"51":1,"113":1,"219":1},"1":{"3":2,"25":3,"30":1,"31":2,"45":1,"46":11,"47":3,"48":13,"49":12,"50":14,"51":2,"52":23,"53":3,"54":3,"81":1,"87":3,"88":2,"89":15,"90":1,"101":1,"104":1,"110":1,"111":4,"112":31,"113":45,"115":1,"116":14,"117":10,"118":23,"122":1,"123":6,"124":4,"130":1,"142":2,"145":1,"146":7,"149":5,"150":1,"167":1,"212":1,"214":7,"216":8,"217":2,"219":26,"220":4,"221":2,"248":1,"257":6,"272":2,"287":1,"288":5,"289":3,"294":6,"307":4,
"309":4,"310":5,"314":4,"316":4,"318":8,"322":4,"324":4,"325":3,"332":3,"333":15,"338":3},"2":{"48":1,"52":4,"53":1,"89":2,"112":2,"113":1,"149":1,"219":4,"294":3,"325":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["everything",{"1":{"2":1,"3":1,"92":1,"160":4,"161":1,"216":1,"328":1}}],["emitting",{"1":{"289":1}}],["emitter",{"1":{"289":2}}],["emit",{"1":{"154":1}}],["emerges",{"1":{"85":1}}],["emptymodel",{"1":{"39":2,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":2,"89":3,"94":2,"95":2,"96":2,"109":1,"110":1,"112":1,"113":2,"116":1,"117":4,"118":1,"121":2,"132":2,"133":2,"136":1,"138":1,"141":2,"142":1,"153":8,"155":1,"160":1,"307":5,"322":9,"324":9,"326":4,"328":1,"330":9}}],["email=support",{"1":{"304":1}}],["email=noreply",{"1":{"304":1}}],["email`",{"1":{"49":1,"255":1,"265":4}}],["email",{"1":{"27":2,"32":1,"34":1,"201":1,"225":3,"227":1,"234":1,"243":9,"253":1,"255":1,"260":6,"265":18,"302":1,"304":11,"305":4,"374":2}}],["emails",{"1":{"25":1,"38":1}}],["either",{"1":{"23":1,"31":1,"36":1,"46":2,"62":1,"70":1,"72":1,"81":3,"83":1,"89":1,"110":1,"113":1,"116":1,"118":3,"133":1,"140":1,"245":1,"310":2,"317":2,"329":1,"332":3,"341":1,"345":2}}],["efficiency",{"1":{"268":1}}],["efficient",{"1":{"76":1,"79":1,"158":2,"209":1}}],["effect",{"1":{"84":1,"102":1}}],["effects",{"1":{"58":1,"102":1,"137":1}}],["effective",{"1":{"56":1,"63":1}}],["effort",{"1":{"37":1,"42":1}}],["effortlessly",{"1":{"5":1}}],["efc4aaf2",{"1":{"21":1}}],["err",{"1":{"20":6,"24":3,"25":9,"35":9,"40":9,"43":3,"48":3,"52":9,"55":6,"66":3,"82":3,"84":9,"95":3,"96":3,"97":2,"104":9,"105":14,"109":3,"112":9,"113":3,"116":3,"117":9,"120":3,"121":2,"133":5,"135":1,"136":2,"144":3,"149":6,"161":6,"162":9,"294":6},"2":{"162":1}}],["error`",{"1":{"309":5}}],["errormsg",{"1":{"121":3}}],["errordetails",{"1":{"121":2}}],["errorf",{"1":{"105":2,"138":1},"2":{"105":1,"138":1}}],["error",{"0":{"119":1},"1":{"14":1,"17":1,"39":1,"50":1,"53":1,"55":1,"57":3,"58":3,"59":2,"60":2,"61":1,"62":3,"63"
:1,"65":3,"66":3,"68":8,"69":2,"71":1,"74":1,"78":1,"81":1,"82":1,"94":1,"95":1,"104":4,"105":6,"109":1,"110":1,"112":1,"113":2,"116":1,"117":4,"118":1,"119":2,"120":5,"121":8,"132":2,"133":2,"136":2,"138":2,"142":8,"154":3,"158":3,"160":1,"162":4,"167":1,"214":1,"216":1,"219":1,"220":1,"221":1,"268":1,"289":1,"294":1,"309":5,"333":2,"334":1,"341":1},"2":{"142":1,"162":2}}],["errors`",{"1":{"62":4,"309":1}}],["errors=false",{"1":{"62":1}}],["errors",{"1":{"3":1,"58":1,"59":1,"60":1,"61":1,"62":5,"68":1,"69":3,"92":1,"120":2,"121":7,"166":1,"278":1,"309":1,"317":3,"326":2,"333":3,"341":2},"2":{"58":1,"59":1,"60":1,"61":1,"121":3}}],["e",{"1":{"7":1,"32":1,"37":1,"42":1,"43":1,"46":2,"57":1,"58":1,"72":1,"74":1,"76":1,"79":1,"80":1,"82":1,"83":1,"93":1,"94":1,"104":4,"105":4,"109":1,"120":4,"127":1,"137":1,"145":1,"146":1,"151":3,"154":1,"158":1,"175":1,"177":1,"191":2,"193":1,"219":1,"220":1,"278":1,"279":1,"283":1,"289":9,"291":1,"309":1,"327":1,"329":1,"333":2,"338":1,"376":1},"2":{"104":1,"105":1,"120":1}}],["eng",{"1":{"173":5}}],["engine`",{"1":{"227":1,"230":1,"240":2}}],["engines`",{"1":{"373":1}}],["engines",{"1":{"21":1,"373":2}}],["engine",{"0":{"230":1,"250":1,"278":1,"297":1},"1":{"21":1,"50":1,"144":6,"166":2,"167":3,"168":1,"170":2,"173":2,"174":4,"176":1,"177":2,"188":1,"210":3,"223":1,"224":1,"225":3,"227":6,"230":3,"234":2,"238":3,"240":3,"241":9,"246":1,"248":2,"250":1,"254":2,"276":1,"278":8,"279":6,"285":2,"286":1,"288":1,"290":3,"291":1,"293":2,"295":1,"297":6,"301":1,"323":2,"324":2,"332":1,"372":1,"373":1},"2":{"240":2,"241":1}}],["engineering",{"1":{"0":1}}],["enough",{"1":{"173":1,"296":2}}],["enriching",{"1":{"152":1}}],["enhance",{"1":{"151":1}}],["env`",{"1":{"372":2}}],["env=production",{"1":{"127":6}}],["env",{"1":{"127":6,"157":3,"226":1,"240":2,"241":2,"243":6,"245":1,"289":1,"372":1},"2":{"157":3}}],["environ",{"1":{"62":1},"2":{"62":1}}],["environment`",{"1":{"46":1,"258":1}}],["environment",{"0":{"246":1,"251":1,"252":1,"376":1},"1"
:{"19":1,"21":1,"46":3,"62":3,"126":1,"146":2,"150":1,"151":1,"157":1,"164":3,"167":1,"171":2,"183":1,"225":7,"227":6,"230":1,"231":1,"243":6,"246":4,"248":1,"250":1,"251":2,"258":1,"268":3,"269":1,"280":1,"284":1,"286":1,"289":2,"291":1,"293":1,"294":1,"295":1,"298":1,"301":1,"304":1,"362":2,"372":1,"374":3,"375":1,"376":4}}],["environments",{"0":{"163":1},"1":{"5":1,"7":1,"127":1,"163":1,"164":1,"171":1,"175":1,"180":1,"206":1,"243":7,"348":1,"351":1}}],["enqueuing",{"1":{"93":1}}],["enqueued",{"1":{"77":1,"333":1}}],["enqueues",{"1":{"25":1,"34":1,"38":1,"333":1}}],["enqueue",{"1":{"25":5,"37":1,"42":1,"83":1,"97":3,"333":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["ensuring",{"1":{"64":1,"74":1,"79":1}}],["ensures",{"1":{"69":1,"75":1,"91":1,"123":2,"127":3,"157":1}}],["ensure",{"1":{"58":1,"63":1,"74":4,"75":2,"131":1,"133":1,"134":1,"136":1,"137":1,"141":1,"166":1,"168":1,"170":1,"248":1,"282":1,"284":1,"289":1,"290":1,"301":1,"326":1}}],["enabling",{"1":{"58":1,"79":1,"154":1}}],["enable",{"1":{"51":1,"58":1,"62":4,"146":1,"219":1,"221":1,"254":6,"255":2,"256":1,"258":3,"263":1,"264":2,"265":5,"268":2,"272":2,"294":1,"298":1,"299":1,"304":1,"326":1}}],["enabled=t",{"1":{"301":1}}],["enabled=true",{"1":{"234":1,"252":1,"268":1,"298":1,"304":1,"374":1}}],["enabled`",{"1":{"146":1,"245":1,"254":1,"255":2,"256":1,"258":3,"259":1,"260":3,"263":1,"264":2,"265":5,"268":2,"272":1,"299":1}}],["enabled",{"1":{"21":1,"62":2,"155":1,"156":1,"158":1,"241":3,"243":2,"254":1,"258":1,"259":1,"260":7,"268":1},"2":{"234":1}}],["enables",{"1":{"5":1,"44":1,"117":1,"128":1,"176":1,"243":1,"268":1,"318":1,"342":1}}],["encryptedinput",{"1":{"157":1}}],["encrypted",{"1":{"157":7,"188":2},"2":{"157":2}}],["encryptedenvelope",{"1":{"157":3}}],["encrypt",{"1":{"157":3,"372":1}}],["encryptionclient",{"1":{"157":1}}],["encryption",{"0":{"259":1},"1":{"157":3,"158":1,"188":2,"238":5,"241":12,"251":1,"252":11,"253":5,"259":9,"372":5},"2":{"157":1}}],["encrypting",{"1":{"152":1}}
],["encrypts",{"1":{"46":1,"157":1}}],["encounter",{"1":{"308":1,"321":1,"333":1,"345":1}}],["encounters",{"1":{"138":1}}],["encoded",{"1":{"217":1,"220":2,"221":1,"252":3,"259":3}}],["encoding",{"1":{"46":1}}],["enforces",{"1":{"82":1,"154":2,"173":1,"174":1}}],["enforce",{"1":{"40":1,"79":1,"81":1,"254":2}}],["en",{"1":{"38":1,"118":1,"216":1,"277":1,"305":1}}],["entries",{"1":{"192":1,"193":3,"194":2}}],["entrypoint",{"0":{"126":1},"1":{"171":1,"227":1},"2":{"227":1}}],["entry",{"1":{"30":1,"82":1,"126":2,"191":1}}],["enter",{"1":{"216":1,"352":1}}],["enters",{"1":{"87":1,"123":1}}],["enterprises",{"1":{"5":1}}],["entirely",{"1":{"101":1,"106":1,"153":2,"158":1}}],["entire",{"1":{"82":1,"90":2,"100":1,"101":1,"103":1,"104":1,"120":1,"123":2}}],["end=",{"1":{"161":1}}],["endpoint=<collector",{"1":{"372":1}}],["endpoint`",{"1":{"256":1,"258":1}}],["endpoints",{"1":{"44":1,"146":1,"240":1,"294":2,"318":1,"342":1}}],["endpoint",{"1":{"11":2,"12":1,"129":3,"130":1,"146":1,"148":3,"183":1,"214":1,"243":2,"254":1,"256":1,"258":1,"268":4,"297":2}}],["end",{"1":{"7":1,"14":1,"20":1,"39":1,"43":1,"52":1,"53":1,"55":2,"58":1,"59":1,"60":1,"61":3,"65":1,"66":1,"68":4,"74":1,"78":2,"81":1,"82":2,"84":1,"94":2,"95":2,"96":2,"104":3,"105":7,"109":1,"110":1,"112":2,"113":2,"116":1,"117":4,"118":1,"120":1,"121":2,"132":2,"133":2,"135":1,"136":2,"138":1,"142":4,"154":1,"155":5,"157":2,"158":3,"160":4,"161":1,"162":1,"173":3,"196":1,"212":2,"214":1,"216":1,"219":1,"220":1,"221":1,"248":2,"278":2,"313":2,"314":2,"316":6,"322":2,"324":2,"338":1}}],["extremely",{"1":{"175":1}}],["extra=",{"1":{"326":1}}],["extra",{"1":{"136":1,"142":12,"144":1,"153":1,"154":1,"155":2,"326":1,"345":1}}],["extracted",{"1":{"324":4}}],["extracting",{"1":{"157":1}}],["extract",{"1":{"130":1,"318":8,"328":1}}],["extracts",{"1":{"106":1}}],["ext",{"1":{"173":3}}],["extended",{"1":{"308":1,"333":2}}],["extends",{"1":{"66":1,"88":1,"333":2}}],["extend",{"1":{"66":2}}],["externally",{"1":{"55":3,"326":1}}],["e
xternal",{"0":{"244":1},"1":{"17":1,"25":1,"44":1,"47":1,"48":1,"55":2,"57":1,"58":1,"61":1,"72":1,"74":4,"79":2,"80":1,"82":1,"88":1,"102":1,"106":1,"111":2,"112":2,"113":2,"115":1,"118":2,"120":1,"124":1,"173":3,"212":1,"244":1,"245":2,"314":4,"316":16,"318":3,"322":6,"324":6,"328":2,"338":5,"340":5,"342":1}}],["excludes",{"1":{"251":1}}],["exclude",{"1":{"144":1}}],["excluded",{"1":{"62":1}}],["exclusive",{"1":{"137":1}}],["except",{"1":{"104":1,"105":1,"120":1}}],["exceptions",{"1":{"322":2,"324":2}}],["exceptions`",{"1":{"322":2,"324":2}}],["exception",{"1":{"58":1,"59":1,"60":1,"61":3,"104":1,"105":1,"120":1,"121":3,"210":1,"309":1}}],["exceeded",{"1":{"82":1}}],["exceeded`",{"1":{"62":1}}],["exceeds",{"1":{"82":1}}],["exceeding",{"1":{"79":1}}],["exceed",{"1":{"66":1,"157":1}}],["exhaustion",{"1":{"170":1}}],["exhausting",{"1":{"57":1}}],["exhausted`",{"1":{"62":1}}],["exists",{"1":{"124":1,"309":2}}],["exists`",{"1":{"62":1}}],["exist",{"1":{"85":1,"309":1}}],["existing",{"1":{"35":1,"36":1,"66":1,"142":1,"225":2,"334":1,"341":2,"356":1}}],["exits",{"1":{"210":1,"327":1}}],["exit",{"1":{"57":1,"68":3,"144":1,"238":3,"309":4,"373":1},"2":{"68":1}}],["exact",{"1":{"279":1}}],["exactly",{"1":{"30":2,"55":1,"72":2,"92":1,"127":1,"331":1}}],["example",{"0":{"32":1,"89":1,"127":1,"241":1,"253":1,"367":1},"1":{"15":1,"17":1,"32":1,"34":2,"35":2,"38":2,"40":2,"46":2,"49":1,"50":1,"52":6,"58":1,"62":1,"65":1,"66":3,"68":1,"72":3,"74":1,"78":1,"85":1,"89":2,"90":1,"101":1,"102":1,"104":2,"105":1,"109":1,"110":1,"112":1,"113":1,"117":2,"118":1,"124":1,"129":1,"132":1,"137":1,"141":2,"142":5,"146":5,"148":1,"150":1,"154":1,"158":1,"160":2,"162":3,"173":1,"175":1,"209":1,"214":1,"216":1,"225":1,"227":1,"230":1,"234":1,"238":1,"240":12,"241":20,"243":4,"246":1,"247":1,"248":1,"253":1,"255":1,"268":5,"283":1,"286":1,"289":2,"294":2,"296":1,"322":1,"323":2,"324":2,"325":1,"326":4,"330":1,"331":1,"333":1,"345":2,"372":2,"373":1,"374":1},"2":{"68":1,"225":1,"227":1,"234":1,"2
40":4,"241":3,"243":1}}],["examples`",{"1":{"372":1}}],["examples",{"0":{"157":1},"1":{"1":1,"15":1,"24":1,"35":1,"38":1,"39":1,"46":2,"47":1,"61":1,"90":1,"96":1,"104":1,"105":1,"106":1,"125":1,"138":1,"141":1,"157":1,"158":1,"161":1,"234":1,"238":1,"248":1,"307":1,"322":2,"325":1,"331":1,"362":2,"372":3},"2":{"24":1,"35":1,"104":1,"105":1,"141":1}}],["expanding",{"1":{"198":1}}],["expiration",{"1":{"188":1}}],["expire",{"1":{"118":1}}],["expiresin",{"1":{"157":1}}],["expires",{"1":{"109":2,"116":1,"118":4,"123":1,"124":1}}],["explanation",{"1":{"376":1}}],["explains",{"1":{"125":1,"172":1,"280":1}}],["explore",{"1":{"177":1}}],["explicitly",{"1":{"332":2,"358":1}}],["explicit",{"1":{"176":1,"317":3,"341":1}}],["expr",{"1":{"88":1}}],["expressed",{"1":{"115":1}}],["express",{"1":{"86":1,"100":1,"116":1,"118":1,"158":2}}],["expression=",{"1":{"40":2,"52":2,"74":1,"78":2,"84":1,"117":2}}],["expression`",{"1":{"39":2,"318":8,"334":1}}],["expressions",{"1":{"38":3,"40":1,"42":1,"112":1,"113":1,"217":1,"220":1,"322":4}}],["expression",{"1":{"38":4,"39":3,"40":2,"41":1,"46":8,"52":6,"54":1,"74":2,"78":4,"81":6,"88":1,"112":1,"113":2,"117":2,"129":1,"214":2,"216":4,"219":1,"220":1,"221":1,"311":2,"312":2,"318":8,"322":4,"324":4,"333":2,"345":1}}],["expense",{"1":{"294":1}}],["expensive",{"1":{"131":1,"327":1}}],["expects",{"1":{"289":1}}],["expect",{"1":{"177":1,"286":1}}],["expectations",{"0":{"177":1}}],["expected",{"1":{"26":1,"123":1,"124":1,"177":1,"276":1,"375":1}}],["experimental",{"1":{"152":1,"327":1,"329":1}}],["experience",{"0":{"203":1},"1":{"74":1,"203":1,"329":1,"369":1}}],["exposing",{"1":{"241":1,"345":1}}],["exposed",{"1":{"240":1,"243":6,"268":2}}],["exposes",{"1":{"62":1,"72":1,"272":1,"307":1,"345":1}}],["expose",{"1":{"61":1,"142":1,"189":1,"240":2,"374":1}}],["exponentially",{"1":{"60":1}}],["exponential",{"0":{"60":1},"1":{"60":2,"62":2,"63":1,"307":4,"322":8}}],["exporter",{"1":{"144":1,"372":2}}],["exports",{"1":{"144":1,"147":1}}],["exporting",{"
1":{"143":1}}],["export",{"1":{"14":2,"21":2,"39":1,"50":1,"52":1,"55":3,"58":1,"59":1,"60":1,"65":1,"66":1,"68":2,"74":1,"78":1,"84":1,"93":1,"104":3,"105":10,"112":1,"116":1,"120":1,"121":1,"132":1,"133":1,"144":1,"153":4,"155":1,"160":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"234":6,"238":3,"268":3,"304":8,"373":1}}],["exec",{"1":{"21":1,"127":2}}],["executor",{"1":{"325":3},"2":{"325":1}}],["executor`",{"1":{"325":2}}],["executing",{"1":{"19":1,"75":2,"76":2,"85":1,"110":2,"138":2,"139":1,"145":1,"307":2,"325":1,"332":1}}],["executiontimeout",{"1":{"65":1,"66":1,"105":1,"109":1,"112":1,"116":1,"121":1}}],["executions",{"1":{"3":1,"67":1,"71":1,"74":1}}],["execution",{"0":{"29":1,"85":1},"1":{"0":1,"2":1,"5":1,"16":1,"17":1,"29":6,"30":1,"31":2,"32":1,"33":2,"37":2,"42":2,"62":1,"64":1,"65":5,"66":4,"67":1,"69":1,"74":1,"82":1,"85":1,"87":2,"90":2,"92":2,"94":3,"95":1,"96":1,"97":1,"98":1,"103":1,"105":4,"106":1,"109":1,"115":1,"117":1,"121":2,"123":2,"124":2,"131":1,"132":2,"137":1,"154":1,"156":1,"158":1,"174":1,"176":3,"205":1,"222":1,"268":1,"288":4,"294":1,"308":1,"322":14,"323":4,"324":12,"326":1,"333":4,"340":2,"345":7,"377":1}}],["executed",{"1":{"38":1,"39":1,"57":1,"78":1,"81":4,"82":4,"87":1,"94":1,"95":1,"96":1,"97":2,"121":1,"141":1,"142":3,"174":1,"288":1,"328":1,"329":2}}],["executes",{"0":{"154":1},"1":{"7":1,"19":1,"92":1,"117":1,"124":1,"154":1,"158":2}}],["execute",{"0":{"18":1},"1":{"2":2,"18":1,"19":1,"20":1,"30":1,"49":1,"87":1,"90":1,"92":1,"102":1,"104":1,"115":1,"117":1,"123":2,"156":1,"223":2,"279":1,"307":3,"322":4,"328":1},"2":{"328":1}}],["f5386316b7fa",{"1":{"325":1}}],["f57c00",{"1":{"173":1}}],["fff8e1",{"1":{"173":1}}],["f1f8e9",{"1":{"173":1}}],["f1c4546cbf52",{"1":{"129":1,"148":1,"228":1,"243":1,"268":1,"372":1}}],["fprint",{"1":{"162":1},"2":{"162":1}}],["friendly",{"1":{"271":1,"333":1}}],["front",{"1":{"174":1,"189":1}}],["frontend`",{"1":{"240":1}}],["frontend",{"1":{"159":1,"162":3,"234":2,"238":1,"241":3,"243":1,
"258":2,"278":1}}],["frozen",{"1":{"127":6}}],["from=builder",{"1":{"127":4}}],["frequency",{"1":{"294":1}}],["frequent",{"1":{"174":1}}],["frequently",{"0":{"208":1},"1":{"74":1,"208":1}}],["french",{"1":{"160":4,"161":1}}],["frees",{"1":{"109":1}}],["freely",{"1":{"100":1}}],["freeing",{"1":{"87":1,"108":1}}],["freed",{"1":{"86":1,"87":2,"101":1,"104":1,"109":1,"112":1,"123":2,"124":1}}],["free",{"1":{"67":1,"74":2,"76":1,"91":1,"103":1,"110":1,"268":2,"309":2}}],["f",{"1":{"104":1,"105":3,"120":1,"141":1,"142":1,"214":1,"216":1,"219":1,"220":1,"221":1,"225":2,"289":2,"322":1,"326":2}}],["flight",{"1":{"167":1,"170":1,"210":1}}],["flush`",{"1":{"266":1}}],["flusher",{"1":{"162":2},"2":{"162":2}}],["flush=true",{"1":{"161":1}}],["flush",{"1":{"144":1,"162":1,"266":7,"294":22},"2":{"162":1}}],["floor",{"1":{"110":1,"113":2,"117":3,"118":1},"2":{"110":1,"113":1,"117":1,"118":1}}],["flow",{"1":{"92":1,"101":1,"111":1,"115":1}}],["flows",{"1":{"86":1,"115":1}}],["float32",{"1":{"289":1}}],["float",{"1":{"89":1,"116":1}}],["flexible",{"1":{"174":1}}],["flexibly",{"1":{"72":1}}],["flexibiliy",{"1":{"142":1}}],["fleet",{"1":{"87":1,"101":1,"104":1,"106":1}}],["flags",{"1":{"289":2,"296":1,"352":1,"362":6}}],["flag",{"1":{"68":5,"127":1,"146":1,"255":1,"284":1,"296":1,"309":4,"354":2,"355":1,"365":2},"2":{"68":1}}],["flavor",{"1":{"25":1}}],["flavors",{"1":{"24":1}}],["feeds",{"1":{"316":4}}],["feedback",{"1":{"203":1}}],["fe",{"1":{"258":2}}],["fetchall",{"1":{"328":1},"2":{"328":1}}],["fetched",{"1":{"154":1}}],["fetch",{"1":{"102":1,"121":2,"157":1}}],["fetching",{"1":{"72":2}}],["feature",{"1":{"52":1,"56":1,"63":1,"71":1,"73":1,"131":2,"134":1,"137":1,"152":1,"158":1,"164":1,"214":1,"298":1,"301":1,"305":1,"307":1,"309":2,"322":1,"327":1,"328":1,"329":1,"332":12,"333":1,"351":1,"375":1}}],["featured",{"1":{"4":1}}],["features",{"0":{"70":1,"348":1},"1":{"0":1,"3":3,"16":1,"33":1,"40":1,"74":1,"82":1,"84":3,"254":1,"368":1},"2":{"35":1,"40":1,"82":1,"84":4}}],["fmt",{"
1":{"25":2,"55":1,"105":2,"133":1,"138":1},"2":{"25":1,"53":1,"55":1,"59":1,"104":1,"105":2,"120":1,"133":1,"138":2,"161":2,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["fn",{"1":{"14":1,"43":2,"53":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":2,"81":1,"82":1,"94":1,"95":1,"96":1,"104":3,"105":7,"109":1,"110":1,"112":1,"113":2,"116":1,"117":4,"118":1,"120":1,"121":2,"132":1,"133":1,"135":1,"136":4,"142":2,"155":1,"160":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2,"332":2,"345":7,"346":2},"2":{"136":1,"294":1}}],["fi",{"1":{"238":1}}],["fifo",{"1":{"176":1}}],["fifteen",{"1":{"72":1}}],["fixes",{"1":{"189":1}}],["fixed",{"1":{"22":1,"85":1,"88":1,"98":1,"100":2,"101":1,"105":1,"110":1,"117":1}}],["fix",{"1":{"58":1,"72":1}}],["field`",{"1":{"220":1,"311":2,"317":2}}],["fields",{"1":{"38":3,"40":1,"46":8,"54":1,"152":1,"153":2,"154":5,"157":1,"158":3,"307":1,"317":1,"345":10,"364":1}}],["field",{"1":{"38":6,"46":1,"72":1,"130":1,"150":1,"158":1,"191":1,"214":2,"216":3,"219":2,"220":2,"221":3,"246":2,"311":2,"317":4,"326":1,"346":1,"365":1}}],["filter`",{"1":{"322":4,"324":4}}],["filtering",{"0":{"150":1},"1":{"54":2,"149":1,"316":12,"322":2,"324":2}}],["filterpayload",{"1":{"53":2,"333":1},"2":{"53":1}}],["filtered",{"1":{"51":1,"53":2,"112":1,"113":1,"195":1}}],["filter",{"0":{"53":1},"1":{"46":1,"51":1,"52":13,"53":5,"54":2,"72":2,"88":1,"112":2,"116":1,"150":2,"196":1,"311":8,"312":44,"314":6,"316":22,"317":21,"318":4,"320":2,"322":12,"324":12,"333":3,"335":14,"336":1,"337":3,"338":1,"341":3,"369":1},"2":{"52":1,"53":3}}],["filtersclient",{"1":{"332":1,"335":1}}],["filters`",{"1":{"307":3}}],["filters=runfilter",{"1":{"72":2}}],["filters=",{"1":{"52":1}}],["filters",{"0":{"51":1,"312":1,"335":1},"1":{"36":2,"51":2,"52":11,"70":1,"72":19,"112":1,"113":2,"307":6,"312":13,"316":38,"317":6,"320":4,"332":6,"335":7,"338":1,"369":1},"2":{"52":3,"332":1}}],["fill",{"1":{"46":1,"173":3,"214":1,"289":1}}],["file>",{"1":{"374":1}}],["filename",{"1":{"283":1}}]
,["filename=generate",{"1":{"238":1}}],["filename=",{"1":{"225":2,"227":1,"231":1}}],["file`",{"1":{"259":3,"262":6,"265":1,"273":3,"376":3}}],["file=",{"1":{"252":3,"253":3}}],["file",{"0":{"364":1},"1":{"10":1,"69":1,"127":1,"141":1,"225":4,"226":1,"227":2,"229":1,"238":1,"241":1,"243":1,"245":1,"252":1,"259":3,"262":6,"265":1,"273":3,"283":2,"364":3,"365":4,"366":1,"367":1,"372":3,"376":4}}],["filesystem",{"1":{"374":1}}],["files",{"1":{"10":1,"39":1,"40":1,"106":1,"141":1,"227":1,"238":1,"253":1,"364":3,"374":1}}],["fits",{"1":{"100":1,"179":1}}],["fit",{"1":{"32":1,"74":1,"175":2}}],["firms",{"1":{"188":1}}],["firewall",{"1":{"210":1}}],["firewalls",{"1":{"166":1}}],["fire",{"0":{"24":1,"25":1},"1":{"23":4,"24":1,"25":4,"93":1,"161":2},"3":{"23":2,"24":2,"25":2,"26":2,"27":2,"28":2}}],["firstextra",{"1":{"153":1}}],["firstmiddleware",{"1":{"153":2}}],["first",{"1":{"3":1,"7":2,"8":1,"14":6,"21":1,"22":1,"31":1,"38":3,"72":4,"82":1,"83":2,"84":1,"89":2,"93":1,"102":1,"105":2,"110":2,"113":1,"116":1,"117":1,"118":2,"121":2,"136":1,"142":1,"146":1,"153":4,"155":1,"178":1,"184":1,"196":1,"209":1,"218":1,"225":2,"228":2,"234":2,"237":1,"238":2,"280":1,"289":1,"294":1,"306":1,"309":1,"316":4,"331":1,"332":1,"340":1},"2":{"72":1,"153":1,"155":1}}],["five",{"1":{"22":1,"117":6,"325":1}}],["finding",{"0":{"283":1},"1":{"279":1}}],["findings",{"1":{"187":1,"188":1}}],["find",{"1":{"150":1,"211":1,"216":1,"283":1,"294":1,"338":1}}],["finish",{"1":{"87":1,"124":1,"329":1}}],["finished",{"1":{"74":1,"89":1,"109":1,"112":1,"116":1,"325":1}}],["finishes",{"1":{"13":1}}],["final",{"1":{"46":2,"90":1,"104":1,"154":3,"157":2,"268":2,"291":1,"322":4,"324":2},"2":{"157":2}}],["finally",{"1":{"7":1,"27":1,"28":1,"46":1,"55":1,"72":2,"153":4,"330":4,"331":1}}],["fine",{"1":{"22":1,"26":1,"62":1,"209":1}}],["fssl",{"1":{"7":1,"349":1}}],["focus",{"1":{"248":1}}],["foo=",{"1":{"328":1}}],["foo",{"1":{"94":1,"328":1}}],["foobarbaz",{"1":{"50":2,"52":11}}],["fork",{"1":{"340":1}}],["for
ward",{"1":{"234":3,"238":2}}],["for=",{"1":{"110":1,"113":2,"118":1}}],["for`",{"1":{"89":1,"110":1,"113":2,"145":1,"308":2,"310":7,"322":2}}],["forcibly",{"1":{"68":2}}],["formerly",{"1":{"82":1}}],["form",{"1":{"76":1,"118":2,"216":1,"217":1,"220":2,"221":1}}],["format`",{"1":{"255":1,"263":3,"274":1}}],["format=console",{"1":{"253":2}}],["formats",{"1":{"217":1}}],["format",{"0":{"65":1},"1":{"39":2,"40":1,"65":1,"91":1,"109":1,"112":1,"116":1,"129":1,"145":1,"148":3,"255":1,"263":3,"268":1,"274":1,"314":6,"333":1,"338":5,"344":2,"376":1}}],["forms",{"1":{"23":1}}],["forget",{"0":{"25":1},"1":{"23":2,"25":4,"93":1,"161":2},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["follows",{"1":{"297":1,"326":1,"330":1}}],["followed",{"1":{"102":1,"352":1}}],["following",{"1":{"7":2,"10":1,"46":1,"54":1,"69":1,"136":1,"141":1,"145":2,"162":3,"191":1,"192":1,"214":1,"216":1,"225":6,"226":2,"227":2,"228":1,"229":1,"234":3,"238":2,"240":2,"243":2,"245":1,"246":1,"247":1,"249":1,"251":1,"252":1,"268":2,"276":1,"279":1,"289":3,"291":1,"294":2,"295":1,"296":3,"298":1,"301":2,"304":1,"325":1,"360":1,"361":1,"364":1,"365":1,"372":1,"374":6,"376":2}}],["follow",{"1":{"7":1,"27":1,"38":1,"71":1,"98":1,"155":1,"164":2,"225":1,"279":1}}],["foundation",{"1":{"111":1}}],["found`",{"1":{"62":1}}],["found",{"1":{"62":1,"146":1,"238":1,"309":1,"322":1,"333":2}}],["founder",{"1":{"5":1}}],["four",{"1":{"1":1,"117":6}}],["famous",{"1":{"161":1}}],["family",{"1":{"160":8,"161":2}}],["families",{"1":{"160":4,"161":1}}],["faq",{"0":{"158":1},"3":{"208":1,"209":1,"210":1}}],["fallback",{"1":{"120":2,"121":1,"154":1}}],["false`",{"1":{"144":1}}],["false",{"1":{"48":3,"52":7,"66":1,"68":1,"112":3,"113":3,"118":1,"127":1,"149":2,"240":2,"241":2,"243":1,"294":1,"309":4,"333":2,"343":1,"365":1}}],["fancy",{"1":{"135":3,"136":11}}],["fans",{"1":{"106":1}}],["fanoutchild",{"1":{"105":2,"326":1}}],["fanoutparent",{"1":{"105":2,"326":1}}],["fanout",{"1":{"104":5,"105":16,"120":1,"158":1,"289":1,"294"
:1,"326":1},"2":{"104":4,"105":6,"120":1}}],["fan",{"1":{"85":1,"86":1,"104":1,"106":2,"107":1,"120":1,"123":1}}],["fastapi",{"1":{"152":1,"162":3,"325":4,"327":2,"329":3}}],["fast",{"1":{"115":1,"277":1}}],["faster",{"1":{"83":1,"127":1}}],["fastest",{"1":{"21":1,"180":1,"206":1,"224":1,"279":1}}],["fashion",{"1":{"74":1,"75":2}}],["facing",{"1":{"158":1,"243":1}}],["factor=0",{"1":{"296":2}}],["factor=2",{"1":{"60":1}}],["factor`",{"1":{"261":2,"307":2,"322":4}}],["factory",{"1":{"126":1}}],["factor",{"1":{"60":5,"261":2,"307":2,"322":4,"329":1}}],["factors",{"1":{"37":1,"42":1,"81":1}}],["faced",{"1":{"3":1}}],["fatalf",{"1":{"20":2,"35":3,"43":1,"55":2,"82":1,"144":1,"149":2,"161":2,"162":1},"2":{"20":1,"35":1,"43":1,"55":1,"82":1,"144":1,"149":1,"161":1,"162":1}}],["fair",{"1":{"74":2,"75":1}}],["fairly",{"1":{"17":1,"74":1,"75":1}}],["fairness",{"1":{"3":1,"74":2}}],["failover",{"1":{"202":1}}],["failing",{"1":{"60":1,"62":1,"120":1,"307":2,"322":4}}],["fail",{"1":{"57":1,"58":2,"59":3,"61":1,"104":1,"105":1,"120":1,"121":4,"322":2,"326":1}}],["fails",{"1":{"56":1,"57":2,"58":1,"119":1,"120":2,"121":4,"127":1,"154":1,"158":2,"309":1,"334":1,"341":1,"345":1}}],["failurerate",{"1":{"289":2}}],["failurehandleroutput",{"1":{"121":2}}],["failureinput",{"1":{"121":1}}],["failureworkflow",{"1":{"121":3},"2":{"121":2}}],["failures",{"1":{"56":1,"57":1,"58":1,"60":1,"63":1,"64":1,"73":1,"86":1,"120":2,"166":2,"176":2,"248":1,"333":2}}],["failure",{"0":{"121":1},"1":{"17":1,"18":1,"30":1,"58":2,"59":2,"60":2,"61":3,"62":1,"72":1,"92":1,"119":1,"120":1,"121":19,"210":1,"227":2,"268":1,"289":1,"301":1,"309":2,"322":9,"333":1},"2":{"121":3}}],["failed",{"1":{"5":1,"10":1,"20":2,"35":3,"43":1,"55":2,"57":1,"58":2,"59":2,"60":2,"64":2,"66":2,"72":6,"73":5,"82":1,"92":1,"104":3,"105":5,"120":3,"121":2,"138":1,"144":1,"149":2,"161":2,"162":2,"268":7,"291":1,"314":4,"338":1}}],["further",{"1":{"66":1,"73":1}}],["future",{"1":{"3":1,"23":1,"27":1,"34":1,"35":1,"63":1,"152":1,"29
4":1,"317":2,"347":1}}],["func",{"1":{"14":1,"39":1,"50":2,"52":1,"53":1,"55":3,"58":1,"59":1,"60":1,"61":1,"68":1,"94":1,"95":1,"104":1,"105":3,"109":1,"110":1,"112":1,"113":2,"116":1,"117":4,"118":1,"121":1,"132":1,"133":1,"136":2,"138":1,"160":2,"161":1,"162":2,"294":1}}],["functions",{"0":{"346":1},"1":{"69":1,"88":1,"120":1,"152":1,"153":1,"155":1,"158":2,"175":1,"307":1,"309":1,"310":1,"311":1,"312":1,"313":1,"314":1,"315":1,"316":1,"317":1,"318":1,"319":1,"320":1,"322":1,"323":1,"324":1,"325":1,"329":1,"330":2}}],["functionality",{"1":{"66":1,"278":1,"323":2,"324":2}}],["function",{"1":{"2":1,"13":1,"14":2,"15":1,"20":1,"55":1,"69":1,"74":1,"75":1,"76":1,"85":1,"87":1,"88":1,"89":1,"94":5,"115":1,"145":1,"154":4,"155":1,"156":1,"157":6,"158":2,"160":1,"162":1,"238":3,"268":2,"307":8,"322":12,"323":4,"332":6,"345":2,"346":2}}],["fundamentally",{"1":{"87":1,"123":1}}],["fundamental",{"1":{"2":1,"13":1}}],["fullname",{"1":{"216":2},"2":{"216":1}}],["fully",{"1":{"4":1,"55":1,"100":1,"152":1,"279":1}}],["full",{"1":{"0":1,"7":1,"11":3,"12":3,"55":2,"72":1,"85":1,"90":1,"92":1,"106":1,"120":1,"169":1,"175":1,"180":1,"206":1,"216":7,"254":1,"348":1,"377":1},"2":{"216":1}}],["ihatchetclient",{"1":{"332":12},"2":{"332":12}}],["iam",{"1":{"305":2}}],["ietf",{"1":{"303":1}}],["ignore",{"1":{"225":2}}],["ignored",{"1":{"136":1}}],["ireland",{"1":{"198":1}}],["irrelevant",{"1":{"76":1}}],["ip",{"1":{"191":2,"193":1}}],["ii",{"1":{"186":1}}],["icon",{"1":{"164":1}}],["iv",{"1":{"157":9},"2":{"157":1}}],["i+1",{"1":{"68":1,"105":2}}],["i++",{"1":{"68":1,"104":2,"105":3,"135":1,"136":1,"142":2,"294":1}}],["ispaused",{"1":{"343":1}}],["issuing",{"1":{"230":1}}],["issuer",{"1":{"241":2}}],["issue",{"1":{"58":1,"161":1}}],["issues",{"0":{"373":1},"1":{"45":1,"57":1,"63":1,"74":1,"132":1,"165":1,"166":1,"167":1,"209":1,"212":1,"215":1,"237":1,"248":1,"277":1,"279":1,"305":1}}],["isready",{"1":{"225":2,"227":1,"231":1}}],["isn",{"1":{"204":1,"333":1}}],["iso",{"1":{"196":2}}],["
isolated",{"1":{"164":1,"188":1,"268":1}}],["isolate",{"1":{"164":1}}],["isolation",{"1":{"158":1,"164":1,"188":1}}],["isolating",{"1":{"123":1}}],["iscancel",{"1":{"68":1},"2":{"68":1}}],["immutable",{"1":{"327":1}}],["immediately",{"1":{"65":1,"75":1,"76":2,"87":1,"118":1,"123":2,"309":4,"322":4,"324":4}}],["imap",{"1":{"305":1}}],["imagepullpolicy",{"1":{"289":1}}],["image",{"1":{"224":1,"225":7,"226":1,"227":6,"231":1,"278":9,"279":1,"285":2,"289":1}}],["images",{"0":{"229":1},"1":{"189":1,"229":2}}],["impact",{"1":{"268":2,"301":1}}],["implications",{"1":{"132":1}}],["implements",{"1":{"142":1,"332":2}}],["implement",{"1":{"105":1,"128":1,"142":1}}],["implementing",{"1":{"71":1,"81":1}}],["implemented",{"1":{"55":1,"105":1,"106":1}}],["implementation=",{"1":{"146":1}}],["implementation",{"1":{"55":2,"332":14}}],["improving",{"0":{"292":1},"1":{"63":1}}],["improved",{"1":{"296":1}}],["improvements",{"1":{"187":1}}],["improve",{"1":{"56":1,"143":1,"294":2,"295":1,"296":1,"298":2}}],["impossible",{"1":{"30":1}}],["imported",{"1":{"326":1}}],["importing",{"1":{"55":1}}],["importantly",{"1":{"66":1,"72":2}}],["important",{"1":{"20":1,"49":1,"58":1,"64":1,"73":1,"123":1,"185":1,"294":1,"316":2,"317":2,"322":1,"323":2,"324":2,"330":1,"341":1}}],["import",{"1":{"0":3,"7":5,"9":1,"14":1,"20":4,"24":1,"35":2,"55":6,"62":3,"72":4,"104":1,"105":3,"141":8,"144":4,"153":1,"155":1,"156":2,"157":4,"211":1,"322":2,"326":2,"330":1,"345":1}}],["itoa",{"1":{"294":2},"2":{"294":1}}],["iteration",{"1":{"100":1,"106":1}}],["items",{"1":{"106":1,"234":2,"238":1,"266":1,"294":12,"311":2,"320":2}}],["item`",{"1":{"43":1,"322":2,"324":2}}],["item",{"1":{"43":2,"104":2,"105":4,"106":1,"294":1,"317":3,"322":2,"324":2,"326":1},"2":{"43":2,"104":2,"105":2,"294":1,"326":1}}],["itself",{"1":{"46":2,"62":1,"101":1,"106":1,"154":1,"155":1,"158":2,"255":1,"329":1}}],["iops",{"1":{"287":2}}],["io",{"1":{"30":1,"143":1,"225":2,"227":4,"234":2,"238":3,"241":15,"246":1,"249":1,"278":2,"285":2,"289":5
},"2":{"225":1,"227":1,"234":2,"238":2,"241":2,"249":1,"278":1,"285":1,"289":1}}],["id>",{"1":{"374":2}}],["idle",{"1":{"123":1,"255":2}}],["id`",{"1":{"46":1,"145":9,"255":1,"258":1,"260":2,"265":1,"307":1,"309":4,"311":6,"312":8,"313":2,"314":4,"316":19,"317":19,"319":4,"320":6,"322":6,"324":9,"376":1}}],["ids`",{"1":{"312":2,"314":2,"316":6,"317":5,"338":1}}],["ids",{"1":{"35":1,"43":1,"52":1,"70":1,"72":17,"152":1,"154":1,"312":2,"314":2,"316":18,"317":6,"328":1,"335":1,"338":2,"341":4}}],["ids=workflow",{"1":{"72":2}}],["ids=",{"1":{"35":1,"72":4,"328":1}}],["id=<app",{"1":{"374":1}}],["id=<client",{"1":{"374":1}}],["id=1234",{"1":{"55":1}}],["id=event",{"1":{"52":1}}],["id=cron",{"1":{"40":2}}],["id=",{"1":{"35":1,"332":24,"333":40,"334":5,"335":6,"336":2,"337":8,"338":5,"339":3,"340":10,"341":8,"342":6,"343":6,"344":5,"345":12,"346":3}}],["id=scheduled",{"1":{"35":2}}],["ide",{"1":{"326":1}}],["identity",{"1":{"304":1}}],["identifier",{"1":{"334":1}}],["identified",{"1":{"284":1}}],["identifying",{"1":{"318":2}}],["identify",{"1":{"283":1,"310":1,"322":1,"324":1}}],["identically",{"1":{"113":1,"327":1,"329":1}}],["ideal",{"1":{"81":1,"101":1,"118":1}}],["ideas",{"1":{"29":1}}],["idempotency",{"1":{"62":2}}],["idempotent",{"1":{"30":1,"32":1,"58":1,"61":1,"62":4,"63":1,"176":1}}],["id",{"1":{"16":2,"25":1,"35":20,"40":18,"46":3,"52":4,"55":2,"72":11,"81":2,"89":6,"94":1,"112":2,"116":3,"129":1,"130":3,"132":6,"133":4,"136":4,"144":1,"145":8,"154":2,"161":5,"162":1,"191":3,"193":1,"221":9,"228":1,"238":2,"243":1,"255":1,"258":1,"260":2,"264":2,"265":1,"268":18,"294":3,"304":1,"307":1,"309":8,"311":10,"312":22,"313":2,"314":4,"316":39,"317":29,"319":12,"320":14,"322":11,"324":17,"332":2,"333":13,"334":3,"335":6,"336":1,"338":7,"340":15,"341":7,"343":5,"344":8,"345":2,"372":1,"373":1,"376":1},"2":{"35":5,"40":3,"52":2,"72":3,"81":1,"89":1,"112":1,"129":1,"132":2,"133":2,"136":2,"161":1,"162":1,"221":1}}],["if=",{"1":{"113":1,"117":2}}],["if`",{"1":{"113":4,"117":
4,"322":4}}],["if",{"0":{"279":1},"1":{"4":1,"7":2,"11":1,"20":3,"21":2,"22":1,"24":2,"25":6,"29":1,"30":1,"34":1,"35":5,"36":1,"37":2,"38":2,"40":4,"42":2,"43":2,"46":3,"48":2,"49":2,"51":1,"52":4,"55":1,"56":1,"57":3,"58":2,"59":3,"60":2,"61":1,"62":2,"64":1,"65":5,"66":3,"68":4,"69":2,"72":1,"74":3,"75":3,"76":5,"82":2,"83":3,"84":3,"85":1,"88":1,"89":2,"92":1,"96":1,"100":1,"102":4,"104":2,"105":2,"109":2,"112":3,"113":6,"115":2,"116":1,"117":8,"118":7,"120":2,"121":1,"123":3,"124":1,"127":3,"129":1,"130":1,"132":5,"133":1,"134":1,"136":6,"137":2,"141":1,"142":1,"144":2,"145":2,"146":2,"149":2,"153":2,"154":2,"156":1,"157":3,"158":7,"164":1,"166":1,"167":4,"168":2,"169":4,"170":2,"171":1,"174":1,"175":1,"177":2,"180":2,"182":1,"190":1,"199":1,"202":1,"207":1,"209":3,"210":1,"216":2,"218":2,"225":2,"226":2,"229":1,"230":1,"238":3,"243":2,"248":1,"252":1,"254":1,"255":1,"258":1,"260":4,"268":1,"272":1,"276":1,"277":1,"279":2,"293":2,"294":8,"295":2,"296":3,"297":1,"301":2,"307":9,"309":19,"310":1,"314":2,"316":2,"317":6,"320":3,"322":24,"323":12,"324":10,"325":1,"326":2,"332":1,"333":15,"334":1,"341":1,"343":2,"344":2,"345":4,"353":1,"355":1,"357":1,"358":1,"364":1,"365":1,"372":1,"373":1,"376":6,"377":1}}],["inherited",{"1":{"270":4,"345":1}}],["inherit",{"1":{"262":2}}],["inherently",{"1":{"100":1}}],["inactive",{"1":{"210":1}}],["ingressclassname",{"1":{"241":2}}],["ingress`",{"0":{"241":1}}],["ingresses",{"1":{"240":1}}],["ingress",{"1":{"189":1,"240":3,"241":14},"2":{"241":1}}],["ingesting",{"1":{"268":1}}],["ingestion",{"1":{"3":1,"25":1,"263":2}}],["ingested",{"1":{"214":1}}],["ingest",{"1":{"174":1}}],["injected",{"1":{"153":1,"154":3,"158":1,"323":2,"324":2,"326":1,"329":1,"330":1}}],["injection",{"0":{"329":1},"1":{"152":2,"155":1,"157":1,"329":3}}],["injecting",{"1":{"46":1,"152":1}}],["inject",{"1":{"145":1,"152":1,"154":1,"155":1,"246":1,"329":1,"330":1}}],["initiate",{"1":{"294":1}}],["initialized",{"1":{"161":1}}],["initializing",{"1":{"141":1}}],["
initially",{"1":{"66":1}}],["initial",{"1":{"66":1,"139":1,"251":1,"309":1}}],["init<",{"1":{"153":1}}],["init",{"1":{"142":1,"153":1,"154":2,"157":2,"158":1},"2":{"142":1,"153":1,"154":1,"157":1}}],["invites",{"1":{"254":2,"302":1}}],["invites`",{"1":{"254":2}}],["invite`",{"1":{"192":1}}],["invitation",{"1":{"192":1}}],["invalid",{"1":{"58":1,"168":1,"334":1,"341":1}}],["invoice",{"1":{"213":1}}],["invoices",{"1":{"38":1,"45":1}}],["invoking",{"0":{"55":1}}],["invokes",{"1":{"69":1}}],["invoke",{"1":{"13":2,"24":4}}],["involved",{"1":{"217":1}}],["involves",{"1":{"32":1,"281":1}}],["involve",{"1":{"7":1}}],["invocationcount",{"1":{"333":1}}],["invocations",{"1":{"268":6}}],["invocation",{"1":{"0":1,"3":1,"158":1,"333":1}}],["inserts",{"1":{"294":1}}],["insert",{"1":{"241":2}}],["insecure`",{"1":{"254":1,"260":1,"264":1}}],["insecure=true",{"1":{"253":1}}],["insecure",{"1":{"225":4,"227":3,"240":2,"241":2,"254":1,"260":1,"264":1}}],["inside",{"1":{"62":1,"96":1,"101":1,"104":2,"105":3,"145":1,"154":1,"155":1,"158":1,"221":1,"230":1,"255":1,"323":2,"324":2}}],["inspects",{"1":{"87":1}}],["inspect",{"1":{"53":1,"72":1,"87":1,"95":3,"96":3,"106":1,"121":1,"142":2,"181":1},"2":{"53":1,"95":1,"96":1,"121":1}}],["instrumented",{"1":{"341":1}}],["instrumentations",{"1":{"144":1}}],["instrumentation",{"1":{"144":2}}],["instrument",{"1":{"144":2,"209":1}}],["instrumentor",{"1":{"144":9,"341":1},"2":{"144":3}}],["instructions",{"1":{"7":3,"9":1,"164":1,"268":1,"279":1}}],["instantiation",{"1":{"331":1}}],["instance=hatchet",{"1":{"238":1}}],["instance=",{"1":{"146":1}}],["instances",{"1":{"17":1,"37":1,"40":1,"42":1,"61":1,"67":2,"72":4,"73":5,"74":5,"75":6,"76":4,"77":2,"176":1,"248":1,"275":1,"279":1,"290":1,"296":1,"297":1,"330":1,"348":1,"351":1}}],["instance",{"0":{"228":1},"1":{"16":1,"21":1,"22":1,"24":1,"27":1,"34":2,"37":1,"42":1,"43":1,"46":2,"49":1,"52":1,"55":1,"66":1,"72":2,"74":4,"75":8,"76":8,"93":1,"94":1,"137":1,"158":3,"164":3,"177":1,"209":2,"216":1,"222":
2,"224":1,"225":7,"227":1,"228":1,"245":2,"247":1,"268":4,"286":2,"290":3,"292":1,"293":2,"294":1,"296":1,"297":4,"298":1,"307":1,"311":6,"317":6,"323":2,"324":2,"331":1,"332":24,"333":4,"346":5,"347":1,"348":1,"358":2,"360":1,"361":1,"362":1,"374":1}}],["instability",{"1":{"170":1}}],["installed",{"1":{"225":1,"227":1,"233":1,"236":1,"237":1,"238":1,"350":1,"359":1}}],["installs",{"1":{"127":2}}],["installing",{"0":{"235":1},"1":{"10":1}}],["install`",{"1":{"9":2,"127":1}}],["installation",{"0":{"347":1,"349":1,"350":1},"1":{"9":1,"10":1,"206":1,"234":1,"347":1,"350":1,"374":1},"3":{"7":1,"8":1}}],["install",{"1":{"7":8,"10":4,"11":1,"127":15,"144":4,"225":1,"227":1,"234":1,"238":1,"249":1,"349":6,"364":2,"372":1},"2":{"7":2,"127":1,"349":2}}],["instead",{"1":{"7":1,"73":1,"77":1,"85":1,"88":2,"89":1,"124":1,"127":1,"144":1,"157":1,"216":1,"243":1,"253":1,"307":1,"309":3,"320":1,"322":2,"324":2,"326":1,"331":1,"344":1,"377":1}}],["inclusive",{"1":{"322":2,"324":2}}],["including",{"1":{"36":1,"72":1,"103":1,"148":1,"251":1,"254":1,"268":2,"309":1,"316":4,"322":1,"345":8}}],["included",{"1":{"283":1}}],["include",{"1":{"34":1,"38":1,"61":1,"101":1,"139":1,"145":3,"151":1,"152":1,"158":1,"182":1,"193":2,"198":1,"316":2,"346":1}}],["includes",{"1":{"11":1,"12":1,"25":1,"129":1,"220":2,"308":1,"333":2,"368":1}}],["increment",{"1":{"309":1,"327":1,"333":1}}],["increasing",{"1":{"294":1}}],["increase",{"1":{"60":3,"167":1,"169":1,"170":1,"209":1,"231":3,"293":3,"294":2,"295":1,"296":1}}],["increases",{"1":{"60":1,"106":1,"288":1,"292":1}}],["incidents",{"1":{"182":1}}],["incident",{"1":{"180":1,"200":1,"201":1}}],["incomplete",{"1":{"279":1}}],["incoming",{"1":{"46":9,"47":1,"50":1,"51":1,"157":1,"212":1,"214":1,"265":1,"287":1,"318":9,"342":1,"374":1}}],["incorrectly",{"1":{"333":1}}],["incorrect",{"1":{"168":1}}],["incorporating",{"1":{"71":1}}],["inconsistent",{"1":{"74":1}}],["inconsistencies",{"1":{"58":1}}],["intrigue",{"1":{"160":4,"161":1}}],["introduce",{"1":{"10
2":1,"132":1}}],["introduction",{"0":{"222":1},"1":{"29":1,"306":1,"325":1,"332":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["intn",{"1":{"74":1,"78":1,"110":1,"113":2,"117":3,"118":1},"2":{"74":1,"78":1,"110":1,"113":1,"117":1,"118":1}}],["int32",{"1":{"74":1,"78":1}}],["int",{"1":{"55":2,"89":2,"104":2,"105":7,"116":1,"160":2,"214":5,"216":5,"289":4,"307":1,"325":4,"326":1,"328":1,"344":2,"362":2}}],["intended",{"1":{"146":1,"160":1,"255":1,"309":2,"316":2,"317":2,"322":2,"323":4,"324":6,"327":1,"329":1}}],["intensive",{"1":{"74":1,"76":1,"107":1,"137":3,"138":11}}],["intention",{"1":{"327":1}}],["intentionally",{"1":{"169":1,"174":1}}],["intentional",{"1":{"58":2,"59":2,"60":2,"61":1}}],["intent",{"1":{"46":5,"214":7},"2":{"214":1}}],["integer",{"1":{"65":1,"81":2,"196":2,"307":3,"322":4}}],["integrating",{"1":{"305":1}}],["integrations",{"1":{"45":1,"217":1,"251":1}}],["integration",{"1":{"44":1,"46":1,"158":1,"265":1,"268":1,"318":1,"342":1}}],["integrate",{"1":{"15":1,"374":1}}],["internet",{"1":{"374":1}}],["internal",{"1":{"162":1,"225":2,"227":2,"234":1,"245":1,"253":1,"254":2,"262":20,"294":1,"295":2,"297":4}}],["internals",{"1":{"94":1}}],["internally",{"1":{"25":1,"90":1,"326":1}}],["inter",{"1":{"223":1}}],["interfere",{"1":{"164":1}}],["interfaces",{"1":{"307":1}}],["interface",{"1":{"35":1,"40":2,"52":3,"55":1,"69":1,"93":1,"132":2,"136":2,"142":1,"332":1,"333":1,"348":1,"368":1}}],["intermittent",{"1":{"170":1}}],["intermingle",{"1":{"164":1}}],["intermediate",{"1":{"85":1,"90":1,"100":1,"115":1,"124":1}}],["intersection",{"1":{"155":2}}],["interval`",{"1":{"254":2,"265":1,"266":5}}],["interval",{"1":{"146":1,"225":3,"227":2,"231":1,"254":2,"265":1,"266":5,"333":2}}],["intervention",{"1":{"61":1,"73":1}}],["interruptions",{"1":{"109":1,"176":1}}],["interrupted",{"1":{"65":1,"88":1,"89":2,"109":1,"112":1,"170":1,"210":1}}],["interruptctx",{"1":{"20":2}}],["interacted",{"1":{"93":1}}],["interact",{"1":{"74":1,"102":1,"323":2,"324":2,"332":1,"
348":1,"368":1,"373":1}}],["interacts",{"1":{"58":1,"221":1}}],["interactivity",{"1":{"220":1,"221":4}}],["interactively",{"1":{"354":1,"367":1}}],["interactive",{"0":{"221":1},"1":{"45":1,"217":2,"221":1}}],["interactions`",{"1":{"221":1}}],["interaction",{"1":{"127":1,"217":1,"221":10}}],["interacting",{"1":{"16":1,"93":1,"306":1,"307":4,"312":1,"313":1,"316":1,"332":2,"335":1,"336":1,"340":1,"347":1}}],["input`",{"1":{"153":1,"309":1}}],["input=input",{"1":{"331":1}}],["input=childinput",{"1":{"104":1,"105":2,"294":1,"326":1}}],["input=taskinput",{"1":{"55":1}}],["input=simpleinput",{"1":{"43":1}}],["input=dynamiccroninput",{"1":{"40":2}}],["input=helloinput",{"1":{"25":2}}],["inputs",{"1":{"15":1,"43":7,"76":1,"92":1,"115":1,"152":1,"157":2,"174":1,"205":1,"326":6,"331":1}}],["input",{"0":{"15":1},"1":{"10":1,"14":10,"15":2,"16":1,"24":1,"25":2,"28":1,"32":1,"35":1,"36":1,"39":4,"40":1,"41":1,"43":6,"46":2,"50":3,"52":4,"53":4,"55":10,"58":2,"59":2,"60":2,"61":2,"65":5,"66":5,"68":4,"72":2,"73":1,"74":7,"78":11,"81":9,"82":3,"84":1,"85":1,"88":2,"89":2,"90":1,"94":9,"95":7,"96":5,"97":5,"104":4,"105":30,"106":1,"109":6,"110":1,"112":9,"113":2,"115":2,"116":4,"117":4,"118":1,"121":7,"129":1,"132":4,"133":3,"136":1,"138":1,"141":1,"142":1,"149":1,"150":1,"152":1,"153":37,"154":12,"155":8,"157":12,"158":4,"160":2,"167":1,"174":1,"204":1,"214":14,"216":19,"219":13,"220":23,"221":13,"294":7,"307":2,"309":2,"311":2,"316":2,"317":2,"322":14,"323":2,"324":13,"326":12,"328":1,"330":9,"331":7,"332":3,"333":6,"334":3,"341":2,"344":1,"345":20},"2":{"14":3,"39":1,"50":1,"52":2,"53":1,"65":2,"66":2,"68":1,"74":3,"78":5,"81":2,"94":2,"95":2,"96":2,"104":1,"105":6,"109":2,"112":5,"116":2,"121":1,"129":1,"153":2,"154":1,"155":4,"157":2,"214":3,"216":7,"219":4,"220":4,"221":2,"294":1,"322":1,"326":2,"331":1,"345":1},"3":{"13":1,"14":1,"15":1,"16":1,"17":1,"18":1}}],["inferred",{"1":{"158":1,"332":6,"345":2,"346":4}}],["influence",{"1":{"17":1}}],["information",{"1":{"16":1,"65":1
,"94":1,"121":1,"146":1,"149":1,"151":1,"218":1,"246":1,"249":1,"286":1,"297":1,"322":1,"371":1}}],["info",{"1":{"7":1,"9":1,"21":3,"35":1,"36":1,"38":1,"43":1,"46":2,"48":1,"49":1,"50":2,"52":1,"56":1,"62":2,"65":1,"72":3,"74":1,"81":1,"85":1,"89":1,"90":1,"94":1,"100":1,"104":1,"105":3,"109":1,"112":1,"124":1,"127":5,"130":1,"131":1,"134":1,"138":1,"141":4,"142":9,"144":1,"146":3,"147":1,"149":1,"152":1,"153":1,"154":2,"155":1,"156":1,"157":1,"190":1,"209":1,"217":1,"219":1,"220":1,"225":2,"230":1,"255":1,"268":1,"278":2,"280":1,"284":1,"289":2,"305":1,"325":1,"364":1,"368":1},"2":{"105":1,"141":2,"142":2}}],["infra",{"1":{"4":1,"248":1}}],["infrastructure",{"1":{"2":1,"158":1,"173":1,"178":1,"180":1,"184":1,"188":1,"200":1,"248":1,"294":1}}],["indefinitely",{"1":{"64":1}}],["independently",{"1":{"103":1,"104":1,"158":1,"174":1}}],["independent",{"1":{"49":1,"92":1,"188":1}}],["index`",{"1":{"145":1}}],["indexed",{"1":{"145":1}}],["index",{"1":{"11":1,"12":1,"104":2,"105":2,"145":1,"333":2}}],["indexing",{"1":{"5":1}}],["indicating",{"1":{"46":1}}],["indicates",{"1":{"46":1,"118":1,"309":1}}],["individually",{"1":{"120":1}}],["individual",{"1":{"22":1,"36":1,"81":1,"105":1,"148":1,"152":1,"216":1,"252":1,"278":1,"307":1,"320":1,"344":1}}],["industries",{"1":{"5":1}}],["inline",{"1":{"1":1,"86":1,"377":1}}],["urandom",{"1":{"374":1}}],["uri`",{"1":{"259":1}}],["uri",{"1":{"252":1,"259":1,"372":1}}],["uri=gcp",{"1":{"372":1}}],["uri=",{"1":{"252":1}}],["url>",{"1":{"372":1,"374":1}}],["url=<webhook",{"1":{"374":1}}],["url=http",{"1":{"253":1}}],["url=",{"1":{"252":1,"253":1,"298":1}}],["url`",{"1":{"220":1,"225":2,"245":1,"246":1,"254":1,"255":5,"261":1,"264":2,"268":1,"270":2,"284":1,"299":1}}],["urlencoded`",{"1":{"216":1}}],["url",{"1":{"11":1,"12":1,"46":3,"47":1,"130":3,"157":6,"214":4,"216":3,"219":5,"220":7,"221":3,"225":5,"227":5,"230":1,"240":2,"241":2,"243":3,"245":1,"246":2,"254":1,"255":1,"261":1,"264":3,"268":2,"374":4}}],["u",{"1":{"225":2,"227":1,"231
":1,"238":1}}],["utf8",{"1":{"157":3}}],["utilities",{"1":{"347":1}}],["utility",{"1":{"112":1,"307":1}}],["utilized",{"1":{"300":1}}],["utilizing",{"1":{"139":1,"209":1}}],["utilization",{"1":{"79":1,"139":1,"169":1,"209":1,"268":1,"287":1}}],["util",{"1":{"105":1,"142":2}}],["utc",{"1":{"35":3,"37":1,"42":1,"72":2,"84":1},"2":{"35":1,"72":1,"84":1}}],["ui",{"0":{"205":1},"1":{"76":1,"173":1,"174":1,"206":1,"223":1,"225":2,"227":2,"300":1}}],["uuid4",{"1":{"89":1,"116":1}}],["uuid",{"1":{"52":1,"196":1,"328":2,"338":2,"344":1},"2":{"52":1}}],["up`",{"1":{"225":2}}],["upon",{"1":{"191":1}}],["upgrading",{"0":{"275":1},"1":{"190":1,"275":1,"276":1,"278":2,"280":3}}],["upgrade`",{"1":{"254":1}}],["upgrades",{"1":{"180":1,"182":1,"254":1}}],["upgrade",{"0":{"278":1},"1":{"147":1,"202":1,"275":1,"276":2,"277":1,"278":4,"279":4}}],["uptime",{"0":{"200":1},"1":{"182":1,"200":1,"202":1}}],["uploading",{"1":{"157":1}}],["uploadtos3",{"1":{"157":2}}],["upload",{"1":{"137":1,"157":2}}],["uploads",{"1":{"25":1,"158":1}}],["upfront",{"1":{"85":1,"92":1,"100":1,"104":1,"108":1,"110":1,"111":1,"113":1,"124":1}}],["update`",{"1":{"112":1,"312":2,"317":7,"318":2,"319":2,"356":1}}],["updated",{"1":{"52":1,"106":1,"112":2,"116":1,"169":1,"312":2,"317":3,"318":8,"319":2,"335":1,"341":2,"342":1,"374":1}}],["updates",{"1":{"35":1,"63":1,"159":1,"174":2,"200":1,"201":1,"223":1,"267":2,"312":2,"335":2,"341":2,"342":1}}],["update",{"1":{"35":5,"81":1,"112":4,"116":1,"127":3,"157":2,"240":1,"261":4,"267":2,"278":1,"279":1,"285":1,"312":6,"317":1,"318":6,"319":8,"335":2,"341":2,"342":3,"356":2},"2":{"35":2,"157":2}}],["updating",{"0":{"356":1},"1":{"32":1,"35":1,"73":1,"102":1}}],["upserted",{"1":{"333":1,"339":1}}],["upserts",{"1":{"333":1,"339":1}}],["upsertlabels",{"1":{"136":2,"333":1}}],["upsert",{"1":{"80":1,"82":2,"136":4,"333":1,"339":2},"2":{"82":1,"136":1}}],["upstream",{"1":{"32":1,"121":3,"124":1,"241":1,"309":2,"322":2}}],["ups",{"1":{"5":1}}],["unpaused",{"1":{"343":1}}],["unpa
use",{"1":{"343":3}}],["unprocessed",{"1":{"73":1}}],["unresponsive",{"1":{"231":1}}],["unhandled",{"1":{"210":1}}],["unhappy",{"1":{"160":8,"161":2}}],["unhealthy",{"1":{"146":2}}],["undefined",{"1":{"157":2,"333":3}}],["underutilizing",{"1":{"209":1}}],["undergoes",{"1":{"187":1}}],["underlying",{"1":{"55":1,"58":1}}],["understand",{"1":{"2":1,"62":1,"126":1,"184":1,"205":1}}],["unchanged",{"1":{"154":1}}],["uncollectable",{"1":{"146":6}}],["unset",{"1":{"136":2,"255":1,"314":2,"354":2}}],["unavailable",{"1":{"132":4}}],["unavailability",{"1":{"57":1}}],["unknown>",{"1":{"160":1}}],["unknown",{"1":{"86":1,"157":1,"377":1}}],["unlock",{"1":{"104":1,"105":1},"2":{"104":1,"105":1}}],["unless",{"1":{"83":1,"142":1}}],["unlike",{"1":{"0":1,"109":1,"110":1,"113":1,"158":1}}],["unnecessary",{"1":{"64":1,"69":1,"175":1}}],["unfortunately",{"1":{"29":1}}],["until=datetime",{"1":{"72":2}}],["until",{"1":{"22":1,"24":4,"25":1,"72":2,"74":1,"82":1,"101":1,"106":1,"109":1,"111":1,"112":1,"113":1,"124":4,"132":1,"134":1,"136":1,"137":1,"138":1,"289":1,"290":1,"322":4,"324":4,"325":2,"333":3,"337":1,"338":1}}],["union",{"1":{"152":1}}],["unintended",{"1":{"58":1,"137":1}}],["unique",{"1":{"40":1,"46":1,"268":2}}],["universaltabs",{"1":{"7":2}}],["units",{"1":{"81":2,"82":7,"107":1,"322":1,"345":1}}],["units=1",{"1":{"81":1,"82":1}}],["unit",{"1":{"2":1,"13":1,"65":1,"158":1,"323":4,"324":4}}],["unexpectedly",{"1":{"210":1}}],["unexpected",{"1":{"3":1,"10":1,"102":1,"123":1,"278":1,"327":1,"333":1}}],["usually",{"1":{"171":1,"183":1}}],["usa",{"1":{"167":1}}],["usage",{"0":{"47":1,"52":1,"54":1,"328":1,"330":1},"1":{"22":1,"113":1,"129":1,"146":1,"148":1,"182":1,"207":1,"210":1,"247":1,"289":1,"294":2,"310":2,"326":2,"331":2,"333":2,"362":2}}],["usr",{"1":{"127":1,"154":1}}],["us",{"1":{"38":1,"74":1,"175":1,"177":2,"189":1,"190":1,"198":5,"199":1,"277":1,"305":2}}],["using",{"0":{"1":1,"9":1,"138":1,"141":1,"142":1,"155":1,"355":1,"368":1},"1":{"7":5,"9":1,"25":2,"26":1,"31":1,"
37":1,"42":1,"46":2,"50":1,"52":1,"53":1,"56":1,"65":2,"66":1,"70":1,"72":1,"80":1,"82":3,"88":1,"89":1,"94":1,"96":1,"102":1,"104":1,"105":1,"109":1,"110":2,"111":1,"112":2,"113":2,"116":1,"117":1,"118":1,"122":1,"126":1,"127":2,"129":1,"136":2,"138":1,"148":1,"149":1,"155":1,"158":2,"160":1,"161":1,"162":1,"164":1,"168":1,"176":1,"207":3,"209":2,"216":1,"226":2,"228":2,"238":1,"241":1,"243":2,"248":2,"250":1,"253":1,"284":1,"288":1,"290":1,"294":3,"302":1,"322":1,"323":2,"324":2,"325":2,"327":1,"331":1,"332":1,"351":1,"352":1,"353":1,"354":1,"355":1,"356":1,"357":1,"362":2,"364":1,"365":2,"368":1,"374":1}}],["useful",{"1":{"16":1,"24":1,"25":1,"26":1,"27":1,"32":2,"40":2,"46":2,"49":1,"60":1,"61":1,"64":1,"68":2,"74":1,"75":1,"76":1,"77":1,"81":1,"101":1,"106":1,"131":1,"132":1,"133":1,"136":1,"140":1,"154":1,"162":1,"205":1,"243":2,"308":1,"309":1,"322":2,"323":6,"324":7,"327":1,"329":1,"333":2}}],["uses",{"0":{"32":1},"1":{"1":1,"46":1,"52":1,"130":1,"152":3,"158":1,"159":1,"210":1,"219":1,"226":1,"229":1,"251":1,"252":1,"333":1,"354":1,"358":1}}],["userdata",{"1":{"333":1}}],["user3",{"1":{"294":1}}],["user2",{"1":{"294":1}}],["user1",{"1":{"294":1}}],["userguide",{"1":{"277":1}}],["user=hatchet",{"1":{"225":2,"227":1,"231":1}}],["userinfra",{"1":{"173":2}}],["userid",{"1":{"81":5,"112":1,"149":2,"154":4,"294":1},"2":{"81":1,"112":1,"154":1}}],["usereventcondition",{"1":{"89":1,"112":2,"113":8,"116":2,"118":4,"310":2},"2":{"112":1,"113":2,"116":1,"118":2}}],["userlimit",{"1":{"81":2}}],["userunits",{"1":{"81":2}}],["username=your",{"1":{"304":1}}],["username`",{"1":{"255":1,"264":1,"265":1,"268":1}}],["username",{"1":{"46":2,"220":1,"221":7,"245":1,"247":1,"255":1,"264":1,"265":1,"268":1,"303":1,"304":1,"305":2},"2":{"220":1,"221":1}}],["users",{"1":{"7":1,"38":1,"74":1,"75":1,"81":1,"82":1,"187":1,"197":1,"219":1,"243":3,"374":1}}],["user",{"1":{"1":1,"5":1,"24":1,"27":1,"34":1,"48":2,"49":1,"50":2,"55":3,"71":1,"74":2,"76":1,"80":1,"81":5,"109":1,"112":11,"11
3":2,"116":2,"121":2,"129":3,"136":2,"142":2,"149":4,"162":1,"191":3,"192":1,"193":3,"219":8,"220":8,"221":8,"225":4,"227":4,"238":1,"243":3,"247":3,"251":1,"252":1,"260":3,"294":7,"306":1,"332":1,"333":7,"348":1,"368":1,"374":1},"2":{"81":1,"112":1,"129":1,"219":1,"220":1,"221":1}}],["use",{"0":{"3":1,"58":1,"86":1,"107":1,"139":1,"151":1},"1":{"7":1,"10":2,"13":1,"25":3,"34":2,"35":1,"36":2,"38":3,"43":4,"46":5,"49":1,"55":3,"62":2,"63":1,"72":6,"74":2,"75":2,"76":1,"77":1,"78":1,"83":1,"84":1,"85":2,"100":3,"101":1,"104":1,"105":1,"106":1,"110":1,"115":1,"117":1,"118":1,"119":1,"121":1,"124":1,"127":1,"130":2,"136":2,"139":1,"140":1,"142":4,"144":2,"150":1,"151":1,"154":1,"157":1,"158":3,"160":1,"164":1,"188":1,"189":2,"200":1,"201":1,"207":1,"208":1,"212":1,"214":1,"216":3,"217":1,"220":2,"225":3,"226":1,"230":1,"243":2,"246":1,"249":1,"253":1,"255":1,"264":1,"278":1,"279":1,"284":2,"289":4,"294":1,"301":1,"307":5,"309":3,"310":2,"316":2,"317":4,"320":1,"323":2,"324":2,"325":1,"328":2,"330":1,"331":2,"332":1,"333":2,"344":1,"355":1,"366":1,"368":1,"372":1,"374":2,"376":10},"2":{"144":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["used",{"1":{"0":1,"29":1,"30":1,"35":1,"40":1,"46":2,"73":2,"94":1,"109":1,"113":1,"117":1,"135":2,"137":1,"146":1,"149":1,"207":1,"216":1,"243":2,"249":1,"268":4,"289":1,"307":2,"309":4,"310":1,"318":8,"322":4,"323":6,"324":10,"326":1,"327":1,"329":1,"333":3,"354":1,"372":1}}],["mp4",{"1":{"369":2,"370":2,"371":2}}],["mtls",{"1":{"273":1}}],["ms",{"1":{"268":1,"288":2}}],["msg",{"1":{"254":1}}],["msgqueue",{"1":{"225":2,"226":1,"227":1,"261":3,"295":1}}],["mkdir",{"1":{"238":1}}],["mb",{"1":{"231":1}}],["myinput",{"1":{"322":2,"345":1}}],["myaccount",{"1":{"305":1}}],["mytask",{"1":{"154":1},"2":{"154":1}}],["mymiddleware",{"1":{"153":2}}],["m",{"1":{"95":1,"96":1}}],["md",{"1":{"305":1}}],["mdx",{"1":{"64":1,"65":1,"67":1,"70":1,"132":2,"134":1,"136":1,"224":3,"332":17,"333":2}}],["md`",{"1":{"10":8,"11":1,"12":1}}],["much",{"
1":{"209":2}}],["mutex",{"1":{"104":1,"105":1},"2":{"104":1,"105":1}}],["mu",{"1":{"104":1,"105":1},"2":{"104":2,"105":2}}],["multistepworkflow",{"1":{"121":1},"2":{"121":1}}],["multi",{"1":{"78":1,"86":1,"90":1,"98":1,"104":1,"121":3,"154":1,"158":1,"164":3,"175":1,"224":1}}],["multipleconcurrencykeys",{"1":{"78":1}}],["multiple",{"0":{"78":1,"164":1},"1":{"2":1,"20":1,"43":2,"46":1,"49":1,"50":1,"55":1,"66":1,"74":2,"75":1,"76":1,"78":1,"81":1,"83":1,"89":1,"104":1,"105":3,"107":1,"116":1,"118":4,"130":3,"131":1,"133":1,"135":1,"136":1,"139":1,"141":1,"145":1,"153":1,"164":1,"176":1,"197":1,"248":3,"289":1,"294":1,"297":1,"322":5,"324":4,"333":3,"340":1,"345":1,"348":1,"351":1}}],["mustparse",{"1":{"52":1},"2":{"52":1}}],["medium",{"1":{"325":2}}],["media",{"1":{"162":1}}],["measurements",{"1":{"288":1}}],["mean",{"1":{"167":1}}],["meantime",{"1":{"123":1}}],["meaningless",{"1":{"102":1}}],["meaningful",{"1":{"69":1}}],["meaning",{"1":{"46":1,"52":1,"61":1,"65":1,"94":1,"209":1,"333":2}}],["means",{"1":{"3":1,"30":2,"65":3,"76":1,"81":1,"124":1,"132":1,"145":1,"176":1,"183":1,"222":1,"229":1,"291":1,"301":1,"307":3,"322":4}}],["mem=125828",{"1":{"296":1}}],["mem=2147483647",{"1":{"296":1}}],["mem`",{"1":{"296":2}}],["member",{"1":{"192":1}}],["member`",{"1":{"192":1}}],["memory",{"1":{"131":1,"134":1,"135":3,"136":5,"139":1,"170":1,"175":1,"209":3,"210":1,"231":4,"254":1,"289":2,"296":2,"327":1}}],["memoized",{"1":{"88":1,"333":3}}],["mention",{"1":{"219":9}}],["mention`",{"1":{"219":1}}],["mentioned",{"1":{"162":1,"219":3,"268":1}}],["menus",{"1":{"221":1}}],["menu",{"1":{"36":2}}],["merge",{"1":{"153":1,"318":4}}],["merged",{"1":{"46":1,"155":1,"345":2}}],["meet",{"1":{"134":1}}],["meets",{"1":{"118":1,"134":1,"136":1}}],["messaging",{"1":{"73":2,"174":1}}],["messages",{"1":{"69":1,"73":2,"76":1,"142":1,"212":1,"219":1,"295":1,"309":1,"333":1}}],["message=f",{"1":{"331":1}}],["message=greeting",{"1":{"43":1}}],["message=",{"1":{"24":2}}],["message=input",{"1":{"
14":1}}],["message",{"1":{"14":10,"24":3,"25":2,"35":3,"40":1,"43":5,"48":1,"49":1,"50":1,"52":2,"53":1,"55":7,"59":1,"65":3,"66":3,"68":2,"74":1,"78":1,"94":1,"95":1,"96":1,"97":1,"104":1,"105":8,"109":3,"112":4,"113":1,"116":3,"120":1,"121":3,"142":13,"149":3,"155":5,"223":1,"225":2,"226":2,"254":1,"261":1,"274":4,"294":2,"295":2,"297":1,"309":1,"331":1,"345":3},"2":{"14":3,"50":1,"53":1,"65":2,"66":2,"68":1,"74":1,"78":1,"94":1,"95":1,"96":1,"104":1,"105":3,"109":1,"112":1,"116":1,"120":1,"121":2,"155":1}}],["mechanics",{"1":{"72":2}}],["mechanisms",{"0":{"68":1},"1":{"62":1,"217":1}}],["mechanism",{"1":{"57":1,"67":1,"72":1,"119":1,"154":1}}],["meta",{"1":{"150":1}}],["metadata`",{"1":{"54":2,"72":1,"80":1,"81":2,"307":3,"309":1,"311":4,"316":8,"317":7,"322":4,"323":2,"324":6}}],["metadata=",{"1":{"35":1,"40":2,"72":2,"84":2,"105":1,"149":2,"294":1,"326":1}}],["metadata",{"0":{"149":1},"1":{"28":1,"35":8,"36":1,"40":9,"41":1,"46":2,"72":10,"84":4,"105":1,"130":6,"149":6,"150":3,"151":2,"223":1,"234":2,"238":1,"241":1,"289":1,"307":3,"309":3,"311":4,"316":8,"317":6,"322":6,"323":2,"324":8,"333":2,"338":3,"374":1},"2":{"35":4,"40":3,"72":3,"234":1,"238":1,"241":1}}],["metric",{"1":{"148":2,"268":2}}],["metricsclient",{"1":{"332":1,"338":1}}],["metrics`",{"1":{"146":3,"264":1,"272":1,"314":12}}],["metrics",{"0":{"147":1,"148":1,"268":1,"314":1,"338":1},"1":{"130":4,"146":2,"147":2,"148":3,"264":3,"266":6,"268":19,"287":1,"294":1,"300":1,"307":3,"314":32,"332":6,"338":11},"2":{"332":1}}],["met",{"1":{"66":1,"101":1,"106":1,"114":1,"124":3,"133":1,"322":6,"333":1}}],["meters",{"1":{"38":1}}],["method",{"0":{"142":1},"1":{"25":4,"35":2,"40":1,"43":3,"46":3,"48":1,"66":1,"82":2,"88":2,"94":3,"105":1,"109":2,"112":2,"126":2,"133":1,"136":1,"138":2,"140":1,"142":5,"160":1,"161":1,"268":1,"279":1,"307":1,"316":10,"317":2,"322":8,"323":4,"324":12,"326":1,"332":2,"333":1,"341":1}}],["methods`",{"1":{"62":2}}],["methods=",{"1":{"62":1}}],["methods",{"1":{"16":1,"24":2,"25":1
,"43":9,"46":1,"55":1,"62":4,"70":1,"72":1,"88":1,"97":1,"102":2,"161":1,"306":1,"307":2,"308":3,"309":1,"310":1,"311":1,"312":1,"313":1,"314":1,"315":1,"316":1,"317":1,"318":2,"319":1,"320":1,"321":1,"322":4,"323":1,"324":4,"332":4,"333":8,"334":1,"335":1,"336":1,"338":1,"339":1,"340":1,"341":1,"342":1,"343":1,"344":1,"345":4}}],["motivation",{"1":{"316":4}}],["mounted",{"1":{"246":1}}],["mounting",{"0":{"246":1},"1":{"246":1}}],["mount",{"1":{"227":1,"246":1}}],["moving",{"1":{"173":1}}],["move",{"1":{"169":1,"183":1}}],["moved",{"1":{"73":2}}],["mock",{"1":{"158":2,"323":10,"324":8}}],["moment",{"1":{"123":1}}],["mostly",{"1":{"68":2}}],["moscow",{"1":{"52":7}}],["moon",{"1":{"43":4,"105":1,"294":1}}],["modified",{"1":{"238":1,"296":1}}],["modify",{"1":{"52":1,"74":1,"226":1,"230":1,"247":1}}],["modifying",{"1":{"39":1,"73":1,"230":1}}],["modals",{"1":{"217":1,"221":1}}],["mod",{"1":{"127":1,"364":1}}],["mode`",{"1":{"255":1}}],["mode",{"1":{"214":1,"245":1,"255":1,"268":1}}],["moderate",{"1":{"175":1}}],["modes",{"1":{"113":1,"217":1}}],["models",{"1":{"55":1,"183":1,"187":1,"326":1,"327":1,"331":3}}],["model",{"1":{"11":1,"24":1,"25":1,"82":1,"85":1,"90":1,"94":1,"98":1,"124":1,"134":1,"135":6,"136":32,"137":1,"157":1,"175":1,"179":2,"181":1,"207":1,"307":4,"326":2,"328":1,"331":1}}],["modular",{"1":{"107":1}}],["modules",{"1":{"364":1}}],["module",{"1":{"20":1,"156":1}}],["monitor",{"1":{"162":1,"210":1,"278":1}}],["monitoring",{"1":{"0":1,"140":1,"146":1,"182":1,"202":1,"223":1,"251":1,"265":8,"268":1}}],["monopolizing",{"1":{"74":1,"75":2,"77":1}}],["monorepo",{"1":{"55":1}}],["monday",{"1":{"38":1}}],["monthly",{"1":{"38":1}}],["month",{"1":{"5":2,"38":3}}],["mcpsetup",{"1":{"9":1}}],["mcpurl",{"1":{"9":1}}],["mcp",{"0":{"11":1,"207":1},"1":{"7":1,"11":3,"12":1,"207":1},"3":{"9":1,"10":1,"11":1,"12":1}}],["major",{"1":{"290":1,"305":1}}],["major=",{"1":{"146":1}}],["master",{"1":{"238":2,"241":4,"252":3,"253":2,"259":4,"372":1},"2":{"238":1,"252":1,"253":1}
}],["massively",{"1":{"3":1}}],["massive",{"1":{"3":1}}],["machine",{"1":{"209":3,"327":1,"359":1}}],["macos",{"1":{"7":2,"349":2}}],["mail",{"1":{"305":1}}],["mailto",{"1":{"190":1}}],["maintained",{"1":{"132":1}}],["maintain",{"1":{"131":1,"133":1,"299":1}}],["maintenance",{"1":{"34":2,"39":1,"40":2,"296":1}}],["maincharacter",{"1":{"52":1}}],["main",{"1":{"20":5,"21":1,"30":1,"52":6,"55":2,"82":1,"121":1,"135":1,"156":3,"158":1,"161":1,"162":1,"172":1,"173":1,"248":1,"251":1,"294":1,"307":1,"321":1,"326":1,"332":1,"345":1},"2":{"20":1,"21":1,"156":1}}],["matrices",{"0":{"377":1}}],["matters",{"1":{"123":1,"124":1,"175":1}}],["math",{"1":{"84":1,"110":2,"113":4,"117":6,"118":2},"2":{"84":1,"110":2,"113":2,"117":2,"118":2}}],["matched",{"1":{"333":1,"338":1}}],["matches",{"1":{"24":1,"25":1,"34":1,"52":1,"112":1,"113":1,"117":2,"166":1,"168":1}}],["match",{"1":{"50":1,"52":3,"55":2,"111":1,"123":1,"127":1,"130":1,"135":1,"214":1,"320":4,"322":2,"324":2,"364":1}}],["matching",{"1":{"20":1,"36":1,"40":1,"46":1,"51":2,"72":5,"112":1,"113":1,"136":2,"214":1,"311":4,"312":2,"316":10,"317":2,"318":2,"345":2}}],["maxreplicacount",{"1":{"130":1}}],["maxruns",{"1":{"74":3,"78":5}}],["maxseconds",{"1":{"60":4}}],["max",{"1":{"60":2,"62":2,"74":2,"78":8,"167":1,"225":2,"227":1,"231":1,"254":7,"255":12,"266":2,"274":2,"293":2,"294":1,"295":1,"296":2,"298":1,"299":1,"307":2,"322":4,"338":2}}],["maximum",{"1":{"60":3,"62":2,"82":1,"118":1,"144":2,"167":2,"196":1,"254":1,"271":2,"274":2,"289":1,"294":3,"299":1,"307":6,"311":2,"312":2,"313":2,"316":6,"317":2,"318":2,"320":2,"322":14,"324":2,"337":1}}],["mars",{"1":{"43":2}}],["marked",{"1":{"57":1,"132":1,"254":1,"353":1}}],["marks",{"1":{"18":1}}],["markdown",{"1":{"11":4,"12":4}}],["mapping",{"1":{"309":2,"314":2,"333":1,"338":1}}],["map",{"1":{"35":1,"40":2,"43":1,"52":3,"72":1,"104":1,"105":2,"135":1,"136":4,"294":2},"2":{"43":1,"72":1,"104":1,"105":1}}],["makes",{"1":{"37":1,"42":1,"85":2,"102":1,"123":1,"158":1,"176":1,"209"
:1,"217":1,"307":1,"315":1,"325":1,"339":1,"351":1}}],["make",{"1":{"32":1,"37":1,"42":1,"43":1,"55":1,"74":1,"104":1,"105":1,"115":1,"166":1,"169":1,"216":1,"230":1,"289":1,"294":1,"307":3,"329":1,"373":1,"374":3}}],["making",{"1":{"2":1,"115":1,"208":1,"332":1,"348":1}}],["made",{"1":{"30":1,"32":1,"62":1,"152":1}}],["mandatory",{"1":{"374":1}}],["manifest",{"1":{"289":1}}],["manner",{"1":{"60":1,"69":1}}],["manages",{"1":{"222":1,"289":1,"297":1}}],["manager",{"1":{"189":1,"237":1,"241":5},"2":{"241":1}}],["managed",{"1":{"179":1,"180":1,"182":1,"188":1,"248":1,"277":1}}],["management",{"0":{"122":1},"1":{"73":2,"74":1,"122":1,"196":2,"223":1,"225":1,"227":2}}],["manage",{"1":{"36":2,"41":1,"73":1,"74":2,"164":1,"174":1,"241":1,"348":1}}],["managing",{"0":{"36":1,"41":1},"1":{"16":1,"34":1,"38":1,"163":1,"164":1,"182":1,"307":4,"311":1,"317":1,"318":1,"319":1,"320":1,"330":1,"332":14,"334":1,"341":1,"342":1,"343":1,"344":1,"351":1}}],["many`",{"1":{"43":3,"322":7,"324":7}}],["many",{"0":{"209":1},"1":{"17":1,"22":2,"43":5,"62":2,"69":1,"70":2,"74":1,"101":2,"104":4,"105":4,"106":1,"173":1,"209":1,"294":1,"296":1,"322":5,"324":4,"326":1,"329":1},"2":{"43":2,"104":2,"105":2,"294":1,"326":1}}],["manually",{"0":{"280":1},"1":{"34":1,"38":1,"52":1,"73":1,"137":3,"138":2,"171":1,"279":2,"280":1,"309":2}}],["manual",{"0":{"137":1,"138":1},"1":{"7":1,"61":1,"72":3,"73":3,"118":1,"137":1,"139":2,"279":1}}],["mimicking",{"1":{"323":2,"324":2}}],["mimic",{"1":{"323":4,"324":4}}],["microsoft",{"1":{"277":1,"305":1}}],["migrate`",{"1":{"282":1,"284":2}}],["migrate",{"1":{"209":1,"227":2,"279":1,"283":2,"284":1}}],["migrations",{"0":{"247":1,"284":1},"1":{"247":1,"276":1,"278":2,"279":4,"281":1,"282":2,"283":1,"284":2,"285":1,"372":1}}],["migration",{"0":{"283":1},"1":{"183":1,"227":4,"278":2,"279":1,"283":5,"284":2}}],["migrating",{"0":{"183":1}}],["mismatches",{"1":{"166":1}}],["miss",{"1":{"210":1}}],["missing",{"1":{"166":1,"300":1}}],["mission",{"1":{"0":1,"3":1}}],["miss
ed",{"0":{"210":1},"1":{"37":3,"42":3}}],["mixed",{"1":{"118":1}}],["mixing",{"0":{"101":1},"1":{"85":1,"115":1,"123":1}}],["mix",{"1":{"85":1,"100":2}}],["millis`",{"1":{"144":1}}],["milliseconds=1000",{"1":{"294":1}}],["milliseconds=100",{"1":{"294":3}}],["milliseconds=250",{"1":{"294":1}}],["milliseconds`",{"1":{"266":1,"268":1}}],["milliseconds",{"1":{"265":1,"268":8,"294":6}}],["millisecond",{"1":{"74":1,"78":1,"160":1,"175":1},"2":{"74":1,"78":1,"160":1}}],["millions",{"1":{"3":1}}],["middle",{"1":{"175":1}}],["middleware",{"0":{"152":1,"153":1,"154":1,"155":1},"1":{"144":2,"152":5,"153":4,"154":9,"155":6,"156":4,"157":1,"158":34,"345":3,"377":3},"2":{"144":1}}],["mid",{"1":{"92":1}}],["midnight",{"1":{"38":2}}],["midway",{"1":{"30":1,"32":1}}],["min",{"1":{"254":1,"255":8,"298":1,"299":1,"338":2}}],["minreplicacount",{"1":{"130":1}}],["minimum",{"1":{"189":1,"254":1,"299":1}}],["minimize",{"1":{"75":1}}],["minimal",{"0":{"253":1},"1":{"4":1}}],["minor=",{"1":{"146":1}}],["minor",{"1":{"55":1}}],["mind",{"1":{"37":1,"38":1,"42":1,"75":1}}],["mindful",{"1":{"35":1}}],["minute`",{"1":{"266":1}}],["minute",{"1":{"35":1,"38":2,"79":1,"81":4,"113":1,"118":1,"266":1,"333":1},"2":{"35":1,"81":2,"113":1,"118":1}}],["minutes=5",{"1":{"105":1,"294":1,"307":2,"322":4,"326":1}}],["minutes=1",{"1":{"84":1,"113":1,"118":1}}],["minutes=10",{"1":{"65":1}}],["minutes",{"1":{"26":1,"38":2,"39":1,"65":2,"72":1,"123":1,"268":2}}],["mitigate",{"1":{"55":1,"57":1}}],["mit",{"1":{"0":1,"4":1,"187":1}}],["gmail",{"1":{"302":1,"304":1,"305":2},"2":{"304":1}}],["gb",{"1":{"296":1}}],["glasskube`",{"1":{"237":1}}],["glasskube",{"0":{"235":1,"237":1},"1":{"235":1,"236":1,"237":5}}],["globaloutputtype",{"1":{"153":2}}],["globalinputtype",{"1":{"153":2,"155":2}}],["globally",{"1":{"83":2}}],["global",{"1":{"81":1,"82":1,"154":5,"158":3,"268":1,"333":2,"345":7,"362":2,"372":4}}],["ghcr",{"1":{"225":2,"227":4,"238":1,"278":2,"285":2,"289":4},"2":{"225":1,"227":1,"238":1,"278":1,"285":1,"289"
:1}}],["gdpr",{"1":{"186":1}}],["gapped",{"1":{"175":1,"180":1}}],["gauge",{"1":{"146":3,"148":1,"268":3}}],["gates",{"1":{"118":1}}],["gate",{"1":{"82":1}}],["gcloud",{"1":{"372":3}}],["gcp",{"1":{"248":3,"252":1,"372":1}}],["gcm",{"1":{"157":1}}],["gc",{"1":{"146":17}}],["gpu",{"1":{"137":1,"139":1}}],["guessing",{"1":{"207":1}}],["guidance",{"1":{"189":1,"202":1}}],["guides",{"0":{"45":1},"1":{"45":1,"211":1}}],["guide",{"0":{"180":1},"1":{"1":1,"7":3,"125":1,"165":1,"212":1,"213":1,"214":1,"215":1,"217":1,"218":1,"226":2,"228":1,"234":1,"238":1,"241":1,"248":2,"249":1,"253":1,"275":1,"279":1,"280":2,"306":1,"332":1,"372":1}}],["guaranteed",{"1":{"109":1,"301":1}}],["guarantee",{"1":{"30":1,"65":1,"83":1,"102":1}}],["guarantees",{"0":{"30":1,"172":1,"175":1,"176":1},"1":{"15":1,"29":1,"172":2,"184":2,"331":1}}],["grouped",{"1":{"314":4,"338":1}}],["grouping",{"1":{"191":1,"193":1}}],["grouproundrobin",{"1":{"74":1,"78":1},"2":{"74":1,"78":1}}],["groupkey",{"1":{"74":2},"2":{"74":1}}],["group",{"0":{"75":1},"1":{"74":8,"75":10,"76":2,"77":1,"78":8,"89":1,"116":3,"118":8,"129":2},"2":{"74":2,"78":1}}],["groups",{"0":{"116":1,"118":1},"1":{"31":2,"75":1,"88":1,"89":3,"110":2,"113":1,"116":1,"118":7,"129":1}}],["great",{"1":{"208":1,"327":1}}],["greater",{"1":{"57":1,"117":2,"118":1,"136":4},"2":{"136":1}}],["grep",{"1":{"171":1}}],["greptile",{"1":{"5":1}}],["greet",{"1":{"322":1,"345":5},"2":{"345":2}}],["greeting",{"1":{"43":3}}],["greetings",{"1":{"43":4},"2":{"43":1}}],["green",{"1":{"142":1,"219":1}}],["grant",{"1":{"247":2}}],["granular",{"1":{"26":1}}],["granularity",{"1":{"26":1}}],["gradually",{"1":{"161":1}}],["grafana",{"1":{"147":1}}],["graph",{"1":{"85":2,"91":2,"92":1,"100":1,"101":5,"117":1,"123":1,"124":1,"173":1}}],["graphs",{"1":{"13":1,"85":1,"100":1,"177":1}}],["graceful",{"1":{"67":1,"69":1,"71":1,"154":1,"170":1}}],["gracefully",{"1":{"67":2,"69":1,"71":1,"120":1}}],["grained",{"1":{"62":1}}],["grpcinsecure`",{"1":{"243":1}}],["grpcinsecure",{"
1":{"243":1}}],["grpcbroadcastaddress`",{"1":{"243":1}}],["grpcbroadcastaddress",{"1":{"243":1}}],["grpc",{"1":{"21":1,"62":2,"144":1,"166":1,"167":1,"174":1,"188":1,"191":1,"193":1,"223":1,"225":9,"227":6,"230":3,"240":6,"241":10,"243":4,"249":1,"253":1,"254":12,"262":2,"270":1,"274":4,"297":2,"307":1,"315":1,"339":1,"362":3}}],["g",{"1":{"7":1,"32":1,"37":1,"42":1,"46":2,"72":1,"74":1,"79":1,"80":1,"83":1,"94":1,"109":1,"127":3,"145":1,"146":1,"151":3,"154":1,"158":1,"175":1,"177":1,"191":2,"193":1,"219":1,"220":1,"238":1,"278":1,"279":1,"283":1,"309":1,"327":1,"329":1,"338":1,"362":1}}],["git",{"1":{"189":1,"283":2}}],["githubpr",{"1":{"216":1}}],["githubproutput",{"1":{"216":3}}],["githubprinput",{"1":{"216":6}}],["githubpullrequest",{"1":{"216":2}}],["githubrepository",{"1":{"216":2}}],["github",{"0":{"215":1},"1":{"0":1,"16":1,"44":1,"45":2,"46":5,"55":1,"88":1,"112":1,"113":1,"144":1,"158":2,"188":1,"212":1,"215":3,"216":26,"234":1,"237":1,"248":1,"249":1,"260":10,"283":2,"284":1,"305":2,"318":1,"342":1,"374":22},"2":{"55":1,"144":1,"234":1,"249":1,"283":1}}],["girl",{"1":{"160":4,"161":1}}],["giving",{"1":{"60":1,"78":1,"106":1,"190":1}}],["given",{"1":{"74":1,"158":1,"307":3,"309":2,"312":4,"313":4,"315":4,"316":12,"318":4,"322":4,"336":1}}],["give",{"1":{"9":1,"30":1,"72":1,"209":1,"218":1}}],["gives",{"1":{"2":1,"29":1,"89":1,"120":1,"123":1,"154":1,"170":1,"277":1}}],["gif",{"1":{"28":1,"36":1,"41":1,"150":1}}],["geographically",{"1":{"167":1}}],["gemfile",{"1":{"127":2},"2":{"127":1}}],["gems",{"1":{"127":1}}],["generics",{"1":{"332":2}}],["generic",{"1":{"46":4}}],["generating",{"1":{"238":1}}],["generation=",{"1":{"146":9}}],["generation",{"1":{"55":1,"146":1,"241":2}}],["generator<string",{"1":{"160":1}}],["generator",{"1":{"153":2,"160":1,"309":1,"327":1,"328":3,"330":2}}],["generates",{"1":{"24":1,"214":2}}],["generate",{"1":{"21":1,"38":1,"164":1,"234":2,"238":5,"253":1,"346":1,"372":3,"374":2}}],["generated",{"1":{"1":1,"7":1,"166":1,"168":1,"219
":1,"220":1,"227":1,"238":1,"372":1}}],["generally",{"1":{"209":1,"292":1}}],["general",{"1":{"3":1,"88":1,"209":1,"294":1,"332":1}}],["getworkflowidfromname",{"1":{"344":1}}],["getworkflowrunid",{"1":{"161":1,"162":1},"2":{"161":1,"162":1}}],["gettaskexternalid",{"1":{"340":1}}],["gettaskstatusmetrics",{"1":{"338":1}}],["gettaskstats",{"1":{"338":1}}],["getting",{"1":{"2":1,"7":1,"22":1,"170":1,"209":1,"218":1,"225":1,"295":1,"306":1,"332":1},"3":{"7":1,"8":1}}],["getqueuemetrics",{"1":{"338":1}}],["get`",{"1":{"311":2,"312":2,"316":2,"317":2,"318":2,"319":2,"320":2}}],["getauthtag",{"1":{"157":1},"2":{"157":1}}],["getobjectcommand",{"1":{"157":2}}],["getlogger",{"1":{"141":3},"2":{"141":1}}],["getlabels",{"1":{"136":1}}],["getname",{"1":{"84":3},"2":{"84":1}}],["getsignedurl",{"1":{"157":2}}],["gets",{"0":{"191":1},"1":{"46":1,"124":1,"333":15,"335":1,"340":2,"342":1,"344":1}}],["get",{"0":{"6":1},"1":{"4":1,"6":1,"7":2,"15":1,"16":1,"29":1,"30":1,"35":1,"59":2,"62":2,"65":1,"66":1,"68":1,"72":2,"74":1,"83":1,"95":1,"105":1,"127":6,"129":1,"136":1,"148":1,"162":2,"164":1,"196":1,"214":2,"224":1,"225":4,"226":1,"227":1,"234":6,"238":2,"309":2,"312":4,"313":4,"314":10,"316":25,"318":4,"319":4,"320":10,"322":1,"323":2,"324":13,"325":1,"332":29,"333":2,"334":1,"335":2,"338":3,"340":4,"341":1,"342":2,"343":3,"344":2,"345":2,"348":1,"359":1,"372":1,"373":1},"2":{"62":1,"68":1,"136":1,"162":1,"325":1}}],["goes",{"1":{"276":1}}],["goals",{"1":{"176":1}}],["good",{"1":{"175":2,"203":1,"217":1,"237":1}}],["google",{"1":{"88":1,"112":1,"113":1,"188":1,"248":1,"252":1,"259":3,"260":10,"277":2,"305":2}}],["going",{"1":{"167":1}}],["governess",{"1":{"160":4,"161":1}}],["golang",{"1":{"127":2}}],["gosec",{"1":{"110":1,"113":2,"117":3,"118":1}}],["got",{"1":{"109":2,"112":3,"116":1,"268":1}}],["goroutines",{"1":{"104":1,"105":1}}],["go",{"1":{"0":1,"14":1,"16":4,"20":1,"21":3,"24":1,"25":2,"35":3,"39":1,"40":3,"43":2,"48":1,"50":1,"52":4,"53":1,"55":2,"58":1,"59":1,"60":1,"61":1,
"68":1,"72":2,"74":1,"78":1,"81":1,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":4,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":2,"121":1,"125":1,"127":3,"135":1,"138":1,"144":2,"152":2,"153":2,"154":2,"155":2,"156":2,"160":5,"161":2,"162":2,"205":1,"214":3,"216":3,"218":2,"219":2,"220":2,"221":2,"251":1,"253":1,"270":1,"291":1,"294":2,"364":4,"372":1,"373":1,"374":1,"377":6},"2":{"21":1}}],["l",{"1":{"234":2,"238":1,"278":1,"289":1}}],["ln",{"1":{"127":1}}],["lr",{"1":{"101":1,"123":1,"124":1,"173":1}}],["langchain",{"1":{"325":1}}],["language",{"1":{"55":1,"81":1,"105":1,"109":2,"115":1,"120":1,"175":1}}],["languages",{"1":{"55":1,"174":1}}],["languageswitcher",{"1":{"0":2}}],["landscape",{"1":{"237":1}}],["lambda",{"1":{"153":2,"175":1}}],["lambdas",{"1":{"152":1,"153":1}}],["labels`",{"1":{"136":1,"307":2,"322":2}}],["labels=",{"1":{"135":1,"136":1}}],["labels",{"0":{"135":1,"136":1},"1":{"134":1,"135":4,"136":10,"148":1,"241":2,"307":4,"322":4,"333":6},"2":{"136":3}}],["label",{"1":{"134":2,"136":13}}],["larger",{"1":{"231":1,"316":4}}],["large",{"1":{"123":1,"157":2,"167":2,"177":1,"296":1,"297":1}}],["latencies",{"1":{"288":1}}],["latency",{"0":{"288":1},"1":{"75":2,"170":1,"174":1,"175":1,"176":1,"177":3,"199":1,"286":1,"288":2,"294":1}}],["latest",{"1":{"225":2,"227":4,"229":1,"238":1,"277":1,"305":1,"320":2,"326":1}}],["later",{"1":{"20":1,"25":4,"35":1,"40":1,"93":1,"94":1,"268":2,"374":2}}],["lag",{"1":{"62":1,"146":4,"300":1}}],["layer",{"1":{"35":1,"40":1,"158":1,"330":1}}],["lasts",{"1":{"108":1}}],["last",{"1":{"30":1,"47":1,"87":1,"102":1,"123":1,"248":1,"283":3,"322":4,"372":1}}],["llm",{"1":{"24":1,"106":1,"159":1,"160":1,"162":1}}],["llmstxt",{"1":{"11":1,"12":1}}],["llms",{"0":{"12":1},"1":{"11":8,"12":8},"3":{"9":1,"10":1,"11":1,"12":1}}],["ll",{"1":{"7":2,"21":1,"46":4,"47":1,"72":4,"93":1,"94":1,"105":2,"209":2,"211":1,"214":3,"216":1,"218":2,"241":1,"308":1,"309":1,"321":1,"326":1,"331":1,"333":1,"345":1}}],["le",{"1":{"2
68":2}}],["leveraging",{"1":{"151":1}}],["level`",{"1":{"255":1,"263":3,"274":1}}],["level=error",{"1":{"253":2}}],["level=logging",{"1":{"141":2}}],["levels",{"1":{"248":1,"337":1}}],["level",{"0":{"58":1},"1":{"3":1,"13":1,"26":2,"29":1,"56":1,"58":1,"62":1,"63":1,"65":1,"74":2,"78":4,"84":3,"93":2,"101":1,"109":2,"132":2,"134":1,"142":4,"186":1,"216":1,"248":4,"249":1,"255":1,"263":3,"274":1,"289":6,"301":1,"337":1,"338":1},"2":{"78":1,"142":1}}],["less",{"1":{"117":2,"136":4,"209":1,"216":1},"2":{"136":1}}],["letting",{"1":{"116":1}}],["letter",{"0":{"73":1},"1":{"73":3}}],["let",{"1":{"74":2,"86":1,"104":1,"105":1,"113":1,"117":1,"135":1,"136":1,"142":2,"160":1,"209":1,"216":1,"241":1,"294":1}}],["letsencrypt",{"1":{"241":2}}],["lets",{"1":{"23":1,"25":1,"46":1,"101":1,"105":1,"112":1,"118":1,"121":1,"152":1,"321":1,"327":1,"345":2,"348":2}}],["length`",{"1":{"274":2}}],["length",{"1":{"106":1,"157":1,"160":1,"338":4},"2":{"157":1,"160":1}}],["len",{"1":{"43":1,"160":4}}],["leakage",{"1":{"373":1}}],["leaking",{"1":{"171":1}}],["leans",{"1":{"326":1}}],["leaves",{"1":{"157":2}}],["leaving",{"1":{"22":1,"209":1}}],["least",{"1":{"116":1,"118":2,"176":2,"189":1,"248":1,"295":1,"322":2}}],["leading",{"1":{"123":1,"220":1}}],["lead",{"1":{"58":1,"74":1,"102":1,"231":1,"333":1}}],["learning",{"1":{"33":1,"327":1}}],["learn",{"0":{"33":1},"1":{"8":1,"190":1,"277":1,"325":1}}],["leftbranch",{"1":{"117":7}}],["left",{"1":{"3":1,"32":1,"36":1,"41":1,"89":1,"117":10,"123":1}}],["locks",{"1":{"139":1}}],["lockfile`",{"1":{"127":2}}],["lockfile",{"1":{"127":4}}],["lock",{"1":{"104":1,"105":1,"127":7,"254":2,"294":3},"2":{"104":1,"105":1,"127":4}}],["locations",{"1":{"372":1}}],["location",{"1":{"52":7,"134":1,"198":1,"372":3}}],["localhost",{"1":{"146":4,"162":1,"225":9,"227":4,"234":1,"243":6,"253":1,"254":1,"255":1,"328":1},"2":{"328":1}}],["local",{"1":{"10":1,"15":1,"22":1,"109":1,"127":3,"131":1,"132":2,"133":1,"164":4,"171":2,"206":1,"234":1,"238":1,"243":1,"252":1,"
253":2,"255":1,"272":3,"347":1,"348":2,"358":3,"360":1,"361":1,"362":9,"372":1,"374":1}}],["locally",{"0":{"358":1,"360":1,"361":1,"363":1},"1":{"7":3,"19":1,"164":1,"174":1,"203":1,"224":1,"225":1,"227":1,"347":1,"348":2,"358":1,"359":1,"363":2,"365":1,"366":1}}],["low",{"1":{"83":1,"84":7,"174":1,"224":1,"225":1,"288":1},"2":{"84":1}}],["lowerwithfilter",{"1":{"52":2,"53":2},"2":{"53":1}}],["lowertaskoutput",{"1":{"50":2,"53":2}}],["lower",{"1":{"14":1,"50":4,"52":3,"55":1,"83":1,"84":1,"94":1,"95":2,"96":2,"136":1,"307":1},"2":{"14":1,"52":1,"95":1,"96":1}}],["lose",{"1":{"175":1,"279":1}}],["loss",{"1":{"170":1,"279":1,"280":1}}],["lost",{"1":{"62":1,"153":1,"204":1}}],["losing",{"1":{"32":1,"176":1}}],["looking",{"1":{"294":1,"307":1,"320":1,"325":1,"344":1}}],["lookupperms",{"1":{"154":1}}],["lookup",{"1":{"149":1,"328":1}}],["look",{"1":{"46":2,"52":1,"90":1,"146":1,"166":1,"220":1,"294":1,"340":1,"344":1}}],["loop`",{"1":{"101":1}}],["loops",{"1":{"69":1,"86":1,"106":1,"115":1}}],["loop",{"1":{"19":1,"32":3,"86":1,"87":1,"100":1,"101":3,"106":1,"111":1,"118":1,"123":1,"142":2,"146":7,"203":1,"272":2,"325":5},"2":{"325":2}}],["lots",{"1":{"32":1,"33":1,"331":1}}],["loading",{"1":{"327":1,"375":1}}],["loadtest1a",{"1":{"289":2}}],["loadtest",{"1":{"289":8}}],["loadtest`",{"1":{"289":1}}],["loads",{"1":{"278":1,"279":1}}],["loadnewmodel",{"1":{"136":1}}],["loaded",{"1":{"134":1,"136":1}}],["load",{"1":{"17":1,"37":1,"42":1,"74":1,"81":1,"136":3,"189":1,"248":1,"289":4,"294":1,"298":1,"300":1}}],["logins",{"1":{"243":1}}],["login",{"1":{"243":1,"247":1}}],["logic",{"0":{"61":1},"1":{"16":1,"24":1,"30":2,"35":1,"40":1,"57":1,"58":3,"61":1,"63":1,"101":1,"106":1,"114":1,"115":2,"117":1,"118":1,"119":1,"120":1,"121":2,"152":2,"157":1,"216":1,"217":1,"325":1,"329":2,"330":1}}],["logically",{"1":{"188":1,"377":1}}],["logical",{"1":{"7":1}}],["loglevel",{"1":{"142":6,"289":1},"2":{"142":1}}],["log`",{"0":{"142":1},"1":{"140":1,"142":1}}],["logged",{"0":{"191":1},"1":{
"193":1}}],["loggers",{"1":{"263":4}}],["logger=root",{"1":{"141":2}}],["logger",{"1":{"105":3,"141":15,"142":28,"253":4,"255":4,"263":8},"2":{"105":1,"141":2,"142":7}}],["loggingworkflow",{"1":{"141":1,"142":1}}],["logging",{"0":{"140":1,"263":1,"274":1},"1":{"0":1,"121":1,"140":2,"141":10,"142":6,"158":1,"188":1,"225":1,"308":1,"333":1},"2":{"141":4,"142":1}}],["logsclient",{"1":{"332":1,"336":1}}],["logs",{"0":{"190":1,"195":1,"313":1,"336":1},"1":{"21":1,"140":2,"141":1,"142":1,"154":1,"166":1,"168":1,"170":1,"190":4,"195":2,"196":3,"278":3,"295":1,"307":2,"313":9,"332":6,"336":6,"337":7},"2":{"332":1}}],["log",{"1":{"3":1,"24":1,"25":2,"30":1,"35":2,"43":2,"53":1,"55":1,"59":1,"68":1,"81":1,"82":1,"87":2,"109":2,"112":2,"116":1,"121":2,"123":4,"135":2,"136":2,"142":10,"144":1,"153":3,"154":1,"155":4,"191":1,"192":1,"193":1,"194":1,"214":1,"216":1,"219":1,"220":1,"221":1,"228":1,"234":1,"255":2,"263":2,"274":4,"289":1,"309":6,"313":8,"333":2,"337":2},"2":{"20":1,"24":1,"25":1,"35":2,"39":1,"43":2,"53":1,"55":1,"59":1,"65":2,"66":2,"68":3,"81":2,"82":3,"105":1,"109":2,"112":2,"116":2,"121":2,"135":1,"136":1,"142":1,"144":1,"149":1,"153":1,"155":1,"161":1,"162":2,"214":1,"216":1,"219":1,"220":1,"221":1}}],["longer",{"1":{"82":1,"146":1,"272":1,"325":1}}],["long",{"1":{"2":1,"3":1,"17":1,"18":1,"19":1,"25":1,"64":1,"68":4,"69":1,"74":1,"75":1,"76":1,"77":1,"85":2,"86":1,"100":1,"107":2,"109":1,"112":1,"115":1,"116":1,"123":1,"131":1,"139":1,"162":1,"171":1,"329":1}}],["lifespanworkflow",{"1":{"328":1}}],["lifespan=lifespan",{"1":{"328":1}}],["lifespan`",{"1":{"323":2,"324":2,"328":2}}],["lifespans",{"0":{"327":1},"1":{"309":2,"327":5}}],["lifespan",{"1":{"307":1,"309":2,"323":2,"324":2,"327":1,"328":20},"2":{"328":2}}],["lifetime",{"1":{"255":1}}],["lifetime`",{"1":{"255":1}}],["lifecycle",{"1":{"82":1,"145":1,"297":1}}],["live",{"1":{"198":1}}],["lived",{"1":{"131":1}}],["living",{"1":{"160":4,"161":1}}],["lightweight",{"1":{"158":1,"310":2,"333":2}}],["listlogsop
ts",{"1":{"336":1,"337":3}}],["lists",{"1":{"127":2,"334":1,"335":1,"336":1,"339":1,"340":1,"341":1,"342":1}}],["list`",{"1":{"72":4,"311":2,"312":2,"313":2,"316":2,"317":2,"318":2,"319":2,"320":2,"353":1}}],["listed",{"1":{"70":1,"292":1,"322":8,"324":8}}],["listens",{"1":{"214":1,"216":1}}],["listenandserve",{"1":{"162":1},"2":{"162":1}}],["listening",{"1":{"46":1}}],["listen",{"1":{"46":1,"47":1,"214":1,"219":1}}],["listener",{"1":{"21":1,"25":1,"146":8,"272":1,"310":1}}],["list",{"1":{"35":7,"40":7,"43":4,"72":12,"89":2,"104":1,"105":1,"106":1,"116":2,"144":1,"150":1,"160":1,"221":1,"225":2,"250":1,"269":1,"294":1,"307":15,"311":12,"312":10,"313":8,"314":2,"316":8,"317":14,"318":10,"319":6,"320":8,"322":42,"324":22,"328":1,"333":3,"334":1,"335":5,"336":4,"337":1,"339":3,"340":3,"341":2,"342":3,"343":3,"344":4,"353":2,"368":1,"369":1,"370":1,"372":1},"2":{"35":1,"40":3,"72":2}}],["listing",{"0":{"353":1},"1":{"10":1,"35":1,"334":1,"341":1}}],["limitations",{"0":{"300":1},"1":{"301":1}}],["limitvalueexpr",{"1":{"81":1}}],["limit=1000",{"1":{"296":1}}],["limit=10",{"1":{"81":1}}],["limit`",{"1":{"79":1,"82":2,"254":2,"257":12,"261":2,"267":2}}],["limited`",{"1":{"268":1}}],["limited",{"1":{"74":1,"82":1,"106":1,"142":2,"268":5}}],["limiting",{"1":{"74":3,"79":1,"80":3,"81":1,"82":1,"158":3,"204":1,"251":1,"268":2,"301":1}}],["limits=",{"1":{"81":1,"82":1}}],["limits`",{"1":{"81":2,"82":2,"254":1,"307":3,"322":4}}],["limitstrategy",{"1":{"74":1,"78":2}}],["limits",{"0":{"79":1,"80":1,"81":1,"82":1,"315":1},"1":{"17":2,"34":2,"38":2,"74":4,"79":3,"80":5,"81":8,"82":10,"106":2,"129":1,"158":2,"169":3,"170":1,"174":2,"175":1,"176":2,"177":1,"182":1,"254":1,"257":15,"289":1,"291":1,"307":2,"315":3,"332":3,"339":4},"2":{"82":1}}],["limit",{"0":{"257":1},"1":{"17":2,"22":1,"60":3,"67":1,"74":6,"75":1,"76":1,"78":4,"80":1,"81":7,"82":29,"157":1,"169":1,"189":1,"254":2,"257":12,"261":2,"267":2,"294":2,"307":5,"315":10,"316":4,"322":8,"328":1,"335":1,"337":1,"339":2,"342":1,
"344":2},"2":{"74":1,"81":1,"82":1}}],["line",{"1":{"206":1,"309":5,"347":1}}],["lines",{"1":{"142":1,"313":8,"337":1}}],["links",{"1":{"279":1}}],["link",{"1":{"11":1,"12":1}}],["linux",{"1":{"7":1,"349":1}}],["library",{"1":{"325":1}}],["libraries",{"1":{"69":2,"149":1,"325":1}}],["lib",{"1":{"7":1,"127":2,"225":3,"227":2,"231":1}}],["lite`",{"1":{"225":1,"226":1}}],["lite",{"0":{"225":1},"1":{"4":1,"164":1,"224":3,"225":22,"226":2}}],["likely",{"1":{"21":1,"58":1,"168":1,"209":3}}],["like",{"1":{"0":1,"3":1,"7":1,"11":1,"25":1,"32":1,"34":1,"38":1,"44":1,"46":7,"50":1,"55":1,"67":1,"72":1,"73":1,"83":1,"94":2,"101":1,"109":1,"113":1,"117":1,"121":2,"131":1,"146":2,"147":1,"158":2,"174":3,"177":1,"207":1,"212":1,"214":2,"216":5,"220":2,"226":2,"248":1,"294":1,"308":1,"318":1,"322":2,"324":2,"325":1,"326":1,"327":1,"329":2,"330":1,"331":2,"333":3,"342":1,"345":1,"367":1}}],["licensed",{"1":{"0":1,"4":1,"187":1}}],["oauth",{"1":{"374":1}}],["oom",{"1":{"170":1,"210":1}}],["omit",{"1":{"153":1}}],["omit=dev",{"1":{"127":1}}],["otlp",{"1":{"144":7,"372":2}}],["otel",{"1":{"144":2,"264":6,"266":4,"372":2}}],["others",{"1":{"83":1}}],["otherwise",{"1":{"11":1,"55":1,"58":1,"83":1,"102":1,"146":1,"176":1,"204":1,"294":1,"309":3,"333":2,"343":1,"358":1}}],["otheragentstablabel",{"1":{"9":1}}],["occupied",{"1":{"169":1}}],["occupies",{"1":{"137":1}}],["occurred",{"1":{"191":1,"309":3}}],["occur",{"1":{"167":1}}],["occurs",{"1":{"49":1,"92":1}}],["ocr",{"1":{"106":1}}],["oldest",{"1":{"77":1,"129":4}}],["older",{"0":{"285":1},"1":{"76":1,"194":1,"279":1,"281":1,"285":1}}],["old",{"1":{"76":1,"136":2}}],["olap",{"0":{"267":1},"1":{"62":1,"266":4,"267":2,"328":1}}],["o",{"1":{"69":1,"127":1,"209":1,"234":4,"238":2,"345":2}}],["osx",{"1":{"373":1}}],["oss",{"0":{"179":1},"1":{"179":1,"180":1,"182":1}}],["os",{"1":{"62":1,"170":1},"2":{"62":1}}],["ok`",{"1":{"55":1}}],["ok=true",{"1":{"55":1}}],["ok",{"1":{"55":4},"2":{"55":1}}],["outlook",{"1":{"305":1}}],["outlining",{"1":{"2
9":1}}],["out`",{"1":{"268":1}}],["outs",{"1":{"123":1}}],["outside",{"1":{"111":1}}],["output`",{"1":{"153":1,"309":2}}],["outputs=",{"1":{"323":2,"324":2}}],["outputs`",{"1":{"323":2,"324":2}}],["outputs",{"0":{"96":1},"1":{"15":1,"88":1,"90":1,"92":2,"96":1,"115":2,"121":1,"152":2,"157":1,"174":1,"205":1,"323":2,"324":2,"326":2,"331":1}}],["output",{"0":{"15":1},"1":{"14":1,"15":2,"16":1,"24":1,"25":2,"55":5,"90":1,"94":1,"95":7,"96":7,"104":1,"105":6,"117":24,"118":2,"153":9,"154":2,"155":3,"157":8,"158":1,"166":1,"167":1,"168":1,"241":2,"308":1,"309":5,"323":4,"324":4,"326":3,"330":2,"331":4,"333":4,"345":2,"372":1},"2":{"25":2,"95":1,"96":1,"105":1,"117":3,"326":1},"3":{"13":1,"14":1,"15":1,"16":1,"17":1,"18":1}}],["offset",{"1":{"311":2,"316":2,"317":2,"320":2,"322":2,"324":2,"335":1,"342":1,"344":1}}],["offline",{"1":{"167":1}}],["offloading",{"1":{"157":1}}],["offers",{"1":{"80":1,"249":1}}],["offering",{"1":{"5":1}}],["office365",{"1":{"305":1}}],["office",{"1":{"38":1,"147":1,"177":1,"305":1}}],["often",{"1":{"29":1,"35":1,"40":1,"43":1,"70":1,"114":1,"134":1,"171":2,"177":2}}],["oblonskys",{"1":{"160":4,"161":1}}],["obvious",{"1":{"32":2}}],["objects",{"1":{"55":1,"136":1,"146":12,"152":1,"153":1,"249":1,"317":1,"322":8,"323":2,"324":10,"333":3,"341":1}}],["object",{"0":{"16":1},"1":{"14":1,"15":1,"24":4,"25":5,"35":1,"40":2,"43":3,"46":1,"57":1,"58":1,"59":1,"65":1,"93":3,"94":4,"96":1,"104":1,"109":1,"112":1,"129":1,"136":1,"142":1,"154":1,"158":2,"214":12,"220":1,"243":3,"307":8,"309":1,"322":11,"324":8,"333":2,"334":6,"337":1,"341":9,"344":6,"373":1},"2":{"214":3}}],["observable",{"1":{"92":1}}],["observability",{"1":{"3":1,"106":1,"181":1,"348":1,"368":1}}],["observed",{"1":{"290":1}}],["observe",{"1":{"2":1,"62":1}}],["owner",{"1":{"247":1}}],["own",{"0":{"289":1},"1":{"7":1,"13":1,"18":1,"90":1,"101":1,"104":2,"106":1,"142":1,"144":1,"160":4,"161":1,"164":1,"178":1,"180":1,"184":1,"200":1,"222":1,"286":5,"289":1,"294":1}}],["orphaned",{"1":{"332":
1}}],["oregon",{"1":{"167":1,"198":2}}],["origin",{"1":{"151":1}}],["originating",{"1":{"150":1}}],["originally",{"1":{"66":1,"123":1}}],["original",{"1":{"46":1,"62":1,"72":1,"88":1,"95":1,"96":1,"109":2,"123":1,"132":2,"153":4,"154":1}}],["orcondition",{"1":{"113":1,"118":1},"2":{"113":1,"118":1}}],["orchestrate",{"1":{"100":1}}],["orchestrator",{"0":{"164":1},"1":{"85":1,"101":1,"164":1,"170":1}}],["orchestration",{"1":{"3":1,"100":1,"175":1,"222":1,"223":1}}],["ordinarily",{"1":{"61":1}}],["ordinary",{"1":{"29":1}}],["orderbydirection",{"1":{"337":1}}],["ordered",{"1":{"196":1}}],["ordering",{"1":{"75":1}}],["order",{"1":{"17":1,"46":1,"75":2,"92":1,"102":3,"115":1,"154":4,"174":1,"247":1,"311":4,"317":4,"337":1}}],["orgroup",{"1":{"322":6}}],["orgroup`",{"1":{"310":1}}],["orgid",{"1":{"154":3}}],["organized",{"1":{"91":1,"250":1}}],["organizations",{"1":{"196":1}}],["organization",{"1":{"7":1,"46":1,"149":1,"151":1,"164":3,"190":1,"195":2,"196":2,"216":1,"374":2}}],["org",{"1":{"11":1,"12":1,"38":1,"118":1,"127":1,"154":1,"277":1,"293":1,"303":1,"325":1},"2":{"127":1}}],["opposite",{"1":{"209":1}}],["opt",{"1":{"158":1}}],["optimal",{"1":{"294":1}}],["optimize",{"1":{"139":1}}],["optimized",{"1":{"5":1,"177":1,"209":2}}],["option",{"1":{"22":1,"74":1,"144":3,"252":5,"279":2,"307":1}}],["options",{"0":{"224":1,"250":1,"269":1,"299":1},"1":{"7":1,"21":2,"46":1,"84":3,"93":2,"94":1,"105":1,"133":2,"142":1,"144":3,"149":1,"177":1,"209":2,"225":2,"248":1,"250":2,"269":3,"279":2,"316":4,"319":2,"322":7,"324":7,"332":9,"333":6,"335":2,"336":1,"337":1,"338":1,"339":2,"340":3,"342":3,"344":1,"345":8,"346":3}}],["optionally",{"1":{"55":1,"153":1,"154":1,"240":1,"346":1}}],["optional",{"1":{"7":1,"14":1,"17":1,"38":1,"46":4,"55":2,"88":1,"105":1,"136":1,"142":1,"173":1,"174":2,"182":1,"191":1,"223":1,"226":1,"241":1,"253":1,"268":2,"304":1,"307":2,"318":4,"320":8,"333":3,"338":1,"345":7,"346":3,"372":2}}],["opaque",{"1":{"130":1}}],["op",{"1":{"61":1}}],["operated",{"1":{
"180":1}}],["operate",{"1":{"175":1,"180":1,"182":1,"189":1,"206":1}}],["operates",{"1":{"158":1,"173":1,"182":1}}],["operating",{"1":{"165":1,"307":1}}],["operational",{"0":{"182":1},"1":{"175":1,"182":1,"204":1,"205":1}}],["operation",{"1":{"58":1,"61":2,"66":1,"120":1,"191":1,"322":1,"324":1,"325":1,"333":1,"335":2,"336":1,"337":1,"339":2,"340":3,"342":3,"344":1}}],["operations",{"0":{"266":1},"1":{"7":1,"9":1,"31":1,"35":2,"62":3,"69":4,"71":1,"72":2,"87":1,"102":3,"107":3,"123":1,"158":2,"255":1,"265":4,"266":7,"298":1,"300":1,"316":4,"322":1}}],["operator",{"1":{"118":1,"136":1}}],["operators",{"1":{"113":1,"117":1}}],["openid",{"1":{"260":1}}],["opened",{"1":{"216":9}}],["opened`",{"1":{"216":1}}],["opentelemetry",{"0":{"143":1,"264":1},"1":{"143":3,"144":10,"264":5},"2":{"144":2}}],["openssl",{"1":{"238":1}}],["opens",{"1":{"22":1}}],["open",{"1":{"0":1,"4":1,"5":1,"74":1,"179":1,"187":1,"190":1,"199":1,"216":1,"305":1}}],["onsuccess",{"1":{"345":2}}],["onfailureworkflow",{"1":{"121":2}}],["onfailure",{"1":{"121":5,"345":2},"2":{"121":2}}],["oncron",{"1":{"39":1}}],["once",{"1":{"8":1,"21":1,"23":1,"30":2,"32":1,"46":1,"47":1,"51":1,"70":1,"104":1,"118":1,"124":1,"146":1,"150":1,"176":3,"214":1,"216":1,"218":1,"225":1,"227":1,"268":1,"278":1,"284":1,"307":3,"328":1,"330":1,"354":1,"365":1}}],["online",{"1":{"37":1,"72":1,"209":1,"371":1}}],["only",{"1":{"3":1,"31":1,"33":1,"35":1,"46":3,"51":1,"55":2,"62":2,"63":1,"72":1,"74":2,"76":1,"81":1,"83":1,"88":1,"89":2,"92":1,"93":1,"100":2,"102":1,"106":1,"109":1,"112":1,"113":1,"117":2,"121":2,"124":4,"129":1,"131":1,"132":1,"147":1,"148":1,"153":1,"157":1,"158":2,"174":1,"175":2,"177":1,"189":1,"191":2,"209":1,"214":1,"243":1,"268":1,"274":2,"300":1,"301":2,"307":3,"316":2,"322":6,"324":2,"327":1,"328":1,"337":2,"341":1,"345":2,"374":2,"376":6}}],["onevents",{"1":{"50":1,"52":1}}],["ones",{"1":{"32":2,"83":1,"84":1,"216":1,"307":1}}],["one",{"0":{"164":1},"1":{"18":1,"20":1,"21":1,"30":1,"31":1,"34":1,"46":2,"49
":1,"52":1,"65":1,"75":2,"76":1,"77":1,"78":1,"81":3,"83":4,"95":3,"96":3,"100":2,"101":4,"104":1,"106":2,"115":1,"116":1,"117":8,"118":2,"121":1,"124":1,"136":2,"144":2,"164":1,"209":2,"216":3,"217":1,"218":1,"252":1,"294":1,"295":1,"309":1,"317":1,"322":3,"323":2,"324":2,"330":2,"345":1,"353":1,"355":1}}],["onhatchet",{"1":{"0":1,"4":1,"7":1,"46":1,"129":1,"130":3,"148":1,"175":1,"198":2,"201":1,"268":2},"2":{"129":1,"130":1,"148":1,"268":1}}],["oversized",{"1":{"157":1}}],["overall",{"1":{"98":1,"117":1,"288":1}}],["overwrite=false",{"1":{"227":1}}],["overwritten",{"1":{"52":1}}],["overwhelm",{"1":{"74":1}}],["overwhelming",{"1":{"17":1,"57":1}}],["overriding",{"1":{"46":1,"365":1}}],["overrides",{"1":{"338":1,"345":7,"355":1}}],["override",{"1":{"39":1,"84":2,"118":1,"144":1,"243":2,"278":1,"365":1}}],["overhead",{"1":{"43":1,"158":3}}],["overlapping",{"1":{"37":1,"42":1}}],["overload",{"1":{"74":1}}],["overloaded",{"1":{"29":1}}],["overloading",{"1":{"3":1,"74":1,"79":1}}],["overview",{"0":{"173":1,"211":1,"240":1,"276":1,"281":1},"1":{"268":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["rds",{"1":{"248":1,"277":1,"286":1,"290":1}}],["r",{"1":{"127":1,"162":1,"307":12,"309":3,"322":12,"323":1,"324":10}}],["rfc4616",{"1":{"303":1}}],["rfc3339",{"1":{"39":2,"40":1,"109":1,"112":1,"116":1},"2":{"39":1,"40":1,"109":1,"112":1,"116":1}}],["rf",{"1":{"127":2,"238":1}}],["rm",{"1":{"127":2,"238":1}}],["rr",{"1":{"74":3},"2":{"74":1}}],["risk",{"1":{"62":1,"279":1}}],["risks",{"1":{"55":1}}],["rightbranch",{"1":{"117":7}}],["right",{"1":{"10":1,"36":1,"41":1,"117":10,"164":1,"205":1,"322":1}}],["raising",{"1":{"322":2,"324":2}}],["raises",{"1":{"61":1,"309":1,"310":1,"317":2,"322":1,"323":2}}],["raise",{"1":{"58":2,"59":2,"60":2,"61":3,"68":2,"121":4,"309":1}}],["ram",{"1":{"292":1}}],["raw",{"1":{"259":1}}],["rabbitmqctl",{"1":{"225":1,"227":1}}],["rabbitmq",{"1":{"173":1,"174":1,"177":1,"223":1,"224":1,"225":17,"226":2,"227":16,"248":2,"261":4,"295":1},"2":{"2
25":3,"227":3}}],["rake",{"1":{"127":2}}],["ran",{"1":{"204":1,"220":4,"322":2,"324":2}}],["randstring",{"1":{"238":3}}],["randint",{"1":{"94":1,"95":1,"96":1,"110":1,"113":2,"117":3,"118":1},"2":{"94":1,"95":1,"96":1,"110":1,"113":1,"117":1,"118":1}}],["rand",{"1":{"94":2,"110":2,"113":4,"117":6,"118":2,"238":1},"2":{"74":1,"78":1,"110":1,"113":1,"117":1,"118":1}}],["randombytes",{"1":{"157":2}}],["randomuuid",{"1":{"153":2,"157":2},"2":{"153":1}}],["randomnumber",{"1":{"110":2,"113":4,"117":20,"118":2},"2":{"117":5}}],["randomsum",{"1":{"95":2,"96":2,"117":7}}],["random",{"1":{"74":1,"78":1,"84":1,"94":3,"95":5,"96":5,"110":3,"113":6,"117":27,"118":3,"238":2,"374":1},"2":{"84":1,"94":1,"95":1,"96":1,"110":2,"113":2,"117":3,"118":2}}],["range",{"1":{"38":1,"43":1,"68":1,"104":1,"105":2,"121":1,"141":1,"142":1,"160":2,"161":1,"162":1,"195":1,"196":2,"294":2,"326":1}}],["race",{"1":{"74":3,"160":2}}],["ratio",{"1":{"264":1,"268":1}}],["ratio`",{"1":{"264":1}}],["rather",{"1":{"58":1,"75":1,"115":1,"120":1,"255":1}}],["rate=0",{"1":{"301":1}}],["rates",{"1":{"268":1,"287":1}}],["rate`",{"1":{"258":1,"265":1}}],["ratelimitduration",{"1":{"81":1,"82":2},"2":{"81":1,"82":1}}],["ratelimitsclient",{"1":{"332":1,"339":2}}],["ratelimits",{"0":{"339":1},"1":{"81":1,"82":3,"332":3},"2":{"82":2,"332":1}}],["ratelimitinput",{"1":{"81":1,"82":1}}],["ratelimit",{"1":{"81":3,"82":3,"307":2,"322":4},"2":{"81":2,"82":2}}],["rate",{"0":{"79":1,"80":1,"81":1,"82":1,"315":1},"1":{"17":2,"34":1,"38":1,"74":1,"78":1,"79":5,"80":7,"81":14,"82":36,"106":3,"158":5,"169":3,"174":1,"176":2,"182":1,"204":1,"251":1,"254":4,"258":1,"265":1,"268":57,"289":1,"307":4,"315":13,"322":4,"332":3,"339":6},"2":{"81":1,"82":2}}],["rb",{"1":{"21":1,"127":1},"2":{"21":1,"127":1}}],["rollback",{"1":{"277":1,"279":3,"280":1}}],["rolling",{"1":{"199":1}}],["role",{"1":{"247":1}}],["roles",{"1":{"81":1}}],["rotate",{"1":{"189":1}}],["robin",{"0":{"75":1},"1":{"74":5,"75":3,"78":6,"129":1},"2":{"74":1,"78":1}}],[
"robin`",{"1":{"74":1,"75":4,"307":3,"322":4}}],["routing",{"1":{"154":1,"216":1}}],["route",{"1":{"20":1,"46":2}}],["round",{"0":{"75":1},"1":{"74":6,"75":7,"78":6,"129":1,"307":3,"322":4},"2":{"74":1,"78":1}}],["roundtrips",{"1":{"43":1}}],["rows=len",{"1":{"328":1}}],["rows",{"1":{"36":2,"72":8,"328":5},"2":{"72":4}}],["row",{"1":{"36":1,"328":4}}],["root",{"1":{"10":1,"73":1,"101":3,"127":1,"141":6,"142":1,"225":2,"227":3,"262":8,"265":2,"273":2,"376":4}}],["redeploy",{"1":{"278":1,"279":1}}],["redeliver",{"1":{"216":1}}],["redirect",{"1":{"241":1}}],["redundancy",{"1":{"202":1}}],["reduces",{"1":{"294":1}}],["reduce",{"1":{"75":1,"104":1,"105":1,"209":1,"294":2},"2":{"104":1,"105":1}}],["reducing",{"1":{"5":1,"167":1,"209":1,"294":1}}],["rewriting",{"1":{"183":1}}],["revert",{"1":{"279":1,"281":1}}],["reverse",{"1":{"95":2,"96":1,"234":1,"240":1,"243":1,"279":1}}],["revoked",{"1":{"192":1}}],["review",{"1":{"115":1,"169":1}}],["reuse",{"1":{"189":1}}],["reused",{"1":{"107":1}}],["reusable",{"1":{"107":1}}],["renamed",{"1":{"169":1}}],["rendered",{"1":{"81":1}}],["renew`",{"1":{"50":1}}],["regardless",{"1":{"333":2}}],["regions",{"0":{"198":1},"1":{"197":1,"198":1,"199":1}}],["region",{"0":{"197":1,"199":1},"1":{"157":2,"167":1,"177":1,"197":1,"198":1,"199":1,"248":1,"290":1},"2":{"157":1}}],["registration",{"1":{"154":2},"3":{"19":1,"20":1,"21":1,"22":1}}],["registering",{"1":{"377":2}}],["registerinstrumentations",{"1":{"144":2}}],["registerworkflow",{"1":{"136":2,"294":1},"2":{"136":1,"294":1}}],["register`",{"1":{"126":1}}],["registered",{"1":{"18":1,"23":1,"34":1,"38":1,"48":2,"49":2,"123":1,"133":1,"135":1,"152":1,"158":1,"166":1,"169":2,"333":2,"371":1}}],["registers",{"1":{"18":1,"20":1,"89":1}}],["register",{"1":{"7":1,"19":1,"20":2,"47":1,"50":1,"52":2,"144":2,"238":1,"307":1}}],["regular",{"1":{"71":1,"89":3,"90":2,"104":1,"120":1,"123":4,"187":1,"308":1,"333":1}}],["regularly",{"1":{"69":1,"188":1}}],["rerun",{"1":{"61":1}}],["remediated",{"1":{"187"
:1,"188":1}}],["remember",{"1":{"55":1,"63":1,"71":1,"144":1}}],["remained",{"1":{"288":1}}],["remaining",{"1":{"144":1,"333":1}}],["remain",{"1":{"132":1,"134":1,"136":1,"137":1,"169":1}}],["remains",{"1":{"74":1,"117":1,"136":1,"248":1,"288":1}}],["reminding",{"1":{"55":1}}],["reminder",{"1":{"27":1,"34":1}}],["remove`",{"1":{"357":1}}],["removed",{"1":{"192":1,"194":1}}],["remove",{"1":{"52":1,"357":2}}],["requeue",{"1":{"261":2}}],["requeued",{"1":{"112":1}}],["requested",{"1":{"311":2,"317":2}}],["request`",{"1":{"216":1}}],["requestid",{"1":{"153":3}}],["request",{"0":{"199":1},"1":{"46":2,"54":1,"62":1,"68":2,"72":2,"146":2,"152":1,"154":1,"157":1,"158":1,"162":1,"191":1,"216":15,"218":1,"219":2,"220":1,"221":2,"294":3,"305":3,"338":2,"342":1,"374":1},"2":{"162":1,"216":3}}],["requests",{"1":{"44":1,"45":1,"46":3,"62":2,"67":1,"69":1,"79":1,"188":1,"191":3,"212":1,"214":1,"215":1,"218":1,"289":1,"318":1,"332":1,"342":1,"374":1}}],["requiring",{"1":{"57":1,"63":1,"298":1}}],["requires",{"1":{"46":3,"55":1,"61":1,"81":1,"129":1,"137":1,"139":1,"148":1,"177":1,"225":1,"227":1,"238":1,"241":1,"247":1,"255":1,"268":1}}],["requiredcontext",{"1":{"154":1}}],["required=true",{"1":{"136":1}}],["required",{"0":{"252":1},"1":{"40":1,"46":1,"52":1,"124":1,"136":3,"142":1,"144":1,"154":3,"156":1,"173":1,"238":1,"245":1,"252":4,"254":1,"258":1,"260":4,"268":2,"278":1,"307":2,"309":6,"310":1,"311":14,"312":14,"313":2,"315":4,"316":21,"317":17,"318":14,"319":6,"320":6,"322":14,"324":17,"374":6,"376":1}}],["require",{"1":{"9":1,"17":1,"20":1,"32":1,"46":3,"73":1,"75":1,"124":1,"126":1,"142":2,"144":1,"156":1,"230":1},"2":{"20":1,"156":1}}],["requirements",{"1":{"4":1,"127":2,"134":1,"175":1,"209":1},"2":{"127":1}}],["rejecting",{"1":{"167":1}}],["rejected",{"1":{"158":1}}],["reject",{"1":{"35":1,"317":1}}],["ref`",{"1":{"316":2,"324":2}}],["reflect",{"1":{"134":1}}],["refreshed",{"1":{"66":1}}],["refreshes",{"1":{"66":1,"333":1}}],["refresher",{"1":{"29":1}}],["refreshtimeout
",{"1":{"66":3,"333":1},"2":{"66":1}}],["refreshtimeoutworkflow",{"1":{"65":1,"66":2},"2":{"66":1}}],["refresh",{"1":{"65":1,"66":15,"309":2},"2":{"66":2}}],["refreshing",{"0":{"66":1},"1":{"16":2,"66":2,"309":4}}],["ref",{"1":{"25":9,"84":1,"133":4,"161":7,"162":4,"345":1},"2":{"25":3,"133":2,"161":2,"162":2}}],["refers",{"1":{"255":1,"295":1,"314":2}}],["referencing",{"1":{"54":1,"81":1}}],["references",{"1":{"10":1,"226":1,"322":2,"324":2,"333":1}}],["reference",{"0":{"305":1,"362":1},"1":{"9":1,"10":3,"11":1,"16":8,"35":1,"40":1,"54":1,"206":3,"286":1,"289":1,"306":2,"316":4,"322":6,"324":10,"332":2,"333":1}}],["referral`",{"1":{"49":1}}],["refer",{"1":{"15":1,"188":1,"225":2,"246":1,"277":1,"297":1}}],["rely",{"1":{"364":1}}],["relying",{"1":{"58":1}}],["relies",{"1":{"358":1}}],["reliable",{"1":{"127":1,"277":1}}],["reliability",{"0":{"176":1},"1":{"56":1,"63":1,"172":1,"184":1}}],["reload`",{"1":{"365":2}}],["reloading",{"1":{"348":2,"365":1,"373":1}}],["reload",{"1":{"206":1,"364":3,"365":3}}],["reloads",{"1":{"21":1}}],["releasing",{"1":{"137":1,"138":1,"294":2}}],["releasesemaphorebuffer",{"1":{"294":4}}],["releases",{"1":{"152":1,"215":1,"284":1,"294":1,"333":1,"347":1}}],["releaseslot",{"1":{"138":1,"333":1},"2":{"138":1}}],["releaseerr",{"1":{"138":3}}],["released",{"1":{"123":1,"333":1}}],["release",{"0":{"137":1,"138":1},"1":{"120":1,"123":1,"137":2,"138":11,"139":2,"278":1,"284":1,"309":2},"2":{"138":2}}],["relevant",{"1":{"76":1,"279":2,"287":1}}],["relatively",{"1":{"83":1,"288":1}}],["relative",{"1":{"17":1}}],["related",{"1":{"16":1,"191":1,"193":1,"201":1,"266":1}}],["rel=",{"1":{"11":1,"12":1}}],["recv",{"1":{"274":1}}],["recursive",{"1":{"106":2}}],["recurring",{"1":{"38":1,"322":1,"345":1}}],["recently",{"1":{"169":1}}],["recent",{"1":{"75":1,"76":2,"196":1,"205":1,"370":1}}],["receiving",{"1":{"19":1}}],["received",{"1":{"31":1,"48":1,"49":2,"52":1,"89":1,"162":1}}],["receive",{"1":{"15":1,"35":1,"40":1,"46":1,"94":1,"112":1,"118":1,"153":2,
"154":2,"155":1,"174":1,"210":1,"212":1,"274":1}}],["receives",{"1":{"15":1,"16":1,"19":1,"69":1,"88":1,"153":3,"154":4,"155":1,"158":1}}],["reconfigure",{"1":{"209":1}}],["reconnect",{"1":{"176":1}}],["reconnection",{"1":{"170":1}}],["reconciliation",{"1":{"26":1}}],["recorded",{"1":{"192":1}}],["record<string",{"1":{"157":1}}],["records",{"1":{"123":1,"173":1,"174":1,"190":1}}],["record",{"1":{"73":1,"87":1,"333":2,"338":1}}],["recovery",{"1":{"154":1,"277":2}}],["recover",{"1":{"60":1,"63":1,"85":1}}],["recommend",{"1":{"162":1,"209":1,"240":1,"241":1,"276":1,"296":1,"297":1,"325":1,"326":1,"327":1}}],["recommends",{"1":{"55":1,"325":1}}],["recommended",{"1":{"7":1,"21":1,"55":2,"82":1,"136":1,"141":1,"247":1,"253":2,"279":1,"289":2,"294":1,"309":1,"331":1,"332":1,"349":2}}],["rethrowifcancelled",{"1":{"333":2},"2":{"333":1}}],["retained",{"1":{"194":1}}],["retention`",{"1":{"254":2}}],["retention",{"0":{"194":1,"291":1},"1":{"183":1,"254":2,"257":2,"291":3}}],["retrieval",{"1":{"308":1,"333":1}}],["retrieved",{"1":{"196":1}}],["retrieves",{"1":{"40":1,"317":8,"333":2,"334":1,"341":1}}],["retrieve",{"1":{"25":2,"55":1,"309":4,"311":10,"312":2,"314":8,"316":8,"317":2,"318":2,"319":2,"320":6}}],["retried",{"1":{"56":1,"57":1,"61":1,"62":5,"64":1,"268":7,"309":1,"323":2,"324":2,"333":1}}],["retries=1",{"1":{"61":1}}],["retries=10",{"1":{"60":1}}],["retries=3",{"1":{"58":1,"59":1}}],["retrieswithcountresult",{"1":{"59":2}}],["retrieswithcountinput",{"1":{"59":1}}],["retrieswithcount",{"1":{"59":2}}],["retriesresult",{"1":{"58":1}}],["retriesinput",{"1":{"58":1}}],["retries",{"0":{"58":1,"72":1},"1":{"0":2,"2":1,"14":1,"17":1,"56":2,"57":2,"58":10,"59":3,"60":10,"61":5,"62":16,"63":2,"70":1,"72":2,"102":1,"132":1,"133":1,"174":1,"175":1,"181":1,"182":1,"204":1,"225":3,"227":2,"231":1,"268":2,"307":4,"322":8}}],["retryable",{"1":{"61":5},"2":{"61":1}}],["retrycount",{"1":{"59":7,"333":1},"2":{"59":1}}],["retrying",{"1":{"58":2,"62":1,"72":2,"73":1}}],["retry",{"0":{"56
":1,"59":1,"61":1,"62":1},"1":{"16":1,"17":3,"30":1,"34":1,"38":1,"56":1,"57":4,"58":4,"59":7,"60":3,"61":14,"62":13,"63":3,"64":1,"72":2,"92":2,"102":1,"104":1,"105":1,"120":3,"145":2,"158":2,"174":1,"176":2,"181":1,"254":2,"268":1,"307":2,"309":3,"322":5,"333":1,"345":1},"2":{"58":1,"59":2,"60":1}}],["returning",{"1":{"69":1,"154":2,"322":2,"324":1,"335":1}}],["returns",{"1":{"17":1,"24":5,"25":4,"57":1,"61":1,"113":1,"117":1,"129":1,"146":1,"158":2,"272":1,"279":1,"307":4,"309":21,"310":1,"311":8,"312":10,"313":2,"314":12,"315":2,"316":21,"317":13,"318":10,"319":6,"320":8,"322":28,"323":2,"324":29,"332":24,"333":40,"334":4,"335":5,"336":1,"338":6,"339":2,"340":10,"341":7,"342":5,"343":5,"344":4,"345":11,"346":3}}],["returned",{"1":{"15":1,"25":1,"35":1,"37":1,"93":1,"148":1,"152":1,"300":1,"309":1,"314":2,"320":2,"321":1,"322":2,"324":2,"345":3}}],["return",{"1":{"14":2,"24":2,"25":1,"39":2,"43":5,"55":1,"59":2,"60":1,"65":2,"66":2,"68":3,"74":1,"78":1,"89":2,"93":1,"94":3,"95":3,"96":4,"104":5,"105":12,"109":1,"110":2,"112":1,"113":4,"116":2,"117":8,"118":2,"120":2,"121":2,"132":3,"133":3,"135":1,"136":3,"138":1,"141":1,"142":3,"146":1,"153":16,"154":4,"155":5,"157":11,"158":2,"161":1,"162":3,"196":1,"214":2,"216":2,"219":2,"220":2,"221":2,"294":4,"309":5,"311":2,"312":2,"313":2,"316":2,"317":2,"318":2,"320":2,"322":1,"325":4,"326":6,"328":1,"330":5,"331":2,"335":1,"337":3,"345":5}}],["resembles",{"1":{"364":1}}],["reset",{"1":{"89":1,"340":2}}],["residency",{"1":{"180":1}}],["resilient",{"1":{"60":1,"71":1,"86":1,"176":2}}],["resilience",{"1":{"56":1,"63":1}}],["rescue",{"1":{"104":1,"105":1,"120":1}}],["reschedules",{"1":{"341":2}}],["reschedule",{"1":{"35":4,"36":3,"317":10}}],["rescheduling",{"1":{"35":2,"36":1,"317":1}}],["resumed",{"1":{"123":2}}],["resumes",{"1":{"87":3,"112":2,"123":1}}],["resume",{"1":{"85":1,"86":1,"87":1,"102":1,"112":1,"116":1,"123":1,"340":1}}],["resulting",{"1":{"83":1}}],["resultoutput",{"1":{"25":3},"2":{"25":1}}],["result2",{"1"
:{"25":2}}],["result`",{"1":{"25":1,"316":4,"324":4}}],["result=false",{"1":{"25":2,"84":2,"133":1,"161":1,"162":1}}],["result",{"1":{"14":1,"15":1,"18":1,"24":10,"25":25,"46":2,"49":1,"55":4,"58":1,"62":1,"87":1,"88":1,"89":3,"95":1,"97":6,"101":1,"104":6,"105":18,"106":1,"109":2,"115":5,"116":3,"120":3,"132":2,"133":6,"135":2,"136":4,"154":1,"158":2,"161":1,"162":1,"279":1,"280":1,"289":1,"300":1,"312":2,"316":8,"318":2,"322":5,"323":2,"324":14,"326":5,"332":4,"333":4,"335":1,"345":2},"2":{"25":5,"55":1,"89":1,"95":1,"104":1,"105":2,"116":1,"133":3}}],["results`",{"1":{"101":1}}],["results",{"1":{"13":1,"18":1,"19":1,"24":1,"32":2,"43":9,"85":2,"86":1,"87":2,"90":2,"92":1,"100":1,"101":2,"102":1,"103":1,"104":5,"105":8,"106":1,"115":2,"120":1,"122":1,"123":1,"124":3,"135":5,"136":6,"152":1,"155":1,"159":1,"160":1,"161":1,"162":1,"174":1,"196":3,"294":3,"301":2,"310":1,"317":4,"322":6,"324":6,"326":2,"329":1,"330":1,"333":4,"345":2},"2":{"135":1,"136":1}}],["responsibilities",{"0":{"182":1},"1":{"182":1}}],["responsible",{"1":{"157":1,"174":1,"202":1}}],["responsive",{"1":{"71":1,"74":1}}],["responsewriter",{"1":{"162":1},"2":{"162":1}}],["responses",{"1":{"62":2,"104":1,"115":1}}],["response",{"1":{"49":2,"61":1,"62":1,"68":2,"85":1,"129":2,"130":1,"148":2,"162":2,"180":1,"220":4,"316":2,"317":3}}],["respond",{"1":{"69":1,"123":1,"212":1}}],["respective",{"1":{"126":1}}],["respectively",{"1":{"83":1,"117":1}}],["respects",{"1":{"88":1,"358":1}}],["respected",{"1":{"39":1,"377":1}}],["respect",{"1":{"37":1,"42":1,"109":1}}],["restoretask",{"1":{"340":1}}],["restore",{"1":{"277":2,"279":4,"340":1}}],["restored",{"1":{"157":2}}],["restoring",{"1":{"276":1,"277":1,"279":1}}],["restricted",{"1":{"260":2}}],["restrict",{"1":{"189":1}}],["restartpolicy",{"1":{"289":1}}],["restarted",{"1":{"169":1}}],["restarts",{"1":{"109":1,"176":1,"204":1,"333":2}}],["restart",{"1":{"88":1,"89":1,"109":2,"176":1,"225":2,"227":3,"231":1,"268":1,"285":1}}],["rest",{"1":{"40":1,"62":5,"72
":3,"101":1,"148":1,"188":2,"223":1,"278":2,"285":2,"297":1,"301":1},"2":{"35":1,"40":1,"52":1}}],["res",{"1":{"24":2,"43":3,"48":1,"109":4,"112":12,"113":1,"116":5,"157":2},"2":{"24":1,"157":1}}],["resolution",{"1":{"176":1}}],["resolvedat",{"1":{"154":1}}],["resolved",{"1":{"153":1,"154":3,"155":1,"156":1}}],["resolve",{"1":{"58":1,"61":1,"340":1}}],["resolves",{"1":{"24":1,"332":3,"333":5,"334":4,"335":5,"336":1,"339":2,"340":6,"341":7,"342":5,"343":5,"344":3,"345":5,"358":1}}],["resources",{"1":{"64":1,"67":1,"69":1,"74":4,"75":2,"79":1,"80":1,"81":1,"82":3,"86":1,"90":1,"92":1,"104":1,"108":1,"120":1,"123":3,"124":3,"139":2,"174":1,"185":1,"209":2,"210":1,"289":1,"306":1,"325":2,"332":1}}],["resource",{"0":{"122":1},"1":{"11":1,"12":1,"22":1,"74":2,"75":1,"76":1,"78":1,"79":2,"81":1,"107":1,"122":1,"134":1,"135":1,"136":1,"137":4,"138":11,"139":1,"170":2,"191":4,"192":1,"209":5,"210":1,"237":1,"268":1}}],["repeating",{"1":{"329":1}}],["repeatedly",{"1":{"170":1}}],["repeated",{"1":{"58":1}}],["repulling",{"0":{"229":1}}],["reprocess",{"1":{"72":1}}],["representing",{"1":{"322":14,"324":15}}],["represents",{"1":{"164":1}}],["represented",{"1":{"100":1,"237":1}}],["represent",{"1":{"38":1,"78":2,"154":1,"322":1,"326":1,"345":1}}],["replica",{"1":{"251":2,"255":8,"298":6,"299":8,"300":1}}],["replicacount",{"1":{"249":3}}],["replicas=0",{"1":{"279":1}}],["replicas",{"0":{"298":1},"1":{"248":1,"290":2,"298":1}}],["replicate",{"1":{"160":1}}],["replication",{"1":{"62":1,"300":1}}],["replaced",{"1":{"240":6,"241":6}}],["replacement",{"1":{"154":2}}],["replaces",{"1":{"153":4,"154":2}}],["replace",{"1":{"127":2,"234":2,"268":1,"283":1}}],["replacing",{"1":{"15":1,"72":1}}],["replay`",{"1":{"72":1,"316":6}}],["replaying",{"1":{"30":1,"72":3}}],["replay",{"1":{"10":1,"30":2,"32":2,"72":14,"89":1,"102":2,"123":1,"124":1,"216":1,"254":2,"316":20,"333":1,"340":3}}],["replayed",{"1":{"3":1,"32":1,"102":1,"115":1,"301":1,"340":1}}],["replays",{"1":{"0":1,"72":1,"87":1,"115":1
,"123":2,"301":1,"333":2,"340":1}}],["repositories",{"1":{"374":2}}],["repository",{"1":{"215":1,"216":9,"225":2,"227":2,"283":1,"374":3},"2":{"216":2}}],["repo=repo",{"1":{"216":1}}],["report",{"1":{"38":1,"40":4,"174":1}}],["reports",{"1":{"19":1,"186":1,"188":1,"288":1}}],["repo",{"1":{"10":1,"216":12,"234":1,"249":1}}],["reassign",{"3":{"208":1,"209":1,"210":1}}],["reassignments",{"0":{"210":1}}],["reassignment",{"1":{"268":2},"3":{"208":1,"209":1,"210":1}}],["reassigned",{"1":{"137":1,"268":6}}],["reasonable",{"1":{"292":1}}],["reasonably",{"1":{"30":1}}],["reasoning",{"1":{"104":1,"107":1}}],["reasons",{"1":{"74":1,"100":1,"104":1,"210":1}}],["reason",{"1":{"72":1,"106":1,"295":1}}],["reactions",{"1":{"212":1}}],["react",{"1":{"113":1,"212":1}}],["reachability",{"1":{"189":1}}],["reaching",{"1":{"168":1}}],["reaches",{"1":{"123":1}}],["reached",{"1":{"30":1,"65":4,"67":1,"132":1,"290":1}}],["reach",{"1":{"100":1,"147":1,"157":1,"166":1,"177":1}}],["reading",{"1":{"307":1,"314":1,"338":1}}],["readiness",{"0":{"5":1},"1":{"118":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["read",{"0":{"298":1},"1":{"158":1,"238":1,"241":2,"251":1,"255":4,"260":1,"297":2,"298":10,"299":4,"300":2,"309":5,"374":9}}],["readable",{"1":{"145":1,"162":2},"2":{"162":2}}],["reads",{"1":{"62":1,"112":1}}],["ready",{"0":{"6":1},"1":{"3":1,"23":1,"92":1,"124":2,"174":2,"224":1,"226":1}}],["real",{"1":{"0":1,"44":1,"81":1,"128":1,"159":1,"177":1,"201":1,"214":1,"216":1,"223":1,"318":1,"333":2,"342":1,"348":1,"368":1}}],["re",{"0":{"223":1},"1":{"0":1,"10":1,"21":2,"23":1,"29":1,"30":2,"33":1,"43":1,"46":1,"47":1,"68":2,"74":1,"82":1,"85":1,"87":2,"92":1,"102":1,"104":1,"109":1,"112":2,"122":1,"123":2,"127":2,"158":1,"162":1,"175":1,"177":1,"182":1,"190":1,"202":1,"208":1,"209":1,"210":1,"223":1,"230":2,"307":3,"320":1,"325":1,"331":1,"340":1,"344":1}}],["rules",{"1":{"102":1}}],["runcmd",{"1":{"364":3}}],["runchild",{"1":{"105":1,"333":1},"2":{"105":1}}],["runandwait",{"1":{"332":1,
"345":2},"2":{"345":1}}],["runat",{"1":{"35":2}}],["runpriorityhigh",{"1":{"84":1},"2":{"84":1}}],["runprioritylow",{"1":{"84":2},"2":{"84":1}}],["runfilter",{"1":{"72":2}}],["runmany",{"1":{"43":1},"2":{"43":1}}],["runmanyopt",{"1":{"43":3},"2":{"43":1}}],["runid",{"1":{"25":3,"97":1,"161":1,"162":1},"2":{"25":1,"161":1,"162":1}}],["runner",{"1":{"158":1}}],["runnables",{"0":{"321":1,"345":1},"1":{"316":2,"317":2,"321":2,"332":5,"333":2,"345":2}}],["runnable",{"1":{"90":1,"104":1}}],["runnowaitchild",{"1":{"333":1}}],["runnowait",{"1":{"25":1,"55":1,"84":1,"97":2,"161":2,"162":2,"332":1,"345":4},"2":{"25":1,"55":1,"84":1,"97":1,"161":2,"162":2,"345":2}}],["running",{"0":{"23":1,"59":1,"97":1,"125":1,"156":1,"284":1,"289":1,"358":1,"363":1,"365":1},"1":{"2":1,"3":1,"5":1,"7":2,"10":1,"13":1,"18":2,"19":1,"21":1,"23":1,"25":3,"26":1,"27":1,"30":2,"34":2,"37":1,"38":3,"39":1,"40":1,"42":1,"43":1,"49":2,"55":1,"66":1,"67":3,"68":6,"69":1,"72":5,"74":4,"75":2,"76":2,"77":1,"85":1,"89":1,"93":3,"97":1,"105":1,"107":1,"109":1,"112":1,"113":1,"115":1,"116":1,"123":3,"124":3,"129":3,"137":4,"138":1,"139":1,"145":1,"158":1,"161":1,"162":2,"170":1,"171":2,"174":1,"176":1,"209":3,"222":2,"224":1,"225":8,"226":1,"227":2,"230":1,"234":2,"248":2,"253":1,"266":2,"267":2,"272":1,"276":1,"278":1,"279":1,"281":1,"285":1,"286":3,"289":1,"297":3,"306":1,"314":4,"327":2,"328":3,"332":1,"333":3,"338":1,"347":3,"348":1,"358":1,"359":1,"363":1,"365":1,"367":1,"374":1},"2":{"72":1}}],["runreference",{"1":{"97":1}}],["runrefs",{"1":{"43":1}}],["runref",{"1":{"25":5},"2":{"25":3}}],["run`",{"1":{"24":2,"28":1,"144":1,"145":5,"191":1,"192":1,"201":2,"256":1,"268":1,"316":6,"317":2,"322":2,"323":14,"324":12}}],["runtimes",{"1":{"175":1}}],["runtime",{"0":{"254":1,"271":1},"1":{"13":1,"15":1,"16":1,"32":1,"80":2,"81":2,"82":1,"85":1,"86":2,"87":2,"89":1,"90":1,"100":1,"101":3,"103":1,"104":2,"106":2,"107":1,"108":1,"110":1,"111":1,"113":1,"115":1,"116":1,"117":1,"154":2,"158":1,"251":1,"293":1,"
308":1,"309":1,"330":1,"333":2}}],["runsclient",{"1":{"332":1,"340":1,"344":1}}],["runs`",{"1":{"149":2,"322":4,"324":4}}],["runs=name",{"1":{"78":1}}],["runs=digit",{"1":{"78":1}}],["runs=1",{"1":{"74":1}}],["runs",{"0":{"27":1,"34":1,"35":1,"36":1,"38":1,"43":1,"316":1,"340":1,"369":1},"1":{"5":1,"10":1,"11":1,"20":1,"23":1,"26":1,"27":1,"34":9,"35":12,"36":5,"37":2,"38":5,"43":2,"49":1,"51":1,"55":1,"72":40,"74":4,"77":1,"78":6,"79":4,"80":1,"82":3,"83":2,"84":2,"87":1,"89":1,"93":1,"101":2,"104":2,"110":1,"117":2,"118":1,"121":3,"124":1,"126":1,"145":1,"149":3,"150":2,"151":3,"153":2,"154":1,"156":1,"158":4,"161":4,"162":3,"164":1,"167":1,"173":1,"174":1,"181":1,"182":1,"188":1,"193":1,"203":1,"205":1,"206":1,"209":1,"271":1,"278":1,"287":1,"288":1,"291":2,"292":1,"294":4,"297":4,"300":2,"307":11,"309":1,"316":71,"317":8,"320":2,"322":46,"324":34,"327":1,"332":9,"333":5,"338":2,"340":6,"341":3,"344":2,"369":4,"370":1},"2":{"72":4,"161":3,"162":3,"332":1}}],["run",{"0":{"37":1,"228":1},"1":{"0":1,"4":2,"7":6,"10":4,"11":6,"12":5,"14":1,"15":1,"16":2,"17":2,"18":1,"21":6,"22":3,"23":3,"24":6,"25":8,"26":2,"27":2,"28":2,"29":1,"30":1,"34":5,"35":22,"36":5,"37":3,"38":9,"39":2,"41":1,"42":1,"43":24,"46":1,"47":1,"48":4,"49":6,"50":1,"52":2,"55":6,"64":3,"65":1,"72":20,"74":4,"77":1,"81":1,"82":5,"83":4,"84":11,"88":1,"90":1,"92":3,"93":1,"94":2,"97":7,"101":3,"103":1,"104":20,"105":25,"106":2,"114":1,"115":4,"117":3,"118":1,"119":1,"120":4,"121":7,"123":4,"124":3,"126":1,"127":23,"129":1,"130":3,"132":3,"133":6,"134":1,"135":1,"136":2,"137":2,"138":3,"139":1,"142":2,"145":17,"147":1,"148":1,"149":6,"150":1,"152":4,"153":2,"154":9,"155":1,"156":1,"158":5,"160":1,"161":6,"162":3,"166":1,"173":1,"174":4,"175":2,"176":2,"177":3,"179":1,"181":1,"182":4,"184":1,"186":1,"187":1,"188":1,"189":2,"190":1,"192":2,"197":1,"198":2,"199":1,"201":2,"203":1,"206":1,"207":2,"209":1,"214":1,"225":6,"227":1,"228":5,"229":1,"234":6,"238":5,"247":3,"253":1,"254":1,"257":6,"266":4,"268":
1,"278":1,"279":2,"282":1,"284":1,"286":3,"287":2,"288":1,"289":8,"290":1,"294":5,"295":2,"296":4,"297":1,"301":1,"307":10,"309":40,"313":10,"316":89,"317":46,"321":3,"322":69,"323":2,"324":76,"325":5,"326":3,"327":2,"328":1,"329":1,"331":1,"332":14,"333":15,"336":2,"338":1,"340":14,"341":8,"345":26,"348":1,"349":1,"354":1,"358":2,"360":1,"361":1,"363":1,"364":2,"365":8,"366":1,"367":2,"372":3,"373":2,"374":2},"2":{"7":1,"24":5,"25":4,"35":1,"43":5,"55":3,"72":2,"84":3,"97":2,"104":8,"105":8,"115":1,"120":4,"121":2,"129":1,"130":1,"132":1,"133":4,"135":1,"136":1,"148":1,"149":2,"154":1,"161":3,"162":2,"225":1,"268":1,"294":2,"322":1,"325":1,"326":2,"331":1,"345":3,"349":1,"367":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["ruby",{"1":{"0":1,"14":1,"16":2,"20":1,"21":2,"24":1,"25":2,"35":5,"39":1,"40":3,"43":1,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":1,"72":2,"74":1,"78":1,"81":1,"82":2,"84":3,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":4,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":1,"121":1,"125":1,"127":3,"132":1,"133":1,"135":1,"136":2,"138":1,"140":1,"144":2,"149":2,"152":2,"153":1,"154":1,"155":1,"156":1,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["snapshots",{"1":{"280":1}}],["snapshot",{"0":{"277":1},"1":{"276":2,"277":1,"278":1,"279":5}}],["snippet",{"1":{"7":1,"241":1}}],["snippets",{"1":{"1":1,"7":2}}],["sql`",{"1":{"283":1}}],["sql",{"1":{"248":1,"277":2}}],["squeeze",{"1":{"209":1}}],["sqs",{"1":{"73":1}}],["swallowed",{"1":{"333":1}}],["swapping",{"1":{"183":1}}],["switch",{"1":{"206":1,"348":1,"351":1}}],["smtp",{"0":{"302":1},"1":{"265":14,"302":2,"303":1,"304":8,"305":5},"2":{"304":1}}],["smoke",{"1":{"206":1}}],["sms",{"1":{"201":1}}],["small",{"1":{"160":1,"231":1,"292":1}}],["smaller",{"1":{"107":1}}],["sso",{"1":{"188":1}}],["sslmode=disable",{"1":{"225":2,"245":1}}],["ssl",{"1":{"127":1,"241":2,"245":1,"255":2}}],["s3offload",{"1":{"157":2}}],["s3url"
,{"1":{"157":6},"2":{"157":1}}],["s3input",{"1":{"157":1}}],["s3client",{"1":{"157":3}}],["s3",{"1":{"157":9,"158":1},"2":{"157":2}}],["skips",{"1":{"154":2}}],["skipscope",{"1":{"52":2}}],["skipif",{"1":{"113":1,"117":2}}],["skiponevent",{"1":{"113":3,"117":4}}],["skipping",{"1":{"113":1,"309":2}}],["skippayload",{"1":{"52":1}}],["skipped`",{"1":{"117":1,"309":2}}],["skipped",{"1":{"52":1,"113":3,"117":8,"118":1,"268":5,"309":7,"322":2},"2":{"117":1}}],["skip",{"1":{"48":2,"50":1,"52":6,"104":1,"105":1,"112":2,"113":16,"114":1,"117":11,"120":1,"149":2,"158":2,"196":1,"227":1,"268":1,"294":1,"312":2,"318":2,"335":1,"344":1},"2":{"52":1}}],["skill",{"1":{"10":1}}],["skills",{"0":{"10":1},"1":{"7":3,"9":2,"10":3,"207":1},"3":{"9":1,"10":1,"11":1,"12":1}}],["sydney",{"1":{"198":1}}],["synchronously",{"1":{"322":4,"324":2}}],["synchronous",{"1":{"94":1,"158":1,"329":1,"330":1}}],["sync",{"1":{"39":1,"40":3,"55":1,"68":2,"104":2,"105":2,"153":12,"155":44,"323":4,"324":2,"328":2,"330":13,"341":1},"2":{"104":2,"105":2}}],["syntax",{"1":{"38":2,"220":1}}],["systems",{"1":{"17":1,"30":1,"44":1,"71":1,"73":1,"101":1,"102":1,"107":1,"120":1,"175":1,"318":1,"342":1}}],["system",{"1":{"0":1,"29":1,"37":2,"42":2,"44":1,"57":1,"74":5,"78":1,"81":1,"127":2,"137":1,"146":1,"170":1,"174":1,"175":1,"176":1,"222":1,"243":1,"248":1,"318":3,"342":1}}],["slept",{"1":{"333":1}}],["sleepuntil",{"1":{"333":1}}],["sleepfor",{"1":{"109":2,"123":1,"333":2},"2":{"109":1}}],["sleepresult",{"1":{"333":2}}],["sleepres",{"1":{"109":2}}],["sleep`",{"1":{"109":2}}],["sleepcondition",{"1":{"89":1,"110":4,"113":8,"116":1,"118":4,"310":1},"2":{"110":2,"113":2,"118":2}}],["sleeping",{"1":{"66":1,"108":1,"109":4,"123":1,"160":2}}],["sleeps",{"1":{"65":1,"88":1,"89":1,"90":1,"104":1,"123":1}}],["sleep",{"0":{"108":1,"109":1,"110":1},"1":{"31":3,"39":1,"65":5,"66":5,"68":8,"74":3,"78":6,"84":4,"87":2,"88":2,"89":11,"101":2,"105":3,"108":3,"109":19,"110":13,"112":1,"113":1,"115":1,"116":2,"117":12,"118":12,"1
20":1,"123":2,"124":4,"138":3,"141":1,"142":5,"160":8,"161":1,"308":1,"310":8,"316":4,"325":5,"333":4},"2":{"39":1,"65":1,"66":1,"68":2,"74":1,"78":1,"89":2,"109":2,"138":1,"141":1,"142":1,"160":2,"325":2}}],["slug`",{"1":{"255":1}}],["slug",{"1":{"243":1,"255":1}}],["slice",{"1":{"160":1},"2":{"160":1}}],["slim",{"1":{"127":3}}],["slight",{"1":{"37":1,"42":1,"294":1}}],["slightly",{"1":{"37":1,"42":1,"300":1}}],["slower",{"1":{"295":1}}],["slowest",{"1":{"268":1}}],["slow",{"0":{"295":1},"1":{"115":1,"231":1,"289":1}}],["slotreleaseworkflow",{"1":{"138":2}}],["slot`",{"1":{"138":1,"309":2}}],["slots=1",{"1":{"328":1}}],["slots=10",{"1":{"135":1}}],["slots`",{"1":{"268":3,"271":1,"307":1}}],["slots",{"0":{"22":1,"209":1},"1":{"20":1,"22":2,"67":1,"74":3,"75":3,"76":3,"77":1,"82":1,"86":1,"104":1,"123":3,"135":1,"137":2,"169":2,"209":2,"268":3,"289":7,"294":2},"3":{"19":1,"20":1,"21":1,"22":1,"208":1,"209":1,"210":1}}],["slot",{"0":{"137":1,"138":1},"1":{"3":1,"22":2,"75":2,"76":1,"86":1,"87":4,"92":1,"101":2,"103":2,"104":3,"108":1,"109":2,"110":1,"112":1,"122":1,"123":12,"124":5,"137":4,"138":13,"139":2,"169":2,"209":3,"257":4,"307":2,"309":4,"333":2},"2":{"138":2}}],["slashes",{"1":{"376":1}}],["slash",{"0":{"220":1},"1":{"45":1,"212":1,"217":2,"220":6}}],["slackinteraction",{"1":{"221":1}}],["slackinteractionoutput",{"1":{"221":3}}],["slackinteractioninput",{"1":{"221":6}}],["slackuser",{"1":{"221":2}}],["slackaction",{"1":{"221":2}}],["slackcommand",{"1":{"220":1}}],["slackcommandoutput",{"1":{"220":3}}],["slackcommandinput",{"1":{"220":6}}],["slackmention",{"1":{"219":1}}],["slackeventoutput",{"1":{"219":3}}],["slackeventinput",{"1":{"219":6}}],["slackevent",{"1":{"219":2}}],["slack",{"0":{"217":1,"218":1},"1":{"44":1,"45":2,"46":1,"212":1,"217":3,"218":8,"219":17,"220":16,"221":15,"265":8,"318":1,"342":1}}],["src=",{"1":{"369":1,"370":1,"371":1}}],["src",{"1":{"21":1,"364":1,"367":1}}],["spin",{"1":{"234":1,"238":1}}],["spins",{"1":{"234":1}}],["spikes",{"1":{
"74":2}}],["spike",{"1":{"74":1}}],["speeds",{"1":{"294":1}}],["spent",{"1":{"268":2}}],["spending",{"1":{"209":1}}],["specs",{"1":{"209":1}}],["special",{"1":{"119":1,"121":4,"156":3}}],["specified",{"1":{"57":2,"64":1,"65":4,"83":1,"108":1,"109":1,"110":3,"113":1,"129":1,"134":1,"252":1,"307":3,"312":4,"313":2,"316":15,"318":4,"322":4,"332":2,"333":4,"345":1,"354":1,"365":1}}],["specifies",{"1":{"39":1,"243":3,"289":10}}],["specifically",{"1":{"158":1}}],["specific",{"1":{"17":1,"23":1,"27":1,"34":3,"35":1,"38":2,"40":1,"46":2,"49":1,"72":1,"78":2,"82":2,"112":1,"126":1,"130":1,"134":6,"136":1,"150":1,"151":1,"158":1,"166":1,"183":1,"191":1,"196":1,"199":1,"216":1,"251":2,"254":1,"268":1,"283":1,"309":1,"311":4,"317":4,"322":4,"324":4,"333":1,"334":1,"340":1,"341":1,"345":1,"354":1,"355":1,"365":1,"366":1}}],["specifying",{"0":{"135":1,"136":1},"1":{"63":1,"151":1,"310":2,"333":2,"354":1}}],["specify",{"1":{"34":1,"36":1,"41":1,"46":2,"56":1,"65":3,"82":3,"95":1,"131":1,"132":3,"133":2,"134":2,"135":1,"136":1,"355":1}}],["spec",{"1":{"88":1,"112":1,"113":1,"130":2,"234":2,"238":1,"289":1},"2":{"234":1,"238":1}}],["sprintf",{"1":{"133":1},"2":{"133":1}}],["spread",{"1":{"107":1,"153":1}}],["splitting",{"1":{"248":1}}],["split",{"1":{"95":1,"96":1},"2":{"95":1,"96":1}}],["span",{"1":{"144":1,"145":5}}],["spans",{"0":{"145":1},"1":{"144":5,"145":10}}],["spaces",{"1":{"38":1}}],["spawnchildren",{"1":{"333":1}}],["spawnchild",{"1":{"333":1}}],["spawnworkflows",{"1":{"294":2},"2":{"294":1}}],["spawnworkflowsopts",{"1":{"294":2},"2":{"294":1}}],["spawned",{"1":{"133":1,"135":1,"136":1,"158":1,"309":1,"314":2,"333":4,"373":1}}],["spawns",{"1":{"85":4,"86":1,"101":1,"105":1,"106":1,"123":1,"326":1,"333":2}}],["spawn",{"1":{"13":1,"31":1,"32":3,"86":1,"87":2,"88":1,"90":4,"101":4,"102":1,"103":1,"104":9,"105":10,"106":4,"107":1,"115":1,"120":1,"133":1,"294":6,"301":1,"326":1,"333":1}}],["spawning",{"0":{"90":1,"103":1,"104":1,"105":1},"1":{"13":1,"31":1,"85":1,"87":2,"90":3
,"100":3,"101":2,"103":2,"104":1,"105":5,"120":1,"294":1,"326":1,"333":1}}],["spun",{"1":{"19":1}}],["sdk`",{"1":{"330":1}}],["sdktrace",{"1":{"144":1}}],["sdk",{"0":{"62":1,"272":1},"1":{"16":5,"18":1,"24":1,"43":1,"55":2,"62":11,"65":1,"72":5,"105":1,"126":2,"141":3,"142":1,"144":5,"146":1,"152":2,"153":1,"154":1,"155":1,"156":1,"157":2,"166":2,"181":1,"251":1,"270":2,"274":2,"306":2,"307":1,"316":2,"317":2,"321":1,"322":1,"325":1,"326":2,"327":1,"329":1,"331":1,"332":2,"345":1,"376":1,"377":24},"2":{"62":1,"144":1}}],["sdks",{"0":{"375":1},"1":{"1":1,"15":1,"16":1,"25":1,"34":1,"38":1,"52":1,"55":1,"61":1,"72":1,"144":1,"145":1,"375":2,"376":1,"377":6}}],["said",{"1":{"331":1}}],["saving",{"1":{"216":1}}],["save",{"1":{"40":1}}],["sampling",{"0":{"301":1},"1":{"265":4,"301":6}}],["sampled",{"1":{"301":5}}],["sample",{"1":{"258":2,"301":1,"348":1}}],["samples",{"1":{"148":1}}],["same",{"0":{"181":1,"187":2},"1":{"4":1,"10":1,"17":1,"20":1,"26":1,"46":1,"50":1,"55":4,"72":3,"74":2,"76":1,"81":1,"83":2,"84":2,"85":1,"89":1,"93":1,"97":1,"100":2,"102":1,"103":1,"115":2,"118":1,"121":1,"123":2,"130":1,"131":2,"132":5,"133":5,"136":1,"145":1,"153":1,"155":2,"156":1,"157":2,"158":1,"160":4,"161":1,"173":1,"179":1,"181":1,"187":1,"216":2,"220":1,"249":1,"253":1,"255":1,"289":1,"297":1,"301":1,"327":1,"328":1,"329":1,"330":1,"331":1,"333":2,"345":2}}],["saturating",{"1":{"295":1}}],["saturday",{"1":{"38":1}}],["satisfy",{"1":{"155":1}}],["satisfies",{"1":{"153":3}}],["satisfied",{"1":{"116":1,"118":3,"333":1}}],["say",{"1":{"25":3,"109":1,"331":2},"2":{"25":3,"331":1}}],["safest",{"1":{"279":1}}],["safely",{"1":{"58":1,"61":2,"275":1,"280":1}}],["safety",{"1":{"55":2,"331":1}}],["safe",{"1":{"15":1,"30":1,"55":1,"152":1,"176":1,"234":1,"238":1,"280":1,"325":3}}],["shm",{"1":{"231":1}}],["shm`",{"1":{"231":1}}],["shell",{"1":{"225":2,"227":1,"231":1}}],["shutdown",{"1":{"144":1,"170":2,"328":1},"2":{"144":1}}],["shut",{"1":{"144":1,"279":1}}],["sharing",{"1":{"327":1,"329"
:1,"330":1}}],["sharding",{"1":{"175":2,"177":1}}],["share",{"1":{"74":1,"158":1,"164":1,"173":1,"327":2}}],["sharedconfig",{"1":{"243":2,"245":1,"278":1}}],["shared",{"0":{"243":1},"1":{"55":1,"74":1,"81":1,"82":1,"123":1,"137":1,"139":1,"152":1,"158":1,"161":2,"162":2,"231":4,"243":2},"2":{"161":2,"162":2}}],["shallow",{"1":{"153":1}}],["shape",{"1":{"85":1,"92":1,"100":4,"104":1,"177":1}}],["shows",{"1":{"226":1,"278":1,"371":1}}],["show",{"1":{"84":2,"157":1}}],["shown",{"1":{"7":1,"96":1,"255":1,"301":1}}],["shouldn",{"1":{"65":1}}],["shouldnotretry",{"1":{"61":1}}],["shouldskip",{"1":{"48":1,"52":5,"112":1,"113":1},"2":{"52":1}}],["shorthand",{"1":{"307":1,"310":2,"333":2}}],["shortcuts",{"1":{"221":1}}],["short",{"1":{"57":1,"161":1,"207":1,"294":1}}],["sh",{"1":{"7":1,"130":3,"227":2,"238":1,"349":1},"2":{"7":1,"130":1,"227":1,"349":1}}],["sometimes",{"1":{"373":1}}],["something",{"1":{"16":1,"24":1,"27":1,"30":1,"31":1,"32":1,"35":2,"46":2,"112":1,"113":1,"136":1,"146":1,"214":1,"220":1,"276":1}}],["somewhere",{"1":{"234":1,"238":1,"374":1}}],["sorts",{"1":{"213":1}}],["solve",{"1":{"211":1}}],["solution",{"1":{"164":2}}],["solutions",{"1":{"164":1}}],["southeast",{"1":{"198":1}}],["sourcenames",{"1":{"342":1}}],["sources",{"1":{"46":3}}],["source",{"0":{"187":1},"1":{"0":1,"4":1,"5":1,"46":7,"82":1,"149":6,"150":1,"151":1,"173":1,"179":1,"187":1,"190":1,"212":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":3,"318":4}}],["soc",{"1":{"186":1}}],["soft",{"1":{"132":4,"133":3},"2":{"132":2,"133":1}}],["soon",{"1":{"16":1,"43":1,"65":1,"83":1,"92":1,"116":1,"118":3,"144":1,"152":1,"153":1,"154":1,"155":1,"156":1,"188":1}}],["soohoon",{"1":{"5":1}}],["sigkill",{"1":{"210":1}}],["signing",{"1":{"214":3,"218":2,"219":1,"220":1,"221":1}}],["significantly",{"1":{"209":1,"286":1,"295":1,"298":1,"329":1}}],["significant",{"1":{"43":1,"139":1}}],["signature",{"1":{"158":1,"332":17,"333":1,"341":1}}],["signatures",{"1":{"152":1}}],["signals",{"1":{"69":1,"111":1,"114":1
,"170":1}}],["signal",{"1":{"65":1,"66":1,"67":3,"68":3,"69":3,"118":2,"145":1,"238":1,"338":1},"2":{"65":1,"66":1,"68":1}}],["signups",{"1":{"254":1}}],["signup`",{"1":{"49":1,"254":1}}],["signup",{"1":{"49":1}}],["signed",{"1":{"7":1,"157":1,"376":2}}],["sign",{"1":{"7":1}}],["sizing",{"1":{"177":1}}],["size=15040",{"1":{"296":1}}],["sizes",{"1":{"292":1}}],["size`",{"1":{"144":2,"167":1,"254":1,"316":4}}],["size",{"1":{"104":1,"144":2,"157":1,"167":3,"231":2,"241":1,"254":1,"267":4,"274":2,"289":1,"290":1,"293":1,"294":3}}],["silently",{"1":{"176":1}}],["situation",{"1":{"279":1}}],["situations",{"1":{"24":1,"27":1,"131":1}}],["sit",{"1":{"175":1}}],["sites",{"1":{"201":1}}],["site",{"1":{"154":1}}],["sits",{"1":{"124":1}}],["six",{"1":{"117":6}}],["sibling",{"1":{"117":1}}],["side",{"1":{"58":1,"62":1,"102":2,"109":1,"137":1,"145":1,"157":2,"341":1}}],["sidebar",{"1":{"36":1,"41":1,"374":1}}],["simultaneously",{"1":{"106":1}}],["simulate",{"1":{"68":1,"289":1}}],["similarly",{"1":{"84":1,"152":1,"325":1}}],["similar",{"1":{"32":1,"55":1,"72":3,"73":1,"77":1,"209":1,"316":2,"317":2,"369":1}}],["simplicity",{"1":{"175":1}}],["simply",{"1":{"7":1,"40":1,"55":1,"58":1,"72":1,"157":1,"225":3,"289":1}}],["simpler",{"1":{"124":1,"175":1,"216":1}}],["simpleconcurrency",{"1":{"74":1}}],["simplest",{"1":{"52":1,"100":1}}],["simpleevent",{"1":{"50":2,"52":1}}],["simpletask",{"1":{"21":1}}],["simplewithzod",{"1":{"20":2}}],["simpleoutput",{"1":{"14":6,"25":1,"55":1}}],["simpleinput",{"1":{"14":6,"24":4,"25":1,"43":1,"55":1,"65":1,"66":1,"294":1}}],["simple`",{"1":{"7":1,"372":1}}],["simple",{"1":{"4":1,"7":2,"14":2,"19":1,"20":5,"34":1,"35":6,"39":2,"40":3,"43":4,"48":2,"50":2,"52":5,"55":6,"56":2,"57":1,"58":4,"59":4,"63":1,"74":2,"78":1,"80":1,"93":1,"97":4,"112":2,"113":2,"149":3,"175":1,"294":3,"326":5,"367":4,"372":1},"2":{"35":2,"40":1,"43":1,"55":4,"58":2,"59":2,"97":2,"149":1}}],["since=datetime",{"1":{"72":2}}],["since",{"1":{"32":1,"52":2,"72":1,"84":1,"100":1,"28
9":1,"326":1,"329":1,"337":1,"338":1}}],["single",{"1":{"2":1,"36":1,"46":1,"49":1,"61":1,"74":1,"75":1,"82":1,"83":1,"85":1,"100":1,"104":1,"105":1,"115":1,"116":1,"118":5,"164":1,"188":1,"209":1,"224":1,"261":2,"268":1,"294":1,"297":1,"321":1,"327":1,"345":2}}],["stdout",{"1":{"161":1},"2":{"161":1}}],["studio",{"1":{"372":1}}],["stubworkflow",{"1":{"55":2}}],["stuboutput",{"1":{"55":3}}],["stubinput",{"1":{"55":3}}],["stubs",{"1":{"55":1},"2":{"55":1}}],["stub",{"1":{"55":10},"2":{"55":1}}],["stuck",{"0":{"169":1},"1":{"10":1}}],["sticky=true",{"1":{"133":1}}],["sticky=stickystrategy",{"1":{"132":1,"133":1}}],["stickychildworkflow",{"1":{"133":2}}],["stickyresult",{"1":{"132":2,"133":2}}],["stickyinput",{"1":{"132":2,"133":1}}],["stickydag",{"1":{"132":4},"2":{"132":1}}],["stickystrategy",{"1":{"132":2,"133":1},"2":{"132":2,"133":1}}],["stickyworkflow",{"1":{"132":2}}],["sticky",{"0":{"131":1,"132":1,"133":1},"1":{"131":2,"132":21,"133":17,"136":2,"307":3,"333":2},"2":{"132":1,"133":4}}],["stiva",{"1":{"52":7}}],["still",{"1":{"37":1,"39":1,"42":1,"112":1,"137":1,"158":1,"171":1,"301":1}}],["stop`",{"1":{"362":1}}],["stopping",{"0":{"361":1}}],["stopped",{"1":{"65":2}}],["stops",{"1":{"86":1}}],["stop",{"1":{"40":1,"65":1,"67":1,"71":1,"87":1,"106":1,"279":1,"361":2,"362":7}}],["storing",{"1":{"223":1,"296":1,"327":1}}],["storage",{"1":{"35":1,"174":1,"177":1}}],["store",{"1":{"35":1,"73":1,"174":1,"234":1,"238":1,"327":1}}],["stored",{"1":{"3":1,"25":1,"35":2,"37":1,"40":1,"157":1,"173":1,"188":1,"296":1,"301":1}}],["style",{"1":{"23":1,"25":3,"38":1,"101":9,"130":1,"152":1,"329":1,"377":1}}],["strconv",{"1":{"294":2},"2":{"294":1}}],["strong",{"1":{"243":1}}],["stroke",{"1":{"101":14,"173":6}}],["strokes",{"1":{"23":1}}],["strftime",{"1":{"95":1,"96":1},"2":{"95":1,"96":1}}],["straight",{"1":{"124":1}}],["straightforward",{"1":{"72":1}}],["strategy`",{"1":{"262":2,"266":1,"273":1,"376":1}}],["strategy",{"1":{"74":3,"75":4,"76":2,"77":2,"78":5,"132":2,"133":1,"1
36":1,"252":1,"262":2,"266":1,"273":1,"307":6,"322":4,"376":5}}],["strategy=concurrencylimitstrategy",{"1":{"74":1,"78":2}}],["strategy=none",{"1":{"21":1,"238":1,"253":1}}],["strategies",{"0":{"78":1},"1":{"63":1,"67":1,"74":3,"78":1,"132":1,"275":1,"280":1}}],["strict",{"1":{"75":1,"180":1}}],["strictly",{"1":{"75":1}}],["stripepayment",{"1":{"214":1}}],["stripepaymentoutput",{"1":{"214":3}}],["stripepaymentinput",{"1":{"214":6}}],["stripedata",{"1":{"214":2}}],["stripeobject",{"1":{"214":2}}],["stripe",{"0":{"213":1},"1":{"44":1,"45":2,"46":9,"212":1,"213":3,"214":23,"318":1,"342":1}}],["stringify",{"1":{"157":3},"2":{"157":1}}],["stringdata",{"1":{"130":1}}],["strings",{"1":{"50":1,"53":1,"135":1,"238":2},"2":{"50":1,"53":1}}],["string",{"1":{"14":3,"35":1,"40":3,"46":1,"52":3,"55":3,"65":1,"81":2,"82":1,"105":2,"121":1,"135":1,"136":4,"142":9,"149":5,"154":2,"155":4,"157":10,"160":4,"191":1,"214":6,"216":9,"219":8,"220":12,"221":9,"252":1,"255":2,"259":3,"289":2,"291":1,"294":4,"299":1,"309":3,"333":2,"334":2,"337":1,"341":3,"344":1,"345":3,"362":3}}],["streamed",{"1":{"333":1}}],["streams",{"0":{"161":1},"1":{"333":1}}],["streamtaskoutput",{"1":{"160":2}}],["streamtaskinput",{"1":{"160":1,"161":1,"162":1},"2":{"161":1,"162":1}}],["streamtask",{"1":{"160":1}}],["stream`",{"1":{"160":1,"162":1,"309":2,"316":2}}],["streamingresponse",{"1":{"162":2}}],["streamingworkflow",{"1":{"161":2,"162":2},"2":{"161":2,"162":2}}],["streamingtask",{"1":{"160":1,"161":1,"162":1},"2":{"161":1,"162":1}}],["streaming",{"0":{"159":1,"162":1},"1":{"16":1,"159":2,"160":2,"161":1,"162":3}}],["stream",{"0":{"160":1},"1":{"16":2,"159":1,"160":10,"161":8,"162":12,"167":1,"309":4,"333":1,"340":2},"2":{"160":3,"161":3,"162":2}}],["structured",{"1":{"106":1,"154":1,"325":1}}],["structure",{"1":{"91":1,"95":1,"98":1,"105":1,"117":1,"141":1,"175":1}}],["struct",{"1":{"14":2,"55":2,"105":4,"214":5,"216":5,"219":4,"220":3,"221":5}}],["str",{"1":{"14":2,"39":4,"58":2,"59":2,"60":2,"65":2,"66":2,
"68":4,"74":1,"78":2,"89":5,"104":1,"105":7,"116":2,"121":2,"132":4,"133":4,"136":2,"138":2,"141":2,"142":2,"153":12,"155":6,"160":2,"214":3,"216":4,"219":4,"220":6,"221":4,"294":3,"307":10,"309":2,"310":1,"312":4,"314":6,"316":18,"317":3,"318":6,"322":13,"323":6,"324":10,"326":10,"328":1,"330":18,"331":2}}],["steprunid`",{"1":{"154":1}}],["steprunerrors",{"1":{"121":1},"2":{"121":1}}],["steperrors",{"1":{"121":1}}],["stepname",{"1":{"121":3}}],["step4",{"1":{"95":4,"96":4}}],["step3",{"1":{"95":4,"96":4}}],["step2",{"1":{"94":2,"95":6,"96":5,"133":2}}],["stepoutput",{"1":{"94":4,"95":6,"96":2,"110":4,"113":8,"117":18,"118":4,"121":1,"138":2},"2":{"121":1}}],["step1b",{"1":{"132":3,"133":2}}],["step1a",{"1":{"132":3,"133":2}}],["step1output",{"1":{"95":3,"121":3},"2":{"95":1,"121":1}}],["step1",{"1":{"21":1,"39":4,"74":3,"94":4,"95":8,"96":6,"121":3,"135":3,"136":4,"138":3,"141":3,"142":13}}],["step",{"0":{"136":1,"277":1,"278":1,"279":1},"1":{"9":2,"10":2,"45":2,"68":1,"79":1,"81":5,"82":13,"86":1,"87":1,"94":1,"95":2,"100":2,"101":1,"105":1,"121":18,"136":2,"142":2,"144":1,"145":9,"154":1,"158":2,"207":2,"214":2,"268":1,"278":1,"279":3,"295":2,"296":2,"297":2,"309":9,"317":5,"322":4,"323":4,"324":4,"372":1,"377":1}}],["steps",{"0":{"8":1,"178":1,"184":1},"1":{"7":2,"30":1,"32":1,"68":1,"74":4,"81":1,"82":1,"105":2,"114":1,"132":1,"138":3,"142":1,"164":1,"223":1,"225":1,"281":1,"294":1}}],["staging",{"1":{"289":1,"351":1}}],["stage",{"1":{"98":2,"101":1,"127":8}}],["stack",{"1":{"234":2,"240":3,"278":2}}],["stack`",{"1":{"234":1,"243":1,"245":1,"249":1}}],["stale",{"1":{"76":1,"300":1}}],["starvation",{"1":{"209":2}}],["starve",{"1":{"104":1,"123":1,"210":1}}],["starved",{"1":{"75":1,"209":1}}],["star",{"1":{"46":1}}],["start`",{"1":{"362":1}}],["startout",{"1":{"117":3},"2":{"117":1}}],["startup",{"1":{"82":1,"278":1}}],["starts",{"1":{"20":1,"21":1,"34":1,"38":1,"75":2,"76":2,"85":1,"109":1,"113":1,"123":1,"124":1,"214":1,"279":1,"288":1,"327":2,"328":1}}],["star
tblocking",{"1":{"20":1},"2":{"20":1}}],["start",{"0":{"295":1},"1":{"10":1,"19":1,"20":4,"21":2,"34":1,"37":2,"38":1,"42":1,"55":1,"72":2,"76":1,"82":1,"89":2,"93":1,"95":1,"100":1,"110":4,"113":13,"116":2,"117":14,"118":11,"124":2,"126":1,"127":1,"135":2,"138":1,"144":1,"145":3,"156":1,"162":1,"168":1,"174":1,"178":1,"182":1,"196":1,"224":1,"225":3,"227":3,"231":1,"252":1,"292":1,"295":1,"307":1,"311":2,"313":2,"314":2,"316":6,"320":2,"322":2,"324":2,"326":1,"329":1,"338":1,"360":2,"362":13,"364":1,"372":5,"374":1},"2":{"20":1,"82":1,"135":1,"156":1,"326":1}}],["starting",{"0":{"21":1,"360":1},"1":{"10":2,"21":1,"65":2,"66":1,"68":1,"72":1,"74":1,"77":1,"82":1,"109":1,"112":1,"113":1,"116":1,"118":1,"145":1,"158":1,"207":1,"289":1,"294":1,"312":2,"318":2,"322":2,"324":1,"362":2,"372":1}}],["started",{"0":{"6":1},"1":{"2":1,"4":1,"6":1,"123":1,"164":1,"218":1,"230":1,"307":1,"348":1,"359":1,"362":1},"3":{"7":1,"8":1}}],["stable",{"1":{"46":1,"52":1,"74":1,"284":1,"288":1}}],["stays",{"1":{"158":1}}],["stay",{"1":{"43":1,"63":1,"74":1,"152":1,"153":1,"154":1,"155":1,"156":1,"189":1,"209":1}}],["standarderror",{"1":{"104":1,"105":1,"120":1}}],["standard",{"1":{"38":1,"115":1,"119":1,"148":1,"278":1,"302":1,"307":1,"308":1,"333":1}}],["standalonetask",{"1":{"50":1,"52":1,"55":1,"105":2,"133":1},"2":{"50":1,"52":1,"55":1,"105":1,"133":1}}],["standalone",{"0":{"324":1},"1":{"13":1,"55":3,"158":1,"307":8,"331":1,"345":2}}],["stat",{"1":{"129":1}}],["statistics",{"1":{"129":2,"314":6,"338":2}}],["statictask",{"1":{"82":1}}],["statickey",{"1":{"82":1}}],["static",{"0":{"80":1,"82":1},"1":{"46":5,"80":2,"82":7,"100":1,"146":1,"268":1,"318":4,"325":1}}],["stats`",{"1":{"314":4}}],["stats",{"0":{"129":1},"1":{"128":1,"129":3,"130":5,"263":2}}],["status`",{"1":{"316":4}}],["statusinternalservererror",{"1":{"162":1},"2":{"162":1}}],["status2",{"1":{"105":2,"326":1}}],["statuses",{"1":{"72":1,"316":6,"317":4,"322":2,"324":2}}],["statuses=",{"1":{"35":1,"72":2}}],["status",{"0":{
"201":1},"1":{"39":1,"59":2,"60":2,"62":1,"65":2,"66":2,"68":2,"89":1,"105":4,"112":1,"121":3,"138":3,"141":1,"142":3,"146":4,"162":1,"174":1,"180":1,"182":2,"198":1,"200":2,"201":4,"205":1,"225":1,"227":1,"267":4,"268":3,"309":1,"314":4,"316":6,"326":2,"338":2,"340":5,"371":1},"2":{"112":1}}],["states",{"1":{"129":1}}],["stateless",{"1":{"90":1,"104":1,"176":1}}],["state",{"0":{"169":1},"1":{"3":1,"13":1,"79":1,"104":1,"109":1,"112":1,"115":1,"124":1,"129":2,"131":1,"132":3,"133":1,"134":4,"136":4,"137":1,"158":1,"169":1,"173":3,"174":3,"176":4,"223":1,"268":2,"279":1,"291":1,"327":3,"333":1}}],["scan",{"1":{"160":1},"2":{"160":1}}],["scaling",{"0":{"297":1},"1":{"20":1,"130":1,"176":1,"180":1,"182":1,"325":1},"3":{"19":1,"20":1,"21":1,"22":1}}],["scaletargetref",{"1":{"130":1}}],["scaler",{"1":{"130":1}}],["scales",{"1":{"130":1,"288":1}}],["scaledobject",{"1":{"130":2}}],["scaled",{"1":{"5":1}}],["scale",{"1":{"3":2,"5":2,"128":1,"130":1,"174":1,"175":1,"177":1,"209":2,"279":1,"296":2,"297":1}}],["score",{"1":{"115":1}}],["scoped",{"1":{"158":1,"188":2,"376":1}}],["scope=",{"1":{"52":4}}],["scopes`",{"1":{"260":2,"265":1}}],["scopes",{"1":{"51":1,"260":2,"265":1,"312":2,"335":2}}],["scope",{"1":{"46":4,"51":1,"52":15,"312":2,"318":4,"322":2,"324":2}}],["schemes",{"1":{"376":1}}],["scheme",{"1":{"268":1}}],["schemas",{"1":{"55":1,"247":1}}],["schema",{"0":{"280":1},"1":{"24":1,"25":1,"279":3,"280":2,"281":1,"333":3,"346":2}}],["scheduling",{"1":{"17":1,"23":1,"27":1,"34":2,"38":1,"64":1,"65":1,"93":1,"124":2,"132":1,"134":1,"158":1,"223":1,"268":6,"307":5}}],["scheduleclient",{"1":{"332":2,"341":2}}],["schedule`",{"1":{"322":2,"324":2}}],["scheduletimeout",{"1":{"65":1,"66":1}}],["scheduler",{"1":{"38":1,"249":1,"251":1,"254":1,"297":4}}],["schedules",{"0":{"341":1},"1":{"35":3,"37":2,"42":3,"84":1,"173":1,"174":2,"222":1,"297":1,"332":5,"345":2},"2":{"35":1,"84":1}}],["scheduledworkflowslist",{"1":{"341":1}}],["scheduledworkflows",{"1":{"341":6}}],["scheduledwork
flowsbulkupdateitem",{"1":{"317":1}}],["scheduledrunstatus",{"1":{"35":1,"317":4},"2":{"35":1}}],["scheduledruns",{"1":{"35":4,"341":1}}],["scheduledrun",{"1":{"35":2},"2":{"35":1}}],["scheduledrunid",{"1":{"35":5}}],["scheduled",{"0":{"27":1,"34":1,"35":1,"36":1,"37":1,"317":1},"1":{"17":1,"23":1,"27":1,"28":2,"34":10,"35":58,"36":7,"37":9,"42":5,"65":1,"84":4,"93":1,"124":2,"145":1,"174":1,"192":1,"268":2,"307":2,"317":60,"321":1,"322":8,"324":2,"332":4,"341":13,"345":4,"377":2},"2":{"35":10,"332":1}}],["schedule",{"1":{"2":1,"26":1,"28":1,"34":2,"35":8,"36":2,"38":4,"39":2,"55":1,"65":2,"84":6,"87":1,"145":1,"257":4,"311":2,"317":2,"321":1,"322":9,"324":8,"345":6},"2":{"35":3,"55":1,"84":2,"345":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["scratch",{"1":{"158":1,"218":1}}],["scratching",{"1":{"33":1}}],["scrapeprometheusmetrics",{"1":{"338":1}}],["scrapes",{"1":{"268":1}}],["scraper",{"1":{"268":1}}],["scraped",{"1":{"147":1,"268":1}}],["scrape",{"1":{"146":2,"268":2,"314":6,"338":1}}],["scripts",{"1":{"21":1}}],["script",{"1":{"21":2,"127":1,"238":2,"349":1}}],["screen",{"1":{"7":1}}],["scenarios",{"1":{"75":1,"76":1,"77":1,"80":1,"81":1,"132":1,"296":1,"332":1}}],["scenario",{"1":{"20":1,"86":1,"100":1}}],["sustaining",{"1":{"175":1}}],["supply",{"1":{"154":1}}],["supplementary",{"1":{"149":1}}],["support`",{"1":{"265":2}}],["supported",{"1":{"174":1,"224":1,"225":2,"377":3}}],["supportingcharacter",{"1":{"52":1}}],["supporting",{"1":{"52":6}}],["support",{"0":{"331":1},"1":{"15":1,"23":1,"55":1,"69":1,"103":1,"108":1,"111":1,"114":1,"119":1,"144":1,"152":1,"153":1,"154":1,"155":1,"156":1,"190":1,"243":1,"255":1,"265":4,"268":1,"298":2,"299":1,"304":2,"305":2,"322":1,"326":1,"331":2,"345":1,"369":1,"370":1,"371":1,"375":1,"376":1,"377":6}}],["supports",{"1":{"0":1,"5":1,"11":1,"12":1,"26":2,"27":1,"34":1,"35":1,"38":2,"50":1,"60":1,"70":1,"143":1,"188":1,"248":1,"268":1,"282":1,"298":1,"303":1,"331":1,"351":1}}],["summarizing",{"1":{"288":1}}],["summa
rized",{"1":{"287":1}}],["sum",{"1":{"95":1,"96":1,"104":2,"105":6,"117":7,"136":2,"268":8}}],["sum=one",{"1":{"95":1,"96":1,"117":1}}],["subnets",{"1":{"189":1}}],["subgraph",{"1":{"173":3}}],["subtle",{"1":{"166":1}}],["subtask",{"1":{"101":3,"106":1}}],["subtasks",{"1":{"86":1}}],["subject",{"1":{"131":1,"134":1,"327":1,"329":1}}],["subsequent",{"1":{"294":1}}],["subset",{"1":{"93":1}}],["subscription",{"1":{"50":2,"81":1,"212":1,"213":1}}],["subscriptions",{"0":{"219":1},"1":{"45":2,"217":2,"219":3,"220":1}}],["subscribes",{"1":{"340":1}}],["subscribetostream",{"1":{"161":2,"162":2,"340":1},"2":{"161":1,"162":1}}],["subscribe",{"1":{"25":1,"161":2,"162":2,"174":1,"201":1,"219":1,"309":2,"340":1},"2":{"161":1,"162":1}}],["sub",{"1":{"85":1,"101":1,"106":1,"175":1,"177":1}}],["submissions",{"1":{"76":1}}],["suitable",{"1":{"58":1,"75":1,"134":1}}],["suited",{"1":{"3":1}}],["succeed",{"1":{"322":2}}],["succeeding",{"1":{"213":1}}],["succeeded",{"1":{"62":1,"92":1,"214":4,"268":8},"2":{"214":1}}],["succeeds",{"1":{"57":1}}],["successfully",{"1":{"65":1,"66":2,"68":1,"73":1,"95":1,"121":1,"227":5,"252":1,"285":1,"345":1}}],["successful",{"1":{"55":1,"61":1,"120":1,"121":1,"214":1}}],["success",{"1":{"18":1,"39":1,"59":4,"60":2,"65":2,"66":2,"89":1,"121":3,"138":3,"141":1,"142":3,"145":1,"268":3,"322":9}}],["sunday",{"1":{"38":1}}],["survive",{"1":{"85":1}}],["sure",{"1":{"37":1,"42":1,"55":1,"132":1,"166":1,"169":1,"216":1,"230":1,"289":1,"373":1,"374":1}}],["surfaces",{"1":{"181":1}}],["surface",{"1":{"33":1,"173":1}}],["s",{"0":{"181":1},"1":{"3":3,"4":1,"7":1,"10":1,"15":2,"16":3,"24":1,"25":2,"30":1,"32":2,"35":1,"39":1,"40":1,"43":1,"46":2,"52":1,"55":1,"57":1,"58":1,"59":1,"61":1,"63":2,"65":1,"66":1,"68":1,"73":1,"75":3,"76":3,"77":1,"81":1,"82":1,"83":1,"84":1,"87":1,"94":5,"95":2,"96":1,"101":2,"103":1,"104":1,"105":3,"106":1,"109":1,"112":2,"116":1,"120":1,"121":4,"123":2,"124":2,"127":4,"133":2,"136":1,"140":1,"141":1,"142":3,"144":6,"152":1,"154":1,"155":
2,"157":1,"158":5,"162":3,"164":4,"173":1,"174":1,"185":1,"188":2,"193":1,"205":1,"207":1,"214":4,"216":7,"217":2,"218":2,"219":4,"220":4,"221":3,"241":2,"260":1,"266":2,"279":1,"286":1,"287":1,"288":1,"289":2,"307":3,"309":2,"312":1,"313":1,"314":1,"315":1,"316":2,"317":2,"321":1,"322":3,"324":2,"325":5,"326":1,"327":2,"328":2,"329":2,"330":1,"335":1,"336":1,"338":2,"339":1,"340":1,"364":1,"376":2},"2":{"104":1,"105":1}}],["sessions",{"1":{"330":1}}],["ses",{"1":{"302":1,"305":2}}],["segment",{"1":{"231":1}}],["semaphores",{"1":{"139":1}}],["semantics",{"1":{"30":2,"181":1,"184":1}}],["several",{"1":{"74":1,"167":1,"217":1,"250":1,"294":1,"296":1}}],["sequencediagram",{"1":{"87":1}}],["sequence",{"1":{"60":3,"98":1,"102":1,"123":1,"153":1}}],["separately",{"1":{"62":1,"377":2}}],["separate",{"1":{"55":1,"62":2,"73":2,"106":1,"123":2,"158":1,"164":1,"173":1,"216":1,"240":1,"247":1,"248":1,"278":1,"297":3,"298":1,"307":3,"309":2,"377":1}}],["separated",{"1":{"38":1}}],["separation",{"1":{"7":1}}],["select",{"1":{"164":1,"214":2,"216":3,"218":1,"328":1,"367":2,"374":3}}],["selection",{"1":{"354":1}}],["selective",{"1":{"158":1}}],["selectively",{"1":{"158":1}}],["selecting",{"1":{"32":1,"136":1}}],["selects",{"1":{"46":1}}],["selected",{"1":{"36":1,"46":1,"136":1}}],["self",{"0":{"4":1,"183":1,"189":1,"202":1,"223":1},"1":{"0":2,"4":4,"6":2,"7":3,"21":4,"68":2,"164":1,"167":1,"173":2,"174":1,"175":2,"178":2,"179":1,"180":1,"182":4,"184":2,"185":1,"187":1,"189":3,"200":1,"202":4,"222":3,"223":2,"224":4,"225":2,"226":1,"248":1,"253":1,"268":1,"275":1,"277":1,"279":1,"280":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["secure",{"1":{"243":3,"246":1,"252":1}}],["security",{"0":{"187":1,"256":1},"1":{"182":4,"185":3,"186":2,"187":1,"188":1,"189":2,"256":5}}],["sec",{"1":{"175":1,"177":2}}],["secret=<webhook",{"1":{"374":1}}],["secret=<client",{"1":{"374":1}}],["secret>",{"1":{"374":2}}],["secret`",{"1":{"258":1,"260":2,"265":1}}],["secretref",{"1":{"246":2}}],["sec
retname",{"1":{"241":2}}],["secret2",{"1":{"241":2}}],["secret2=$",{"1":{"238":1}}],["secret1",{"1":{"241":2}}],["secret1=$",{"1":{"238":1}}],["secrettargetref",{"1":{"130":1}}],["secrets`",{"1":{"260":1}}],["secrets=",{"1":{"252":1,"253":1}}],["secrets",{"1":{"46":1,"154":1,"189":3,"238":2,"241":2,"246":4,"252":1,"260":1,"374":1}}],["secret",{"1":{"46":5,"130":1,"214":4,"216":4,"218":2,"219":2,"220":2,"221":2,"238":1,"241":1,"246":2,"253":2,"258":1,"260":2,"265":1,"304":1,"305":1,"374":8},"2":{"238":1}}],["secondextra",{"1":{"153":1}}],["secondmiddleware",{"1":{"153":2}}],["secondary",{"1":{"50":4,"52":2}}],["seconds`",{"1":{"146":1,"268":2,"272":1,"307":2,"322":4}}],["seconds=60",{"1":{"307":2,"322":4}}],["seconds=1",{"1":{"121":1}}],["seconds=10",{"1":{"60":1,"66":1,"110":1}}],["seconds=30",{"1":{"113":1}}],["seconds=sleep",{"1":{"89":2,"116":1}}],["seconds=4",{"1":{"66":1}}],["seconds=5",{"1":{"65":1,"94":1,"95":1,"96":1,"109":1}}],["seconds",{"1":{"38":3,"60":4,"65":6,"66":5,"89":2,"108":1,"109":1,"112":1,"116":1,"118":1,"146":5,"210":2,"265":1,"268":6,"289":1,"307":2,"316":4,"322":4,"333":1,"345":1},"2":{"89":1}}],["second",{"1":{"26":1,"37":1,"38":1,"39":1,"65":4,"66":3,"68":2,"72":1,"79":1,"82":4,"89":1,"109":1,"110":1,"113":1,"138":1,"153":1,"154":1,"155":1,"158":1,"160":1,"162":2,"288":2,"289":3,"290":1,"292":1,"308":1,"325":2,"333":1},"2":{"39":1,"65":1,"66":1,"68":1,"82":2,"109":1,"110":1,"113":1,"138":1,"155":1,"160":1,"162":1}}],["section",{"1":{"8":1,"10":1,"11":1,"12":1,"21":1,"255":1,"286":2,"374":4}}],["sensitive",{"1":{"157":1}}],["sense",{"1":{"16":1,"19":1,"209":2}}],["sentry",{"1":{"258":8}}],["sent",{"1":{"46":1,"54":1,"140":1,"167":1,"295":1,"309":3}}],["sendgrid",{"1":{"302":1,"305":3}}],["sender",{"1":{"46":2,"47":1,"304":2}}],["send",{"0":{"167":1},"1":{"44":1,"109":1,"119":1,"142":1,"144":2,"157":1,"167":1,"210":1,"214":1,"215":1,"216":2,"217":3,"219":1,"220":2,"221":1,"241":2,"274":2,"289":1,"294":1,"298":1,"305":1,"309":5,"312":2,"318":
1,"322":2,"324":2,"342":1},"2":{"157":1}}],["sending",{"1":{"21":1,"25":1,"27":2,"32":1,"34":1,"38":1,"44":1,"121":1,"144":1,"159":1,"294":1,"305":1,"318":3,"342":1}}],["sends",{"1":{"18":1,"34":1,"67":1,"144":3,"170":1,"213":1,"216":1,"220":1}}],["serialized",{"1":{"157":2},"2":{"157":1}}],["serializable",{"1":{"15":1,"309":1}}],["served",{"1":{"243":2}}],["serves",{"1":{"73":1}}],["server`",{"1":{"358":1}}],["serverauthbasicauthenabled`",{"1":{"243":1}}],["serverauthbasicauthenabled",{"1":{"243":1}}],["serverauthsetemailverified`",{"1":{"243":1}}],["serverauthsetemailverified",{"1":{"243":1}}],["serverauthcookieinsecure`",{"1":{"243":1}}],["serverauthcookieinsecure",{"1":{"243":1}}],["serverauthcookiedomain`",{"1":{"243":1}}],["serverauthcookiedomain",{"1":{"243":1}}],["serverurl`",{"1":{"243":1}}],["serverurl",{"1":{"243":1}}],["serverless",{"1":{"175":1}}],["server",{"0":{"11":1,"272":1,"302":1},"1":{"11":1,"62":1,"109":1,"157":1,"158":2,"162":6,"173":2,"174":2,"176":1,"207":1,"223":1,"224":1,"225":22,"226":1,"227":10,"230":2,"234":2,"238":6,"240":10,"241":19,"243":5,"250":1,"251":1,"252":10,"253":12,"254":5,"262":4,"264":6,"265":4,"268":10,"270":4,"272":3,"273":2,"291":1,"294":20,"295":1,"301":2,"302":1,"304":9,"305":1,"317":1,"341":1,"360":1,"361":1,"362":21,"372":4,"374":9,"376":5},"2":{"162":2}}],["serviceport",{"1":{"241":4}}],["servicename",{"1":{"241":3}}],["services`",{"1":{"254":1}}],["services",{"0":{"55":1},"1":{"44":1,"57":1,"58":1,"74":1,"79":2,"106":1,"147":1,"173":1,"174":1,"176":2,"182":3,"189":1,"201":1,"212":1,"225":2,"227":3,"231":1,"240":4,"243":4,"248":1,"249":1,"254":1,"278":1,"279":1,"285":1,"297":3,"318":1,"342":1,"358":1,"372":1,"377":2}}],["service",{"1":{"0":1,"17":1,"37":1,"55":5,"60":1,"72":2,"74":1,"79":1,"82":2,"112":1,"113":1,"173":1,"188":1,"223":1,"225":4,"226":1,"227":8,"230":2,"231":1,"234":2,"238":1,"240":2,"243":2,"253":1,"254":1,"255":1,"264":2,"268":1,"297":5,"330":1,"372":2}}],["searchability",{"1":{"151":1}}],["searching
",{"1":{"149":1}}],["search",{"1":{"11":1,"106":1,"150":1,"337":2,"344":1}}],["seed",{"1":{"243":3,"372":2}}],["seeding",{"1":{"243":2,"251":2,"255":4}}],["seeing",{"0":{"210":1},"1":{"177":2,"294":1,"295":1,"297":1}}],["sees",{"1":{"152":1,"158":1}}],["see",{"1":{"7":2,"9":1,"15":1,"16":4,"21":2,"47":1,"58":1,"65":1,"90":2,"92":1,"110":1,"113":1,"115":1,"132":1,"134":1,"158":1,"166":1,"167":1,"168":1,"171":1,"182":2,"189":1,"206":1,"207":1,"214":2,"216":1,"218":2,"219":1,"228":1,"234":2,"238":1,"249":1,"253":1,"279":3,"280":1,"286":3,"292":1,"294":1,"295":1,"296":2,"297":1,"316":2,"317":3,"322":3,"370":1,"371":1}}],["setauthtag",{"1":{"157":1},"2":{"157":1}}],["sethours",{"1":{"35":1}}],["sets",{"1":{"34":1,"118":1,"137":1,"243":2,"278":1,"348":1}}],["settings",{"0":{"296":1},"1":{"21":1,"174":1,"183":1,"195":1,"205":1,"214":1,"216":3,"219":1,"220":1,"221":1,"228":1,"243":2,"251":1,"253":1,"255":1,"294":11,"296":3,"304":1,"305":1,"307":4,"362":1,"364":1,"374":4}}],["setting",{"0":{"84":1,"132":1,"244":1,"354":1,"364":1},"1":{"7":1,"21":1,"74":2,"84":1,"130":1,"132":1,"133":1,"136":1,"213":1,"214":1,"243":2,"244":1,"268":1,"291":1,"295":1,"296":1,"298":1,"301":1,"357":1,"377":2}}],["set",{"0":{"209":1},"1":{"7":6,"20":1,"21":2,"22":2,"34":1,"35":1,"36":1,"38":1,"39":1,"40":1,"46":4,"57":1,"61":1,"62":1,"70":1,"72":2,"81":1,"83":2,"84":4,"118":1,"126":1,"127":1,"132":2,"134":1,"135":2,"136":2,"144":1,"145":1,"146":3,"154":1,"162":3,"164":1,"178":1,"206":1,"209":1,"216":1,"220":1,"225":2,"233":1,"234":1,"236":1,"243":8,"245":1,"248":1,"253":1,"255":1,"260":2,"268":3,"278":2,"289":1,"291":1,"293":1,"294":2,"296":1,"299":1,"304":1,"307":1,"309":5,"312":2,"315":4,"316":12,"318":2,"323":2,"324":2,"326":1,"333":3,"335":1,"353":1,"354":7,"355":1,"357":1,"358":3,"364":1,"365":2,"366":1,"372":2,"374":3,"376":4}}],["setups",{"1":{"55":1,"173":1,"177":1}}],["setup",{"0":{"144":1,"214":1,"216":1,"218":1,"290":1,"347":1,"374":1},"1":{"5":1,"7":1,"10":1,"46":1,"142":2,"156":1,"158
":1,"177":1,"207":1,"212":1,"217":1,"224":1,"227":3,"228":2,"234":1,"238":1,"241":1,"251":1,"268":4,"286":4,"301":1,"307":1,"347":1,"364":1,"372":2}}],["pwd",{"1":{"238":1}}],["pgx",{"1":{"263":2}}],["pgxstats",{"1":{"263":2}}],["pgbouncer",{"1":{"255":5}}],["pg",{"1":{"225":2,"227":1,"231":1}}],["p95",{"1":{"177":1}}],["phantom",{"0":{"171":1}}],["pnpm",{"1":{"127":10,"364":2}}],["p",{"1":{"87":4,"238":1,"289":2,"307":4,"322":4,"362":2}}],["pem",{"1":{"374":1}}],["penetration",{"1":{"187":1,"188":1}}],["pending",{"1":{"69":1,"124":3,"132":1,"134":1,"136":1,"254":2}}],["personal",{"1":{"374":1}}],["persistence",{"1":{"35":1,"40":1,"73":1}}],["persistent",{"1":{"35":1}}],["persisted",{"1":{"0":1,"73":1,"123":1,"176":1}}],["persist",{"1":{"24":1}}],["persists",{"1":{"13":1}}],["permanently",{"1":{"320":4,"324":4}}],["permitted",{"1":{"265":2}}],["permissions",{"1":{"154":1,"189":1,"247":2,"374":1}}],["percentile",{"1":{"268":1}}],["period=720h",{"1":{"291":1}}],["period`",{"1":{"257":1}}],["period",{"1":{"225":2,"227":1,"231":1,"257":1,"266":2,"294":12}}],["periods",{"1":{"171":1}}],["periodically",{"1":{"23":1}}],["performed",{"1":{"176":1,"190":1,"191":2,"193":2}}],["performance",{"0":{"177":1,"292":1},"1":{"137":1,"158":1,"167":1,"177":3,"268":1,"286":1,"287":1,"290":1,"292":3,"294":3,"295":1,"296":2,"298":2}}],["performing",{"1":{"121":1,"139":1,"294":1}}],["performs",{"1":{"69":1}}],["perform",{"1":{"34":1,"69":1,"73":1,"74":1,"102":1,"121":3,"307":1,"316":4,"322":1,"331":1,"345":1}}],["perfectly",{"1":{"22":1,"26":1,"94":1,"209":1}}],["per",{"1":{"5":2,"11":1,"12":1,"36":1,"74":1,"79":2,"80":1,"100":1,"106":2,"117":2,"121":1,"177":1,"188":1,"268":1,"288":4,"289":1,"291":1,"292":1,"293":1,"294":2,"317":3,"338":1,"341":2}}],["pycache",{"1":{"364":1}}],["pycon",{"1":{"325":1}}],["pylon",{"1":{"258":7}}],["py`",{"1":{"127":1,"141":2}}],["pyproject",{"1":{"127":1},"2":{"127":1}}],["pydantic",{"0":{"326":1},"1":{"24":1,"25":1,"55":1,"94":1,"307":4,"322":1,"326":8,"331
":4}}],["py",{"1":{"21":1,"127":2,"141":4,"364":2,"367":1},"2":{"21":1,"127":1,"141":3,"364":1,"367":1}}],["python3",{"1":{"127":1}}],["pythonunbuffered=1",{"1":{"127":2}}],["python",{"0":{"62":1,"272":1,"307":1},"1":{"0":1,"14":1,"16":5,"20":1,"21":2,"24":2,"25":2,"35":5,"39":2,"40":6,"43":1,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"62":6,"65":1,"68":1,"72":5,"74":1,"78":1,"93":1,"94":2,"95":1,"96":1,"97":1,"104":3,"105":4,"109":2,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":2,"121":1,"125":1,"127":8,"135":1,"140":1,"144":1,"146":20,"152":2,"153":1,"154":1,"155":1,"156":1,"157":1,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"274":2,"294":2,"306":2,"316":2,"317":2,"325":3,"327":1,"329":1,"331":1,"364":2,"367":1,"377":6},"2":{"127":1}}],["pkg",{"1":{"16":1}}],["pubsub",{"1":{"254":1}}],["pubs`",{"1":{"254":1}}],["publishes",{"1":{"289":1}}],["published",{"1":{"161":2}}],["publishing",{"0":{"160":1}}],["publish",{"1":{"101":1,"207":1}}],["public>",{"1":{"252":1}}],["public",{"1":{"28":1,"36":1,"41":1,"238":2,"241":4,"243":1,"252":3,"253":2,"259":4,"289":1,"369":1,"370":1,"371":1,"376":2},"2":{"238":1,"252":1,"253":1}}],["put`",{"1":{"315":2}}],["putobjectcommand",{"1":{"157":2}}],["put",{"1":{"82":2,"160":2,"172":1,"189":1,"309":2,"315":4},"2":{"82":1,"160":2}}],["putstream",{"1":{"160":2,"333":1},"2":{"160":1}}],["puts",{"1":{"14":1,"35":1,"39":1,"43":1,"52":1,"53":1,"68":1,"74":2,"81":1,"82":1,"84":1,"95":1,"96":1,"97":1,"104":2,"105":4,"109":2,"112":3,"116":1,"120":1,"121":1,"138":2,"214":1,"216":1,"219":1,"220":1,"221":1}}],["pulls",{"1":{"316":4}}],["pullrequest",{"1":{"216":3},"2":{"216":2}}],["pull",{"1":{"45":1,"212":1,"215":1,"216":15,"229":2,"278":1,"374":1},"2":{"216":3}}],["pushes",{"1":{"45":1,"145":2,"212":1,"215":1}}],["pushed",{"1":{"25":1,"31":1,"112":1,"113":1}}],["push",{"1":{"25":1,"48":5,"51":1,"52":9,"104":1,"105":1,"111":1,"112":5,"113":5,"135":1,"136":1,"140":1,"145":2,"149":6,"216":1,"294":2,"3
07":1},"2":{"48":2,"52":3,"104":1,"105":1,"112":2,"113":2,"135":1,"136":1,"149":2,"294":1}}],["pushing",{"0":{"48":1},"1":{"16":2,"25":2,"48":1,"52":2,"111":2,"112":2,"113":2,"149":1,"175":1,"209":1}}],["purposes",{"1":{"121":1,"149":1,"358":1,"363":1,"366":1}}],["purpose",{"1":{"3":1,"73":1,"88":2,"301":1,"372":1}}],["pluggable",{"1":{"142":1}}],["please",{"1":{"7":2,"177":1,"238":1,"294":1}}],["playbooks",{"1":{"207":1}}],["plain",{"1":{"158":2,"162":3,"303":1}}],["plaintext",{"1":{"157":4,"158":1}}],["place",{"1":{"52":1,"124":1,"132":1,"278":1,"374":1}}],["places",{"1":{"18":1}}],["planned",{"1":{"198":1}}],["planning",{"1":{"182":1}}],["plans",{"1":{"190":1}}],["plane",{"1":{"157":1,"180":1,"222":2,"223":1,"224":1}}],["plan",{"1":{"4":1,"132":1}}],["platform",{"1":{"0":2,"3":1,"29":1,"146":1,"170":1}}],["popular",{"1":{"325":1}}],["pop",{"1":{"305":1}}],["pods",{"1":{"234":2,"238":1}}],["pod",{"1":{"234":4,"238":2,"289":2}}],["port=8080",{"1":{"253":1}}],["port=8002`",{"1":{"146":1}}],["port=$",{"1":{"234":2,"238":1}}],["ports",{"1":{"225":3,"227":4,"231":1,"234":2,"238":1,"362":1}}],["port`",{"1":{"146":1,"254":3,"255":1,"270":1,"272":1,"376":3}}],["port",{"1":{"146":3,"162":1,"225":2,"234":5,"238":3,"245":1,"252":1,"254":3,"255":1,"268":1,"270":3,"272":1,"304":1,"362":9,"374":1,"376":2}}],["portion",{"1":{"100":1}}],["portions",{"1":{"32":1}}],["potential",{"1":{"132":1}}],["poetry",{"1":{"127":11,"364":2,"367":1},"2":{"127":2}}],["pool=pool",{"1":{"328":1}}],["pooling",{"0":{"293":1}}],["pool",{"1":{"123":2,"255":2,"293":1,"299":2,"328":5},"2":{"328":1}}],["pools",{"1":{"82":1,"123":2,"128":1,"327":1}}],["power",{"1":{"95":1}}],["powerful",{"1":{"71":1,"72":1,"74":1}}],["powering",{"1":{"32":1,"173":1}}],["possibly",{"1":{"240":1}}],["possible",{"1":{"30":1,"37":1,"38":1,"42":1,"133":1,"139":1,"158":2,"279":1,"294":1,"297":1,"316":2,"317":2,"332":1}}],["positional",{"1":{"94":1,"155":1,"330":1}}],["positive",{"1":{"52":1}}],["postmark",{"1":{"265":10}}],["po
sthog",{"1":{"258":10}}],["postgres",{"0":{"245":1},"1":{"225":26,"226":1,"227":19,"231":9,"245":10,"247":2,"248":1,"252":3,"255":6,"277":1,"293":1,"296":2,"328":1},"2":{"225":1,"227":1}}],["postgresql",{"1":{"4":1,"173":1,"174":2,"175":1,"176":1,"177":2,"182":1,"189":1,"223":1,"224":1,"225":4,"227":1,"231":1,"251":1,"252":1,"253":2,"255":8,"277":4,"293":1,"298":1,"362":1}}],["posted",{"1":{"219":1}}],["posture",{"1":{"189":1}}],["post",{"1":{"29":1,"90":1,"220":1,"325":1,"345":2}}],["poll",{"1":{"265":2}}],["polling",{"1":{"10":1,"254":4}}],["polyglot",{"1":{"55":1,"175":1}}],["polyrepo",{"1":{"55":1}}],["policy",{"1":{"37":1,"42":1,"56":1,"158":1}}],["policies",{"0":{"56":1},"1":{"17":1,"34":1,"38":1,"64":1,"92":2,"173":1,"174":1,"176":2,"182":1,"188":1,"189":1}}],["points",{"1":{"145":1,"185":1}}],["pointing",{"1":{"10":1,"11":1,"12":1,"183":1}}],["point",{"1":{"3":1,"20":1,"30":1,"102":1,"126":2,"209":1,"225":2,"277":1,"286":1,"340":1}}],["panes",{"1":{"372":1}}],["panic",{"1":{"294":1}}],["pagination",{"1":{"316":6,"317":2,"322":2,"324":2}}],["pagination`",{"1":{"316":8}}],["pages",{"1":{"28":1}}],["page",{"0":{"201":1},"1":{"1":1,"9":1,"11":4,"12":4,"172":1,"179":1,"180":1,"185":1,"200":1,"201":1,"208":1,"228":1,"284":1,"286":1,"369":1,"374":2}}],["packet",{"1":{"170":1}}],["packages",{"1":{"144":1}}],["package",{"0":{"141":1},"1":{"10":1,"55":1,"127":6,"140":1,"144":1,"237":2},"2":{"127":1}}],["pair",{"1":{"149":4,"317":2}}],["pairs",{"1":{"135":1,"149":1,"150":1,"246":1,"317":2,"333":1,"338":1}}],["paused",{"1":{"254":2,"343":3}}],["pause",{"1":{"88":1,"108":1,"110":1,"111":1,"112":1,"124":1,"343":3}}],["pauses",{"1":{"86":1,"108":1,"109":1,"333":2}}],["paying",{"1":{"83":1}}],["payment",{"1":{"46":5,"212":1,"214":13},"2":{"214":1}}],["payments",{"1":{"45":1,"213":1,"214":1}}],["payload`",{"1":{"318":4}}],["payload=",{"1":{"52":4,"294":1}}],["payload",{"0":{"53":1},"1":{"25":1,"46":14,"48":1,"52":7,"53":3,"54":2,"88":1,"89":1,"113":1,"149":1,"157":5,"167":2,
"214":1,"216":3,"217":1,"220":1,"221":1,"289":2,"292":1,"294":3,"312":2,"318":12,"322":2,"324":2,"333":5,"341":1},"2":{"52":1,"53":2}}],["payloads`",{"1":{"316":2}}],["payloadsize",{"1":{"289":1}}],["payloads",{"1":{"16":1,"115":1,"152":1,"157":2,"177":1,"220":1,"221":1,"316":2}}],["pass",{"1":{"118":1,"141":3,"142":1,"144":1,"153":4,"155":1,"157":2,"225":1,"227":1,"241":1,"322":1,"323":6,"324":7,"328":1,"330":4,"365":1}}],["passes",{"1":{"112":1,"154":1}}],["passed",{"1":{"15":1,"46":2,"55":1,"92":1,"142":1,"152":1,"155":1,"308":1,"309":1,"333":1}}],["password`",{"1":{"254":1,"255":2,"264":1,"265":1,"268":1}}],["password=your",{"1":{"252":1,"304":1}}],["password=hatchet",{"1":{"225":2,"227":1,"231":1}}],["password",{"1":{"46":2,"225":3,"227":3,"234":1,"243":7,"245":1,"247":2,"252":2,"254":1,"255":2,"264":1,"265":1,"268":1,"303":1,"304":2,"305":3}}],["passing",{"1":{"40":1,"43":2,"69":1,"74":1,"126":1,"345":1}}],["paste",{"1":{"214":1,"216":1,"219":1,"221":1}}],["past",{"1":{"32":1,"38":2,"72":1,"209":1,"216":1,"268":1,"314":2}}],["patchschedulecreate",{"1":{"341":1}}],["patching",{"1":{"188":1}}],["patchlevel=",{"1":{"146":1}}],["pattern",{"0":{"100":1},"1":{"50":1,"81":1,"98":1,"100":3,"101":1,"106":1,"117":1,"122":1,"152":1,"155":1,"158":1}}],["patterns",{"0":{"101":1,"106":1},"1":{"13":1,"31":1,"33":1,"80":1,"85":4,"90":1,"100":2,"115":2,"120":1,"123":2,"124":1,"157":1,"158":1,"181":1,"322":2,"345":2}}],["path=<path",{"1":{"374":1}}],["path=",{"1":{"268":1}}],["path`",{"1":{"264":1,"268":2,"374":1}}],["paths",{"1":{"114":1,"117":1,"241":2}}],["path",{"1":{"11":1,"115":2,"120":1,"158":1,"175":1,"180":1,"241":3,"252":3,"259":3,"262":3,"264":1,"268":2,"273":3,"277":1,"279":1,"331":1,"374":1,"376":3}}],["parsing",{"1":{"216":1,"326":1}}],["parse",{"1":{"157":1,"333":1},"2":{"157":1}}],["parses",{"1":{"106":1,"220":1}}],["partway",{"1":{"124":1}}],["parts",{"1":{"100":1}}],["partial",{"1":{"120":1}}],["participant",{"1":{"87":3}}],["particular",{"1":{"46":1}}],["part
icularly",{"1":{"3":1,"24":1,"27":1,"32":1,"74":1,"75":1,"76":1,"77":1,"294":1}}],["partitioned",{"1":{"81":1}}],["party",{"1":{"44":1,"187":1,"318":1,"342":1}}],["part",{"1":{"32":1,"39":1,"54":1,"81":1,"82":1,"94":1,"121":1,"149":1,"158":1,"237":1,"307":4,"322":6,"324":2,"333":1,"376":1}}],["parantheses",{"1":{"268":1}}],["parameter",{"1":{"43":1,"130":1,"154":3,"196":1,"309":1,"317":1,"326":1,"331":1,"332":11,"333":17,"334":4,"335":5,"336":1,"338":2,"339":2,"340":9,"341":7,"342":5,"343":4,"344":4,"345":11,"346":5}}],["parameters",{"1":{"21":1,"28":1,"36":1,"41":1,"65":1,"152":1,"153":1,"154":1,"155":1,"196":1,"249":1,"252":1,"307":4,"309":6,"310":1,"311":8,"312":10,"313":2,"314":2,"315":2,"316":25,"317":13,"318":10,"319":4,"320":8,"322":21,"323":2,"324":22,"329":1,"332":9,"333":17,"334":6,"335":5,"336":1,"338":2,"339":2,"340":9,"341":9,"342":5,"343":4,"344":4,"345":11,"346":3}}],["parallel",{"1":{"3":2,"5":1,"43":2,"92":1,"101":1,"104":2,"105":3,"106":1,"107":1,"139":1,"289":1,"294":1,"295":1,"333":2}}],["parallelization",{"1":{"3":1}}],["parentworkflowrunid",{"1":{"333":1}}],["parent=wait",{"1":{"117":2}}],["parentcondition",{"1":{"117":8},"2":{"117":2}}],["parentinput",{"1":{"104":2,"105":8,"294":1,"326":2}}],["parenttask",{"1":{"96":1}}],["parentoutputtype",{"1":{"96":1}}],["parentoutput",{"1":{"95":2,"96":4,"105":5,"117":12,"333":1},"2":{"95":1,"96":1,"117":1}}],["parentsinglechild",{"1":{"104":1,"105":1}}],["parents=",{"1":{"95":1,"96":1,"105":1,"110":1,"113":2,"117":3,"118":1,"133":1,"326":1}}],["parents",{"1":{"92":2,"95":4,"96":3,"105":2,"110":2,"113":4,"117":6,"118":2,"124":1,"133":1,"149":1,"322":4}}],["parent",{"0":{"96":1,"117":1},"1":{"20":2,"24":1,"43":3,"87":2,"88":1,"90":1,"96":3,"103":2,"104":13,"105":22,"106":1,"107":1,"110":3,"113":1,"117":9,"118":6,"120":2,"123":2,"124":4,"133":2,"145":3,"294":3,"301":2,"309":5,"314":4,"316":2,"317":6,"322":4,"323":2,"324":6,"326":4,"333":5,"338":2},"2":{"24":1,"105":2,"294":1,"326":1}}],["ping",{"1":{"146":1}
}],["pinologger",{"1":{"142":2}}],["pino",{"1":{"142":2}}],["pip",{"1":{"127":2,"144":1}}],["pipeline",{"1":{"85":1,"100":2,"101":4,"105":1}}],["pipelines",{"0":{"98":1},"1":{"2":1,"25":1,"26":1,"86":1,"98":2,"100":1,"111":1}}],["pieces",{"1":{"173":1,"204":1}}],["piece",{"1":{"30":1}}],["picks",{"1":{"85":1,"123":1,"124":1}}],["picked",{"1":{"83":1,"132":1}}],["pick",{"1":{"2":1,"3":1,"123":1,"189":1,"209":1,"210":1,"216":1,"333":1}}],["prs",{"1":{"374":3}}],["prnumber",{"1":{"216":3}}],["pr=pr",{"1":{"216":1}}],["pr",{"1":{"216":19,"374":1}}],["practical",{"0":{"157":1},"1":{"189":1,"203":1}}],["practices",{"0":{"69":1,"99":1},"1":{"69":1,"71":1,"99":1,"115":1,"186":1}}],["practice",{"1":{"30":1,"161":1,"174":1,"183":1}}],["preload=",{"1":{"369":1,"370":1,"371":1}}],["precmds",{"1":{"364":3}}],["precedence",{"1":{"46":1,"278":1,"358":1}}],["pressing",{"1":{"368":1}}],["pressure",{"1":{"210":1,"316":4}}],["presigner",{"1":{"157":1}}],["presets",{"1":{"46":1}}],["prevents",{"1":{"123":1}}],["preventing",{"1":{"75":1,"123":1}}],["prevent",{"1":{"74":2,"79":1,"254":2,"279":1,"325":1}}],["previously",{"1":{"153":1}}],["previous",{"1":{"5":1,"10":1,"30":1,"39":2,"46":1,"98":1,"153":3,"279":2,"280":1}}],["prepare",{"1":{"43":1,"101":1,"294":1}}],["pre",{"0":{"98":1},"1":{"38":1,"46":2,"61":1,"82":1,"155":1,"345":1}}],["predefined",{"1":{"38":1,"100":1}}],["predetermined",{"1":{"34":1}}],["predictable",{"1":{"15":1,"91":1,"98":1}}],["prefixes",{"0":{"251":1},"1":{"214":1,"250":1,"251":1}}],["prefixed",{"1":{"24":1}}],["prefix",{"1":{"46":1,"270":1}}],["preferable",{"1":{"316":2,"317":2}}],["prefer",{"1":{"17":1,"100":1,"216":1,"279":2}}],["prerequisites",{"0":{"233":1,"236":1,"282":1,"303":1,"359":1},"1":{"225":1,"227":1}}],["prerequisite",{"1":{"9":1}}],["premise",{"1":{"4":1}}],["prisma",{"1":{"372":1,"373":1}}],["privilege",{"1":{"189":1}}],["private>",{"1":{"252":1}}],["private",{"1":{"188":1,"189":1,"198":3,"238":2,"241":4,"252":3,"253":2,"259":4,"374":7},"2":{"238":
1,"252":1,"253":1}}],["prio",{"1":{"84":4}}],["prior",{"1":{"80":1,"82":1}}],["prioritized",{"1":{"83":1,"136":2}}],["prioritize",{"1":{"76":1}}],["prioritization",{"1":{"75":1}}],["priorities",{"1":{"3":1,"83":1,"84":2,"174":1,"176":1}}],["priority`",{"1":{"307":3}}],["priority=priority",{"1":{"84":4}}],["priority=default",{"1":{"84":1}}],["prioritywf",{"1":{"84":1}}],["priorityworkflow",{"1":{"84":2}}],["priority",{"0":{"83":1,"84":1},"1":{"3":1,"17":2,"83":11,"84":57,"129":1,"176":1,"307":7,"309":3,"311":2,"316":2,"322":2,"324":2,"333":2,"334":1,"341":1},"2":{"84":12}}],["pricing",{"1":{"38":1}}],["priceless",{"1":{"30":1}}],["printf",{"1":{"39":1,"59":1,"65":1,"66":2,"68":3,"81":1,"104":1,"105":5,"109":2,"112":2,"116":2,"120":1,"121":3,"214":1,"216":1,"219":1,"220":1,"221":1},"2":{"39":1,"59":1,"65":1,"66":1,"68":1,"81":1,"104":1,"105":2,"109":1,"112":1,"116":1,"120":1,"121":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["println",{"1":{"25":2,"53":1,"55":1,"65":1,"66":3,"68":1,"82":1,"138":2,"161":1,"162":1},"2":{"25":1,"53":1,"55":1,"65":1,"66":1,"68":1,"82":1,"138":1,"161":1,"162":1}}],["print",{"1":{"14":1,"35":1,"43":1,"53":1,"55":1,"68":1,"81":1,"82":1,"89":5,"104":1,"105":4,"109":1,"112":1,"120":1,"121":1,"138":2,"161":3,"214":1,"216":1,"219":1,"220":1,"221":1,"326":3,"328":4},"2":{"161":1}}],["prints",{"1":{"11":1}}],["printed",{"1":{"7":1,"161":1}}],["primary",{"1":{"2":1,"90":1,"93":1,"120":1,"175":1,"297":1,"298":1}}],["prone",{"1":{"327":1}}],["probe",{"1":{"265":2}}],["problem",{"1":{"58":1}}],["problems",{"1":{"17":1,"211":1,"325":1}}],["provisioning",{"1":{"182":1}}],["providing",{"1":{"35":1,"48":1,"72":2}}],["providers",{"1":{"305":1}}],["provider`",{"1":{"144":1}}],["provider",{"0":{"305":1},"1":{"144":1,"248":1,"277":1,"279":1,"302":1,"303":1,"305":2}}],["provider=your",{"1":{"144":1}}],["provided",{"1":{"46":2,"52":1,"72":2,"81":1,"286":1,"307":6,"317":8,"322":4,"326":2,"328":1,"333":2,"334":1,"341":1}}],["provide",{"1":{"7":1,"15":1,"21":1,"2
8":1,"46":4,"47":1,"69":1,"72":3,"81":1,"145":1,"154":2,"158":1,"316":4,"317":1,"326":1,"331":3,"352":2}}],["provides",{"1":{"0":1,"15":1,"16":2,"29":1,"46":1,"55":1,"56":1,"67":1,"72":3,"74":1,"82":1,"128":1,"149":1,"154":1,"164":1,"208":1,"268":2,"286":1,"307":1,"308":1,"318":1,"331":1,"332":1,"333":3,"348":1,"358":1,"363":1,"368":1,"369":1}}],["proxy",{"1":{"162":2,"234":1,"240":2,"241":4,"243":1}}],["procs",{"1":{"152":1}}],["procedure",{"1":{"279":1}}],["procedures",{"1":{"202":1}}],["procedurally",{"1":{"105":1}}],["procedural",{"0":{"115":1},"1":{"86":1,"101":1}}],["proceeding",{"1":{"114":1,"154":1,"158":1}}],["proceed",{"1":{"104":2,"105":2,"113":1,"116":1,"118":6,"120":2}}],["proceeds",{"1":{"101":1}}],["processinput",{"1":{"115":1}}],["processing",{"1":{"5":1,"25":1,"26":1,"69":1,"76":1,"98":1,"106":2,"107":1,"109":1,"112":1,"116":1,"139":1,"268":1,"316":4,"332":1}}],["process2",{"1":{"105":2,"326":1}}],["processed`",{"1":{"118":1}}],["processed",{"1":{"14":1,"73":1,"75":2,"84":1,"109":1,"112":2,"116":1,"131":1,"133":1,"295":2,"301":1}}],["processes",{"1":{"2":1,"19":1,"171":2,"173":1,"174":1,"182":1,"223":1}}],["process",{"1":{"5":1,"14":1,"18":1,"25":1,"38":1,"75":1,"101":4,"105":13,"106":1,"109":2,"115":1,"138":10,"146":2,"157":4,"158":2,"161":1,"170":1,"210":2,"272":1,"316":4,"325":1,"326":6},"2":{"157":3,"161":1}}],["prompts",{"1":{"354":1}}],["prompt",{"1":{"352":1,"367":1}}],["promql",{"1":{"268":4}}],["prometheusserverpassword`",{"1":{"268":1}}],["prometheusserverusername`",{"1":{"268":1}}],["prometheusserverurl`",{"1":{"268":1}}],["prometheus",{"0":{"147":1,"268":1},"1":{"146":3,"147":2,"148":4,"264":12,"268":24,"314":14,"338":2}}],["promise<unknown>",{"1":{"157":1}}],["promise<string>",{"1":{"157":1}}],["promises",{"1":{"30":1,"104":3,"105":3},"2":{"104":1,"105":1}}],["promise",{"1":{"24":1,"104":1,"105":1,"135":1,"136":1,"162":1,"332":3,"333":5,"334":4,"335":5,"336":1,"339":2,"340":6,"341":7,"342":5,"343":5,"344":3,"345":5},"2":{"104":1,"105":1
,"135":1,"136":1}}],["prod`",{"1":{"241":1}}],["prod",{"1":{"127":1,"241":2}}],["producer",{"1":{"145":3}}],["produce",{"1":{"102":2,"216":1}}],["production`",{"1":{"220":1}}],["production",{"0":{"5":1},"1":{"5":1,"21":1,"46":2,"127":5,"157":1,"180":1,"182":1,"203":1,"224":4,"226":1,"243":9,"247":1,"248":1,"275":1,"276":1,"279":2,"280":2,"298":1,"351":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["propagated",{"1":{"149":1}}],["propagates",{"1":{"145":1}}],["propagating",{"1":{"69":1}}],["propagation",{"1":{"69":1,"71":1,"145":1}}],["properties",{"1":{"136":1,"332":1,"337":1}}],["property",{"1":{"35":1,"39":2,"40":1,"50":1,"58":2,"131":1,"132":2,"133":1,"136":2,"309":3,"332":2,"337":1,"345":4}}],["proper",{"1":{"71":1,"126":1,"164":1}}],["properly",{"1":{"55":2}}],["prop",{"1":{"39":2}}],["programmatic",{"1":{"269":1,"301":1}}],["programmatically",{"0":{"35":1,"40":1},"1":{"34":2,"35":1,"38":2,"40":1,"72":1,"193":1,"196":1,"307":2,"319":1,"320":1,"343":1,"344":1}}],["programming",{"1":{"58":1,"179":1}}],["progress`",{"1":{"67":1,"74":1,"76":2,"77":1}}],["progress",{"0":{"76":1},"1":{"32":1,"67":1,"74":1,"76":2,"77":1,"85":1,"87":1,"120":1,"123":2,"159":1,"162":1,"174":1,"284":1}}],["protection",{"1":{"74":1}}],["protect",{"1":{"22":1}}],["protocol",{"1":{"11":1,"62":1,"207":1,"241":1,"254":1,"305":1}}],["profile`",{"1":{"206":1,"348":1,"354":2,"355":1}}],["profiles",{"0":{"351":1,"353":1},"1":{"10":1,"348":3,"351":2,"353":2}}],["profile",{"0":{"352":1,"354":1,"355":1,"356":1,"357":1},"1":{"7":5,"260":1,"352":6,"353":3,"354":12,"355":4,"356":4,"357":5,"362":5,"365":2}}],["projects",{"1":{"372":1}}],["project",{"1":{"7":3,"10":3,"187":1,"348":1,"362":8,"364":4}}],["w3c",{"1":{"145":1}}],["wg",{"1":{"104":4,"105":4},"2":{"104":3,"105":3}}],["w",{"1":{"87":2,"105":2,"136":2,"138":1,"162":4,"173":3,"289":1,"294":1},"2":{"136":1,"162":1,"294":1}}],["wl",{"1":{"78":4}}],["whenever",{"1":{"330":1}}],["whether",{"1":{"52":1,"62":3,"87":1,"106":1,"108":1,"117":3,"118"
:1,"123":1,"136":1,"158":1,"259":1,"260":5,"264":1,"309":1,"316":4,"322":2,"324":2,"377":6}}],["whose",{"1":{"88":1,"100":2,"106":1,"309":2}}],["whichever",{"1":{"31":1,"89":1,"110":1,"113":1,"116":1,"118":2}}],["while",{"1":{"3":2,"5":1,"47":1,"55":1,"66":1,"73":1,"87":1,"90":1,"101":4,"103":2,"104":3,"108":2,"111":1,"112":1,"117":1,"119":1,"123":2,"132":1,"138":1,"155":1,"167":1,"173":1,"268":2,"298":1,"322":4,"324":4,"345":1}}],["whatever",{"1":{"30":1,"94":1,"171":1,"214":1,"216":1,"328":1}}],["www",{"1":{"30":1,"216":1,"277":1,"293":1,"325":1,"359":1}}],["won",{"1":{"216":1,"377":1}}],["wouldn",{"1":{"29":1}}],["worth",{"1":{"294":1}}],["world2",{"1":{"294":1}}],["world1",{"1":{"294":1}}],["world",{"1":{"24":5,"25":4,"35":2,"43":4,"48":1,"55":5,"97":1,"112":1,"113":1,"149":1,"177":1,"294":1,"322":1,"323":2,"324":2,"326":2,"345":3}}],["workload",{"1":{"128":1,"177":1,"209":1,"294":1}}],["workloads",{"1":{"3":1,"4":1,"197":1,"275":1,"279":1,"280":1,"288":1,"294":1,"298":1}}],["workdir",{"1":{"127":11}}],["workspace",{"1":{"218":1,"219":1}}],["works",{"0":{"57":1,"87":1},"1":{"55":1,"57":1,"75":1,"76":1,"77":1,"81":1,"82":1,"90":1,"113":1,"123":1,"124":1,"148":1,"157":1,"158":1,"218":1,"268":1,"322":1,"325":2,"331":1}}],["working",{"1":{"10":1,"68":1,"69":1,"276":1,"307":1,"325":2}}],["workflowversionid",{"1":{"333":1}}],["workflowv1",{"1":{"333":2}}],["workflowdeclaration",{"1":{"332":1,"345":2,"346":1}}],["workflowname",{"1":{"294":1,"333":1}}],["workflow`",{"1":{"145":3,"192":1,"307":1}}],["workflowjob",{"1":{"136":2,"294":1},"2":{"136":1,"294":1}}],["workflowruntriggerconfig",{"1":{"322":4,"324":4}}],["workflowrunbuffer",{"1":{"294":4}}],["workflowrun",{"1":{"161":1,"162":1},"2":{"161":1,"162":1}}],["workflowrunid`",{"1":{"154":1}}],["workflowrunid",{"1":{"121":1,"162":2,"333":1},"2":{"121":1}}],["workflowrunref",{"1":{"25":1,"322":2,"332":1,"345":2}}],["workflowinput",{"1":{"74":1,"78":1}}],["workflowids",{"1":{"335":1}}],["workflowid",{"1":{"52":1,"333":1}}]
,["workflow",{"0":{"84":1,"93":1,"97":1,"322":1},"1":{"7":3,"10":2,"13":1,"14":1,"16":1,"20":4,"24":2,"25":3,"35":5,"39":16,"40":4,"43":3,"46":1,"50":6,"51":2,"52":15,"53":2,"55":10,"58":2,"59":2,"60":2,"61":4,"65":1,"66":1,"68":5,"72":29,"74":6,"78":11,"81":3,"82":10,"83":1,"84":19,"87":1,"89":6,"90":2,"91":1,"92":2,"93":13,"94":12,"95":6,"96":4,"97":6,"100":3,"101":3,"104":6,"105":16,"106":1,"107":1,"110":3,"113":6,"115":1,"116":1,"117":13,"118":3,"120":5,"121":17,"124":1,"131":1,"132":15,"133":12,"134":1,"135":6,"136":16,"137":3,"138":9,"141":6,"142":13,"145":19,"149":2,"153":1,"154":2,"155":6,"156":1,"158":2,"161":4,"162":1,"164":1,"174":4,"175":2,"176":1,"181":1,"184":1,"192":2,"193":1,"206":1,"207":1,"223":4,"225":1,"266":2,"268":14,"276":1,"278":1,"288":1,"291":2,"292":1,"294":11,"295":1,"296":2,"297":1,"300":1,"301":2,"307":24,"309":14,"311":30,"312":4,"314":2,"316":79,"317":57,"320":32,"322":104,"324":86,"326":7,"328":4,"330":1,"331":1,"332":34,"333":48,"334":3,"335":1,"338":2,"340":9,"341":2,"344":11,"345":21,"346":5,"367":2,"370":1},"2":{"39":2,"40":2,"43":1,"50":1,"52":3,"53":1,"55":1,"58":2,"59":2,"60":1,"61":1,"65":1,"68":2,"72":4,"74":2,"78":3,"81":1,"82":1,"84":7,"89":3,"93":1,"94":2,"95":2,"96":1,"97":1,"105":1,"110":2,"113":2,"115":1,"116":1,"117":2,"118":2,"121":1,"132":2,"133":5,"135":2,"136":3,"138":3,"141":3,"142":3,"155":2,"161":1,"162":1,"322":3,"326":1,"328":2,"345":3},"3":{"13":1,"14":1,"15":1,"16":1,"17":1,"18":1}}],["workflowsclient",{"1":{"332":2,"344":1}}],["workflowscheduledlistparams",{"1":{"35":1},"2":{"35":1}}],["workflows`",{"1":{"145":1,"307":1}}],["workflowstep",{"1":{"136":2,"294":1},"2":{"136":1,"294":1}}],["workflows=",{"1":{"20":1,"135":1,"326":1,"328":1}}],["workflows",{"0":{"92":1,"204":1,"320":1,"344":1,"366":1,"370":1},"1":{"0":2,"2":2,"5":1,"10":1,"20":5,"29":1,"32":3,"43":2,"44":1,"46":7,"55":2,"71":1,"72":12,"82":1,"83":1,"84":2,"85":1,"86":1,"90":2,"91":2,"97":2,"98":2,"100":1,"101":1,"103":1,"104":2,"105":5,"106":1,"
107":3,"111":1,"114":1,"118":1,"126":2,"132":1,"134":2,"135":3,"136":1,"139":1,"140":2,"141":2,"142":1,"145":3,"156":1,"158":1,"164":2,"173":1,"174":2,"179":1,"181":1,"182":1,"203":1,"204":1,"205":1,"206":1,"207":1,"214":1,"222":1,"223":1,"268":1,"279":1,"289":2,"294":10,"307":9,"311":3,"316":2,"317":11,"318":1,"320":10,"321":1,"322":5,"324":2,"326":3,"332":8,"333":5,"334":1,"341":1,"342":1,"344":6,"345":5,"348":1,"366":3,"368":1,"370":3,"371":1,"372":2,"377":4},"2":{"72":3,"332":1}}],["workerdelay",{"1":{"289":2}}],["worker`",{"1":{"171":1,"363":1,"364":1}}],["workerlabelcomparator",{"1":{"136":2},"2":{"136":2}}],["workerid",{"1":{"132":2}}],["worker|",{"1":{"123":1,"124":1}}],["worker",{"0":{"20":1,"21":1,"134":1,"135":1,"146":1,"156":1,"167":1,"170":1,"209":1,"269":1,"271":1,"272":1,"365":1},"1":{"3":1,"7":6,"10":2,"17":2,"18":3,"19":1,"20":22,"21":11,"22":5,"23":1,"24":1,"30":1,"34":1,"35":1,"38":1,"49":1,"55":3,"61":1,"62":1,"65":1,"67":1,"74":2,"78":1,"82":6,"85":1,"86":1,"87":4,"89":1,"92":1,"101":2,"103":2,"104":4,"105":1,"106":1,"108":1,"109":2,"110":1,"112":1,"122":1,"123":9,"124":5,"126":3,"127":13,"128":1,"130":5,"131":3,"132":25,"133":14,"134":7,"135":14,"136":41,"137":2,"138":1,"141":1,"144":2,"145":6,"146":23,"154":2,"156":5,"157":2,"158":8,"159":1,"166":3,"167":5,"168":3,"169":4,"170":6,"171":2,"175":1,"177":2,"178":1,"181":1,"182":1,"183":1,"184":1,"188":1,"206":2,"207":1,"209":7,"210":6,"222":1,"228":1,"230":3,"234":1,"238":1,"254":2,"257":8,"266":2,"268":14,"269":2,"270":2,"271":1,"272":7,"274":2,"289":5,"294":7,"295":1,"307":15,"309":3,"316":2,"319":16,"322":10,"323":2,"324":4,"325":1,"326":4,"327":6,"328":6,"332":3,"333":11,"343":12,"348":3,"354":1,"355":1,"364":3,"365":8},"2":{"20":3,"21":3,"24":1,"35":1,"61":1,"74":1,"78":1,"82":2,"104":1,"105":1,"127":3,"132":4,"133":3,"135":2,"136":12,"141":1,"144":1,"156":2,"294":6,"326":2,"328":1,"364":1},"3":{"19":1,"20":1,"21":1,"22":1}}],["workersclient",{"1":{"332":1,"343":1}}],["workers=10",{"1":{"296
":1}}],["workers",{"0":{"18":1,"19":1,"128":1,"165":1,"168":1,"171":1,"319":1,"343":1,"363":1,"371":1},"1":{"2":2,"3":2,"10":1,"17":1,"18":2,"19":3,"20":1,"21":1,"22":1,"23":1,"34":1,"38":1,"47":1,"49":1,"74":2,"75":1,"76":1,"87":1,"106":3,"107":1,"123":1,"126":1,"128":2,"131":2,"134":3,"135":2,"136":2,"144":1,"156":1,"164":1,"165":2,"166":2,"167":2,"168":1,"169":3,"171":2,"172":1,"173":2,"174":7,"175":1,"176":2,"179":1,"181":2,"183":1,"188":1,"203":1,"205":2,"206":1,"209":4,"210":2,"222":1,"223":1,"294":2,"297":2,"307":3,"319":9,"332":7,"343":5,"347":1,"348":2,"363":2,"368":1,"371":4},"2":{"332":1}}],["work",{"0":{"92":1},"1":{"2":1,"3":1,"9":1,"13":1,"20":1,"30":1,"68":1,"77":1,"85":4,"92":1,"100":1,"101":1,"104":1,"105":1,"106":1,"107":1,"118":1,"120":1,"123":1,"136":2,"137":1,"139":1,"164":1,"173":1,"175":1,"204":1,"209":2,"216":1,"220":1,"225":1,"227":1,"284":1,"296":3,"307":1,"315":1,"322":1,"325":2,"327":1,"331":1,"339":1,"345":1}}],["wf",{"1":{"24":1,"43":2,"65":3,"66":2,"84":1,"104":9,"105":23,"120":2,"121":6,"155":1,"294":3,"326":8},"2":{"24":1,"43":2,"65":1,"66":1,"104":6,"105":10,"120":2,"121":2,"294":3,"326":4}}],["wiring",{"1":{"212":1}}],["wired",{"1":{"152":1}}],["width",{"1":{"173":3}}],["wife",{"1":{"160":4,"161":1}}],["wildcard",{"1":{"50":6,"52":2}}],["wisely",{"1":{"38":1}}],["wiki",{"1":{"38":1,"118":1}}],["wikipedia",{"1":{"38":1,"118":1}}],["withhostport",{"1":{"294":1},"2":{"294":1}}],["withmiddleware",{"1":{"153":4,"154":2,"157":2}}],["withmetadata",{"1":{"149":2}}],["withbatchspanprocessoroptions",{"1":{"144":1}}],["withbackoff",{"1":{"60":2}}],["withtracerprovider",{"1":{"144":1}}],["withtimeouts",{"1":{"65":1}}],["withlabels",{"1":{"135":1},"2":{"135":1}}],["withslots",{"1":{"135":1},"2":{"135":1}}],["withskipif`",{"1":{"118":1}}],["withskipif",{"1":{"113":1,"117":2},"2":{"113":1,"117":1}}],["withwaitfor`",{"1":{"118":1}}],["withwaitfor",{"1":{"110":1,"113":2,"118":1},"2":{"110":1,"113":1,"118":1}}],["withworkflowdefaultpriority",{"1":{"
84":1},"2":{"84":1}}],["withworkflowdescription",{"1":{"39":1,"66":1},"2":{"39":1,"66":1}}],["withworkflowconcurrency",{"1":{"74":1,"78":1},"2":{"74":1,"78":1}}],["withworkflowcroninput",{"1":{"39":1},"2":{"39":1}}],["withworkflowcron",{"1":{"39":1},"2":{"39":1}}],["withworkflowversion",{"1":{"66":1},"2":{"66":1}}],["withworkflowevents",{"1":{"50":1,"52":1,"214":1,"216":1,"219":1,"220":1,"221":1},"2":{"50":1,"52":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["withworkflowstickystrategy",{"1":{"132":1},"2":{"132":1}}],["withworkflows",{"1":{"20":1,"135":1},"2":{"20":1,"135":1}}],["witheventmetadata",{"1":{"149":1},"2":{"149":1}}],["witherrorhandling",{"1":{"104":1,"105":1,"120":1}}],["withexecutiontimeout",{"1":{"65":1,"66":1,"68":1},"2":{"65":1,"66":1,"68":1}}],["withparents",{"1":{"95":1,"110":1,"113":2,"117":3,"118":1},"2":{"95":1,"110":1,"113":1,"117":1,"118":1}}],["withrunmetadata",{"1":{"149":1},"2":{"149":1}}],["withrunsticky",{"1":{"133":1},"2":{"133":1}}],["withrunpriority",{"1":{"84":1},"2":{"84":1}}],["withratelimits",{"1":{"81":1,"82":1},"2":{"81":1,"82":1}}],["withretrybackoff",{"1":{"60":1},"2":{"60":1}}],["withretries",{"1":{"58":1,"59":1,"60":1,"61":1},"2":{"58":1,"59":1,"60":1,"61":1}}],["withfilterscope",{"1":{"52":2},"2":{"52":1}}],["withfilters",{"1":{"52":1},"2":{"52":1}}],["within",{"0":{"230":1},"1":{"43":2,"66":1,"69":1,"74":1,"75":3,"82":1,"83":1,"100":2,"104":1,"117":1,"132":1,"138":1,"145":1,"151":1,"154":2,"158":1,"248":1,"301":1,"307":5,"311":1,"316":1,"317":1,"319":1,"320":1,"322":9,"334":1,"340":1,"341":1,"343":1,"344":1,"345":1,"368":1}}],["without",{"1":{"3":1,"21":2,"25":4,"30":2,"32":2,"55":1,"57":1,"58":1,"63":1,"68":2,"72":1,"84":1,"85":1,"86":2,"106":1,"123":1,"127":1,"152":1,"158":1,"164":1,"175":1,"176":1,"183":1,"207":1,"290":1,"294":1,"296":1,"298":1,"301":1,"307":3,"322":12,"323":2,"324":14,"326":1,"332":1,"333":2,"345":2,"354":1,"365":1}}],["window`",{"1":{"257":2}}],["window",{"1":{"17":1,"34":1,"79":1,"257":2}}],["warn",{
"1":{"142":2,"289":5},"2":{"142":1}}],["warnings",{"1":{"278":1,"295":1,"297":1}}],["warning",{"1":{"34":1,"35":1,"38":1,"46":1,"52":1,"62":1,"65":1,"113":1,"115":1,"117":1,"131":1,"132":1,"136":1,"137":1,"148":1,"177":1,"216":1,"247":1,"253":1,"268":1,"278":1,"279":2,"280":1,"294":2,"331":1,"333":1,"347":1}}],["wasting",{"1":{"123":1}}],["waste",{"1":{"104":1}}],["wasted",{"1":{"86":1,"124":1}}],["wasskipped",{"1":{"117":3},"2":{"117":1}}],["wal",{"1":{"296":1}}],["walk",{"1":{"241":1}}],["walkthrough",{"1":{"216":1}}],["walks",{"1":{"7":1,"212":1,"213":1,"215":1,"217":1}}],["wall",{"1":{"115":1}}],["waitfor`",{"1":{"333":4}}],["waitforevent|",{"1":{"123":1}}],["waitforeventout",{"1":{"117":3},"2":{"117":1}}],["waitforevent",{"1":{"112":4,"113":3,"116":2,"117":3,"118":3,"333":1},"2":{"112":1,"116":1}}],["waitfor",{"1":{"110":1,"113":2,"118":1,"333":2}}],["waitforsleepout",{"1":{"117":3},"2":{"117":1}}],["waitforsleep",{"1":{"110":3,"117":11}}],["waitgroup",{"1":{"104":1,"105":1},"2":{"104":1,"105":1}}],["waits",{"1":{"85":2,"86":2,"87":1,"89":1,"90":1,"101":2,"104":1,"113":1,"122":1,"123":2,"124":2,"145":1,"332":2,"333":4,"345":2}}],["wait`",{"1":{"25":3,"43":2,"322":8,"324":8}}],["waiting",{"1":{"21":1,"25":4,"88":1,"89":2,"92":1,"101":1,"103":1,"104":1,"111":2,"112":5,"113":2,"116":1,"123":4,"124":2,"209":1,"268":2,"322":8,"324":8,"332":1,"333":4,"345":2}}],["wait",{"0":{"24":1,"111":1,"112":1},"1":{"17":1,"22":1,"23":2,"24":2,"25":4,"27":1,"31":2,"42":1,"60":6,"64":1,"65":2,"72":1,"76":1,"84":4,"86":2,"87":2,"88":4,"89":8,"90":2,"93":1,"97":2,"100":1,"101":3,"102":1,"103":1,"104":4,"105":2,"108":1,"109":1,"110":11,"112":7,"113":14,"114":1,"115":1,"116":7,"117":14,"118":12,"122":1,"123":3,"124":1,"133":2,"145":1,"161":2,"162":1,"169":1,"227":1,"266":2,"289":6,"294":1,"308":1,"310":9,"322":16,"324":8,"329":1,"333":5},"2":{"25":1,"84":1,"89":3,"104":1,"105":1,"112":2,"116":3,"133":1,"161":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["wanting",{"1":{"209":1
}}],["wanted",{"1":{"49":2,"72":1}}],["want",{"1":{"17":1,"24":2,"27":2,"34":2,"40":1,"43":1,"46":1,"47":1,"49":1,"55":1,"61":1,"74":2,"75":3,"76":1,"77":2,"83":1,"118":1,"142":1,"153":1,"157":1,"162":1,"175":1,"177":1,"180":1,"214":1,"216":1,"219":1,"229":1,"248":1,"309":2,"365":1,"372":1}}],["ways",{"1":{"23":1,"34":1,"38":1,"52":1,"55":1,"72":1,"84":1,"154":1,"217":1,"224":1,"228":1,"294":2}}],["way",{"1":{"15":1,"21":1,"24":1,"26":1,"52":1,"55":2,"56":1,"63":1,"70":1,"72":4,"90":1,"93":1,"102":1,"119":1,"157":1,"160":4,"161":1,"164":1,"206":1,"224":1,"225":1,"286":1,"301":1,"316":4,"327":1,"329":1,"349":1}}],["watch",{"1":{"10":1,"325":1}}],["wsl",{"1":{"7":1,"349":1}}],["west",{"1":{"167":1,"198":3,"290":1}}],["website",{"1":{"201":1}}],["web",{"0":{"162":1},"1":{"223":1}}],["webhooknames",{"1":{"342":1}}],["webhook`",{"1":{"46":2,"374":1}}],["webhooksclient",{"1":{"332":1,"342":1}}],["webhooks<br",{"1":{"173":1}}],["webhooks",{"0":{"44":1,"212":1,"318":1,"342":1},"1":{"25":2,"44":2,"45":3,"46":3,"47":1,"74":1,"174":1,"212":2,"213":3,"214":4,"215":3,"216":6,"217":1,"318":14,"332":6,"342":5,"374":3},"2":{"332":1}}],["webhook",{"0":{"46":1},"1":{"44":1,"45":1,"46":19,"47":3,"49":1,"111":1,"131":1,"212":1,"214":9,"216":10,"217":1,"219":2,"220":2,"221":2,"265":1,"318":39,"342":12,"374":5},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["weight=10",{"1":{"136":1}}],["weight",{"1":{"136":3}}],["weights",{"1":{"136":4}}],["welcome",{"1":{"27":1,"49":1}}],["well",{"1":{"3":1,"9":1,"101":1,"113":1,"117":1,"158":1,"268":1,"293":1,"325":1,"326":1,"331":1,"332":1}}],["weekly",{"1":{"38":1}}],["week",{"1":{"5":1,"38":2}}],["weeks",{"1":{"5":1,"108":1,"123":1}}],["wrapped",{"1":{"307":2,"322":4}}],["wrapper",{"1":{"307":1,"310":2,"315":1,"333":3,"339":1,"341":1}}],["wrappers",{"1":{"88":1}}],["wrap",{"1":{"94":1,"120":1,"373":1}}],["wraps",{"1":{"2":1,"116":1,"118":3,"224":1,"345":1}}],["wrong",{"1":{"61":1,"276":1}}],["wrote",{"1":{"29":1}}],["writing",{"1":{"212":1,"217
":1,"294":7,"331":1,"373":1}}],["writes",{"1":{"279":1,"294":2}}],["write",{"1":{"29":1,"43":1,"161":1,"179":1,"203":1,"214":1,"216":1,"219":1,"220":1,"221":1,"247":1,"288":3,"294":1,"295":1,"297":2,"298":1,"374":4},"2":{"161":1}}],["written",{"1":{"0":1,"55":1,"123":1}}],["by`",{"1":{"309":1}}],["bytes",{"1":{"309":1}}],["bytes`",{"1":{"309":1}}],["bypass",{"1":{"61":1}}],["bypassing",{"0":{"61":1},"1":{"58":2,"255":1}}],["blip",{"1":{"210":1}}],["blunt",{"1":{"209":1}}],["bloat",{"1":{"296":1}}],["blob",{"1":{"248":1}}],["blobs",{"1":{"157":1}}],["blog",{"1":{"29":2,"30":1,"90":2}}],["blocked",{"1":{"74":1,"146":2,"272":1}}],["blocking",{"1":{"43":1,"169":1,"322":4,"324":4,"325":5,"377":1}}],["block",{"1":{"24":1,"25":1,"120":1,"139":1,"146":1,"155":1,"166":1,"221":4,"272":1,"325":2}}],["blocks",{"1":{"24":3,"119":1,"150":1,"322":3,"324":3,"333":1}}],["bidirectional",{"1":{"174":1}}],["bit",{"1":{"161":2,"217":1}}],["bind",{"1":{"225":2,"227":2,"240":2,"241":2,"254":2,"268":1}}],["binary",{"1":{"171":1,"284":1,"333":1}}],["binstub",{"1":{"127":1}}],["bin",{"1":{"127":2,"238":1}}],["billing",{"1":{"38":1,"61":1}}],["billions",{"1":{"5":1}}],["b`",{"1":{"118":1}}],["b147",{"1":{"305":1}}],["b1a",{"1":{"101":1}}],["b1",{"1":{"101":3}}],["bottleneck",{"1":{"290":1,"294":1}}],["bottlenecks",{"1":{"132":1,"177":1}}],["bot",{"1":{"219":1}}],["board",{"1":{"209":1}}],["body",{"1":{"146":1,"157":1,"241":2}}],["box",{"1":{"142":1}}],["boundary",{"1":{"158":1}}],["bound",{"1":{"94":1,"209":1}}],["bool",{"1":{"50":1,"55":3,"219":3}}],["b",{"1":{"83":3,"87":1,"101":7,"104":4,"105":4,"118":3,"120":2,"130":1,"252":2,"279":1},"2":{"130":1}}],["bb866b59",{"1":{"52":1}}],["buckets",{"1":{"268":2}}],["bucket",{"1":{"157":6,"268":3},"2":{"157":1}}],["buffers",{"1":{"294":2}}],["buffer",{"1":{"157":4,"294":7},"2":{"157":2}}],["bug",{"1":{"72":2}}],["bulkrunnowaitchildren",{"1":{"333":1}}],["bulkrunchildren",{"1":{"43":1,"294":1,"333":1},"2":{"43":1,"294":1}}],["bulkpush",{"1":{"294":2
},"2":{"294":1}}],["bulkpusheventwithmetadata",{"1":{"294":1}}],["bulkcancelreplayopts",{"1":{"72":6}}],["bulkinputs",{"1":{"43":2}}],["bulkupdate",{"1":{"35":1,"341":1},"2":{"35":1}}],["bulkdelete",{"1":{"35":1,"341":1},"2":{"35":1}}],["bulk",{"0":{"43":1,"72":1},"1":{"35":9,"36":2,"43":18,"70":3,"72":37,"104":2,"105":4,"145":2,"294":11,"316":24,"317":15,"322":14,"324":13,"326":1,"333":1,"341":2,"345":1},"2":{"35":2,"43":2,"72":1,"104":2,"105":2,"294":5,"326":1}}],["business",{"1":{"35":1,"40":1,"151":1,"190":1}}],["buttons",{"1":{"217":1,"221":1}}],["button",{"1":{"28":1,"72":1,"216":1,"221":4,"234":1,"238":1}}],["bundled",{"1":{"224":1,"243":1,"289":1}}],["bundle",{"1":{"21":1,"127":4}}],["built",{"0":{"141":1},"1":{"0":1,"3":3,"4":1,"73":1,"140":2,"154":1,"158":1,"162":1,"348":1,"368":1}}],["builder",{"1":{"127":4}}],["builders",{"1":{"32":1}}],["builds",{"1":{"87":1,"127":1}}],["building",{"0":{"95":1},"1":{"33":1}}],["build",{"1":{"0":1,"32":1,"71":1,"90":1,"127":9,"204":1}}],["bring",{"1":{"142":1,"182":1}}],["browser",{"1":{"369":1,"370":1,"371":1}}],["broker",{"1":{"173":1,"226":2}}],["broken",{"1":{"129":1}}],["broadly",{"1":{"325":1}}],["broadcast",{"1":{"225":3,"227":2,"230":3,"240":2,"241":2,"254":2,"262":2}}],["broad",{"1":{"23":1,"333":1}}],["broadest",{"1":{"19":1}}],["branchdurabletask",{"1":{"340":1}}],["branching",{"0":{"114":1,"115":1},"1":{"113":1,"114":1,"115":2,"117":2}}],["branch",{"1":{"106":1,"114":1,"115":2,"117":21}}],["branches",{"1":{"85":1,"87":1,"115":1,"117":1}}],["breakdown",{"1":{"129":1}}],["breaking",{"1":{"63":1,"347":1}}],["break",{"1":{"55":1,"102":1,"107":1}}],["brew",{"1":{"7":1,"349":1}}],["benchmark",{"1":{"286":1,"289":1}}],["benchmarks",{"0":{"289":1},"1":{"286":7,"287":1,"288":1}}],["benchmarking",{"0":{"286":1},"1":{"286":1,"289":1,"294":1}}],["benefit",{"1":{"10":1,"142":1,"187":1}}],["better",{"1":{"149":1,"286":2}}],["beta",{"1":{"131":2,"134":2,"198":3,"347":1}}],["bearer",{"1":{"129":3,"130":3,"148":3}}],["beginni
ng",{"1":{"161":1,"283":1}}],["begin",{"1":{"104":1,"105":1,"120":1,"161":1,"322":4}}],["begins",{"1":{"85":1,"87":1,"92":1,"145":1}}],["behaves",{"1":{"113":1,"117":1,"327":1,"329":1}}],["behavior",{"0":{"62":1},"1":{"31":1,"61":1,"62":3,"94":1,"101":1,"102":1,"123":1,"174":1,"176":1,"301":1,"322":1,"326":1,"327":1,"333":1,"345":1,"375":1}}],["behind",{"1":{"102":1,"243":1,"248":1}}],["belongs",{"1":{"75":1,"76":1,"309":4}}],["become",{"1":{"74":1,"76":1,"145":1}}],["becomes",{"1":{"15":1,"75":1,"132":2,"134":1,"136":1,"146":1}}],["because",{"1":{"61":1,"62":3,"104":2,"115":1,"121":3,"123":1,"124":3}}],["best",{"0":{"69":1,"99":1},"1":{"37":1,"42":1,"43":1,"69":1,"71":1,"99":1,"115":1,"167":1,"286":1}}],["baking",{"1":{"189":1}}],["balancer",{"1":{"189":1,"248":1}}],["batching",{"1":{"294":1}}],["batchspanprocessor",{"1":{"144":3}}],["batches",{"1":{"101":1}}],["batch",{"1":{"101":3,"106":2,"144":2,"267":4,"294":1}}],["battle",{"1":{"5":1}}],["bar",{"1":{"94":1,"328":1}}],["backing",{"1":{"277":1}}],["backuprestore",{"1":{"277":1}}],["backup",{"1":{"277":8,"282":1}}],["backups",{"1":{"180":1,"182":1,"202":1,"277":1}}],["backed",{"1":{"177":1}}],["backend",{"1":{"162":3,"241":5,"243":3,"346":1}}],["backlog",{"1":{"167":3}}],["backwards",{"1":{"102":1}}],["back",{"1":{"18":1,"19":1,"37":1,"72":1,"123":1,"157":1,"159":2,"160":1,"161":1,"162":4,"169":1,"174":1,"214":1,"220":1,"278":1,"316":4}}],["backoffresult",{"1":{"60":1}}],["backoffinput",{"1":{"60":1}}],["backoff",{"0":{"60":1},"1":{"17":1,"60":15,"62":3,"63":1,"120":1,"307":6,"322":12},"2":{"60":1}}],["background",{"1":{"0":1,"24":1,"25":2,"35":3,"40":3,"43":1,"48":1,"52":3,"55":1,"84":3,"112":1,"113":1,"144":1,"149":2,"161":1,"162":1,"175":1,"294":1},"2":{"24":1,"25":1,"35":1,"40":1,"43":1,"48":1,"52":1,"55":1,"84":1,"112":1,"113":1,"144":1,"149":1,"161":1,"162":1,"294":1}}],["basics",{"1":{"325":1}}],["basically",{"1":{"209":1}}],["basicconfig",{"1":{"141":2},"2":{"141":1}}],["basic",{"0":{"52":1,"270":1},"1":{
"13":1,"29":1,"46":3,"218":1,"243":2,"253":1,"260":2,"268":2,"318":2}}],["bash",{"1":{"7":1,"238":2,"349":1}}],["baseexception",{"1":{"322":2,"324":2}}],["bases",{"1":{"310":1,"311":1,"312":1,"313":1,"314":1,"315":1,"316":1,"317":1,"318":1,"319":1,"320":1,"322":1,"323":1,"324":1}}],["baseworkflowdeclaration",{"1":{"345":7},"2":{"345":6}}],["baseworkflow",{"1":{"307":1}}],["base`",{"1":{"262":1}}],["baseline",{"1":{"189":1}}],["base64",{"1":{"46":1,"157":6,"238":1,"259":3,"374":1}}],["based",{"1":{"32":1,"35":1,"38":1,"40":1,"46":4,"54":2,"57":1,"74":1,"75":2,"76":2,"81":3,"85":2,"86":1,"88":1,"90":1,"101":1,"104":1,"107":2,"115":1,"117":1,"118":1,"124":1,"128":1,"130":3,"134":2,"136":1,"150":1,"151":1,"173":1,"199":1,"216":1,"252":1,"288":1,"317":4,"322":1,"334":1,"341":1,"345":1,"369":1}}],["basemodel",{"1":{"14":2,"50":1,"55":5,"74":1,"78":1,"89":1,"105":2,"214":4,"216":4,"219":3,"220":2,"221":4,"322":2,"326":4,"328":2}}],["base",{"1":{"5":1,"115":1,"117":2,"144":1,"153":8,"243":1,"253":1,"262":9,"330":8}}],["dg",{"1":{"305":1}}],["dsn",{"1":{"258":1}}],["dsn`",{"1":{"258":1}}],["ddl",{"1":{"255":1}}],["dns",{"1":{"210":1}}],["dbname",{"1":{"252":1}}],["db=hatchet",{"1":{"225":2,"227":1,"231":1}}],["db",{"0":{"280":1},"1":{"173":4,"177":1,"182":1,"189":3,"225":2,"245":1,"255":1,"266":1,"279":2,"280":2,"294":2,"372":1}}],["dt",{"1":{"101":7}}],["dlq",{"1":{"73":2}}],["d60181b7",{"1":{"46":1}}],["dramatically",{"1":{"294":1}}],["drag",{"1":{"32":1}}],["driven",{"1":{"101":1,"106":1,"111":1,"118":2,"130":1}}],["drops",{"1":{"170":1}}],["dropdown",{"1":{"164":1}}],["dropped",{"1":{"161":1,"176":1,"279":1}}],["drop",{"1":{"32":1}}],["dynamictask",{"1":{"81":1}}],["dynamickey",{"1":{"81":1}}],["dynamically",{"1":{"34":1,"35":1,"38":1,"40":1,"81":1,"90":1,"108":1,"110":1,"111":1,"113":1,"128":1,"134":1,"136":1}}],["dynamic",{"0":{"80":1,"81":1},"1":{"32":1,"35":1,"38":1,"40":4,"46":2,"80":2,"81":6,"86":1,"100":3,"101":1,"104":1,"105":1,"106":1,"107":1,"115":1,"136":1,"32
5":1},"2":{"40":2}}],["duration`",{"1":{"255":1}}],["durations",{"1":{"92":1,"115":1,"124":1}}],["duration=timedelta",{"1":{"89":1,"113":1,"118":1}}],["duration=ratelimitduration",{"1":{"81":1}}],["duration",{"1":{"74":1,"78":1,"81":4,"82":1,"88":2,"89":2,"103":1,"108":1,"109":5,"110":4,"123":2,"124":1,"131":1,"132":2,"136":1,"158":1,"251":1,"255":1,"268":10,"289":7,"291":1,"315":2,"333":6},"2":{"74":1,"78":1,"89":1,"109":1}}],["durably",{"1":{"0":1,"173":2,"174":1,"181":1,"310":2,"333":1}}],["durableeventwithfiltertask",{"1":{"112":1,"116":1}}],["durableevent",{"1":{"112":1,"116":1}}],["durableeventtask",{"1":{"112":2,"116":1}}],["durableoutput",{"1":{"109":3,"112":4,"116":3}}],["durableinput",{"1":{"109":1,"112":1,"116":1}}],["durabletask",{"1":{"109":1,"112":1,"116":1,"332":2,"345":2},"2":{"109":1,"112":1,"116":1}}],["durablesleep",{"1":{"109":1},"2":{"109":1}}],["durablesleeptask",{"1":{"109":2}}],["durablecontext",{"0":{"310":1},"1":{"89":2,"109":2,"112":2,"115":1,"116":2,"307":2,"322":1,"326":2,"333":5},"2":{"109":1,"112":1,"116":1}}],["durableworkflow",{"1":{"89":1}}],["durable",{"0":{"29":1,"85":1,"86":1,"88":1,"102":1,"104":1,"109":1,"120":1},"1":{"0":3,"2":3,"3":2,"13":2,"29":6,"30":5,"31":4,"32":4,"33":2,"85":3,"86":1,"87":5,"88":1,"89":11,"90":7,"100":7,"101":20,"102":4,"103":4,"104":6,"106":3,"108":3,"109":10,"110":1,"111":3,"112":10,"113":2,"114":2,"115":4,"116":8,"119":3,"120":1,"122":1,"123":11,"124":2,"145":2,"155":6,"174":1,"175":1,"176":1,"271":1,"307":3,"308":3,"310":4,"322":6,"326":4,"332":5,"333":8,"340":3,"345":1,"346":2},"2":{"89":3,"109":1,"112":1,"115":1,"116":3,"155":1,"326":1}}],["durability",{"1":{"0":1,"3":1,"175":1,"181":1,"204":1}}],["duplicated",{"1":{"289":1}}],["duplicate",{"1":{"62":2}}],["duplication",{"1":{"55":2}}],["duplicating",{"1":{"30":1}}],["due",{"1":{"37":2,"42":2,"58":1,"60":3,"65":1,"171":1,"294":1,"327":1}}],["difficulty",{"1":{"289":1}}],["differ",{"1":{"158":1}}],["differs",{"1":{"108":1,"114":1,"119":1}}],["differ
ence",{"1":{"89":1,"103":1,"158":1,"173":1,"217":1}}],["differently",{"1":{"220":1}}],["different",{"1":{"23":1,"34":1,"35":2,"38":1,"40":2,"43":1,"46":4,"75":1,"83":2,"84":1,"87":1,"100":2,"102":1,"107":1,"114":1,"115":3,"117":1,"118":1,"120":1,"123":2,"158":1,"164":1,"174":2,"217":4,"222":1,"248":1,"287":1,"348":1,"351":1}}],["diving",{"1":{"166":1}}],["di",{"1":{"155":2},"2":{"155":1}}],["dies",{"1":{"137":1}}],["digit",{"1":{"78":4},"2":{"78":1}}],["digest",{"1":{"38":1}}],["dictionary",{"1":{"307":6,"309":2,"310":1,"314":2,"322":4,"326":1}}],["dict",{"1":{"39":2,"58":1,"59":1,"60":1,"65":1,"66":1,"68":2,"89":2,"104":1,"105":4,"116":1,"121":1,"132":2,"133":2,"136":1,"138":1,"141":1,"142":1,"294":2,"322":4,"326":5}}],["disabling",{"1":{"365":1}}],["disables",{"1":{"268":1}}],["disablehatchetcollector",{"1":{"144":1}}],["disable",{"1":{"62":1,"144":2,"243":1,"245":2,"254":2}}],["disabled",{"1":{"36":2}}],["disruption",{"1":{"210":1}}],["dispatch",{"1":{"174":1,"175":1,"177":1}}],["dispatches",{"1":{"173":1,"174":1}}],["display",{"1":{"284":1,"304":1,"353":1,"368":1}}],["displaying",{"1":{"72":2}}],["displays",{"1":{"18":1}}],["disk",{"1":{"134":1,"290":1,"296":1}}],["disconnect`",{"1":{"373":1}}],["disconnecting",{"0":{"170":1}}],["discovered",{"1":{"160":4,"161":1}}],["discoverability",{"1":{"149":1,"151":1}}],["discord",{"1":{"43":2,"74":2,"152":2,"153":2,"154":2,"155":2,"156":2,"177":1}}],["discuss",{"1":{"94":1,"199":1}}],["distant",{"1":{"167":1}}],["distribution",{"1":{"128":1,"129":1,"268":2}}],["distributes",{"1":{"20":1,"106":1}}],["distribute",{"1":{"17":1,"74":2,"246":1,"298":1}}],["distributed",{"1":{"3":1,"17":1,"175":1}}],["dist",{"1":{"127":9}}],["distinguish",{"1":{"69":1,"193":1}}],["diagnosing",{"1":{"10":1}}],["dir",{"1":{"10":1,"127":1,"227":1,"238":1,"253":1}}],["direction",{"1":{"311":2,"317":2,"337":1}}],["direction`",{"1":{"311":2,"317":2}}],["direct",{"1":{"255":7,"298":1}}],["directed",{"1":{"13":1,"85":1,"91":1,"100":1}}],["directory",{"
1":{"10":3,"238":2,"283":1,"364":2,"372":3}}],["directly",{"1":{"7":1,"11":1,"25":1,"43":2,"55":3,"62":1,"97":1,"115":1,"155":1,"158":2,"189":1,"201":1,"213":1,"268":1,"294":1,"310":1,"323":2,"324":2,"326":2,"333":2,"368":1}}],["dangerous",{"1":{"320":2,"324":2}}],["da6c",{"1":{"46":2}}],["dailycleanup",{"1":{"39":1}}],["daily",{"1":{"38":1,"39":3,"40":7}}],["date",{"1":{"35":6,"43":1,"55":1,"84":4,"152":1,"153":1,"154":2,"155":1,"156":1,"189":1,"333":1,"337":2,"338":2,"345":2},"2":{"35":1,"84":1,"154":1}}],["datetime",{"1":{"35":4,"72":4,"317":5},"2":{"35":1,"72":2,"84":1}}],["dataclasses",{"1":{"331":1}}],["dataclass",{"0":{"331":1},"1":{"331":6},"2":{"331":1}}],["datatracker",{"1":{"303":1}}],["datadog",{"1":{"147":1}}],["data",{"0":{"291":1},"1":{"26":1,"58":1,"68":1,"72":2,"73":1,"74":1,"76":1,"85":1,"86":1,"87":1,"101":1,"102":1,"104":1,"114":1,"115":1,"117":1,"118":1,"139":1,"149":2,"150":1,"152":1,"157":4,"158":1,"159":1,"160":1,"167":2,"174":1,"180":1,"188":2,"197":1,"204":1,"214":12,"217":2,"225":8,"227":5,"231":2,"243":1,"246":1,"254":2,"279":3,"280":1,"282":1,"291":2,"296":1,"300":1,"301":1,"308":1,"309":3,"311":2,"316":2,"317":2,"320":2,"322":9,"324":11,"328":1,"332":3,"333":12,"334":1,"345":7},"2":{"214":3}}],["databases",{"1":{"58":1}}],["database",{"0":{"244":1,"255":1,"267":1,"277":1,"293":1,"294":1,"296":1},"1":{"24":1,"34":1,"35":2,"40":1,"62":2,"79":1,"82":1,"102":1,"137":1,"173":1,"177":1,"223":1,"225":2,"227":4,"243":2,"244":1,"245":8,"246":2,"247":7,"248":1,"251":2,"252":4,"253":5,"255":10,"266":1,"276":1,"277":4,"278":1,"279":6,"280":1,"281":1,"282":2,"284":1,"286":1,"287":4,"290":3,"292":1,"294":12,"295":1,"296":2,"297":2,"298":5,"299":2,"301":1,"330":1,"362":1,"372":2,"373":2}}],["dasharray",{"1":{"101":5}}],["dash",{"1":{"28":1,"36":1,"41":1}}],["dashboard`",{"1":{"227":1}}],["dashboard",{"0":{"28":1,"36":1,"41":1,"150":1,"168":1,"171":1,"205":1},"1":{"7":2,"21":1,"28":1,"34":3,"36":2,"38":3,"39":1,"41":1,"43":2,"46":1,"72":6,"73":2,"79":1
,"84":2,"92":1,"98":1,"106":1,"117":1,"150":1,"154":1,"164":1,"166":2,"168":2,"169":2,"188":1,"193":1,"195":1,"205":1,"214":6,"216":1,"223":1,"227":2,"228":2,"231":1,"278":3,"279":1,"285":2,"301":2,"362":5,"369":1,"372":1}}],["days",{"1":{"86":1,"123":1,"194":2,"291":4,"296":1}}],["days=1",{"1":{"72":2}}],["day",{"1":{"26":1,"38":4,"40":1,"203":2,"314":2,"333":1}}],["dag3",{"1":{"101":3}}],["dag2",{"1":{"101":3}}],["dag1",{"1":{"101":3}}],["dagworkflow",{"1":{"93":2}}],["dags",{"0":{"91":1},"1":{"91":1,"98":1,"100":2,"104":2,"108":2,"109":1,"111":2,"112":1,"114":1,"116":1,"119":2,"120":1,"123":1,"124":1,"132":1,"268":1,"322":1,"345":1,"377":2}}],["dag",{"0":{"92":1,"95":1,"105":1},"1":{"3":1,"13":1,"20":4,"32":2,"85":1,"87":1,"90":2,"91":1,"93":4,"94":4,"95":6,"96":5,"97":2,"100":4,"101":10,"103":3,"104":1,"105":1,"110":2,"113":2,"117":2,"118":1,"124":4,"132":2,"267":2,"309":2,"323":2,"324":2,"333":1,"377":1},"2":{"94":2,"95":2,"96":2,"97":1}}],["d",{"1":{"7":1,"30":1,"46":2,"59":1,"68":2,"105":7,"109":2,"112":1,"116":1,"121":1,"162":3,"204":1,"214":2,"216":2,"225":2,"226":2,"227":1,"231":1,"238":1,"278":1,"285":1,"289":5,"329":1,"362":1}}],["dodds",{"1":{"325":1}}],["doubles",{"1":{"288":1}}],["doubling",{"1":{"5":1}}],["domain>",{"1":{"374":1}}],["domain`",{"1":{"260":1}}],["domains",{"1":{"260":1}}],["domains`",{"1":{"260":1}}],["domain",{"1":{"225":2,"227":1,"240":8,"241":8,"243":3,"260":1,"374":2}}],["door",{"1":{"174":1}}],["doesn",{"1":{"85":1,"104":1,"170":1,"209":1}}],["doe",{"1":{"40":3}}],["done",{"1":{"47":1,"50":1,"66":1,"68":1,"104":1,"105":1,"109":2,"112":1,"116":1,"126":1,"139":1,"301":2},"2":{"68":1,"104":1,"105":1}}],["don",{"1":{"18":1,"21":1,"61":1,"64":1,"85":1,"92":1,"100":1,"139":1,"175":1,"189":1,"214":1,"216":1,"218":1,"279":1,"373":2}}],["down`",{"1":{"284":1}}],["downgrades",{"1":{"280":1}}],["downgrade",{"0":{"279":1},"1":{"275":1,"276":1,"280":1,"283":1}}],["downgrading",{"0":{"275":1,"280":1},"1":{"275":1,"279":2,"280":4,"281":1,"282":1
}}],["downloadfroms3",{"1":{"157":2}}],["download",{"1":{"127":1,"157":1,"359":1,"364":1,"372":1,"374":1}}],["downtime",{"1":{"37":1,"42":1,"278":1,"279":1}}],["downstream",{"1":{"17":1,"82":1,"92":1,"101":1,"113":1,"117":3}}],["downcase",{"1":{"14":1}}],["doc",{"1":{"303":1}}],["dockerfile",{"1":{"126":1}}],["dockerfiles",{"0":{"127":1},"1":{"125":1}}],["dockerizing",{"1":{"125":1}}],["docker",{"0":{"125":1,"226":1,"230":1,"231":1},"1":{"224":4,"225":8,"226":2,"227":6,"228":1,"229":2,"231":1,"238":6,"253":3,"255":1,"278":6,"279":2,"285":3,"289":3,"358":4,"359":3,"362":4}}],["documenting",{"1":{"306":1,"332":1}}],["document",{"1":{"98":1,"106":3,"250":1,"268":1,"269":1,"375":1}}],["documentation",{"1":{"7":1,"9":1,"11":2,"12":1,"33":1,"47":1,"94":1,"182":1,"186":1,"201":1,"246":1,"251":1,"277":1,"279":1,"297":1,"316":2,"317":2,"322":2,"325":1,"368":1}}],["documents",{"1":{"5":1,"10":1}}],["docs",{"0":{"1":1},"1":{"1":1,"7":2,"9":1,"11":8,"12":6,"21":1,"143":1,"207":1,"214":1,"216":2,"218":1,"225":3,"227":1,"228":1,"234":1,"238":1,"241":1,"246":1,"277":4,"293":1,"305":2,"309":5,"322":1,"325":1,"326":1,"331":1,"332":1},"2":{"225":1}}],["deduplication",{"1":{"322":1,"324":1,"333":1}}],["dedicated",{"1":{"44":1,"147":1,"318":1,"342":1}}],["degradation",{"1":{"290":1,"292":2}}],["derived",{"1":{"154":1}}],["demand",{"1":{"128":1,"199":1}}],["demonstrates",{"1":{"66":1}}],["demo",{"1":{"66":1}}],["dequeued",{"1":{"75":1}}],["deadlock",{"1":{"123":1}}],["deadlocks",{"1":{"104":1,"123":1}}],["deadline",{"1":{"109":1,"118":1}}],["dead",{"0":{"73":1},"1":{"73":3}}],["denied`",{"1":{"62":1}}],["descending",{"1":{"196":1}}],["describing",{"1":{"148":1}}],["described",{"1":{"62":1}}],["descriptions",{"1":{"368":1}}],["description",{"1":{"40":2,"62":2,"135":1,"136":2,"142":2,"144":3,"145":2,"191":1,"192":1,"196":1,"254":1,"255":1,"256":1,"257":1,"258":1,"259":1,"260":1,"261":1,"262":1,"263":1,"264":1,"265":1,"266":1,"267":1,"268":8,"270":1,"271":1,"272":1,"273":1,"274":1,"294":1,
"307":12,"309":29,"310":4,"311":17,"312":21,"313":5,"314":11,"315":5,"316":47,"317":29,"318":21,"319":11,"320":17,"322":45,"323":7,"324":45,"332":10,"333":16,"334":4,"335":5,"336":1,"337":1,"338":2,"339":2,"340":9,"341":7,"342":5,"343":4,"344":4,"345":11,"346":3,"367":1,"376":2}}],["desirable",{"1":{"134":1,"301":1}}],["desiredworkerlabels",{"1":{"136":1}}],["desiredworkerlabel",{"1":{"136":6,"307":2,"322":2},"2":{"136":2}}],["desired",{"0":{"136":1},"1":{"22":1,"134":1,"136":13,"279":1,"307":2,"322":2}}],["designated",{"1":{"164":1}}],["designing",{"1":{"132":1}}],["design",{"1":{"91":1,"95":1,"158":1,"172":1,"177":1}}],["designed",{"1":{"9":1,"91":1,"176":1,"203":1,"225":1,"237":1}}],["delegates",{"1":{"333":1}}],["deletes",{"1":{"317":4,"334":1,"335":1,"341":2,"342":1}}],["delete`",{"1":{"311":2,"312":2,"317":6,"318":2,"320":2,"324":2}}],["deleted",{"1":{"291":1,"312":2,"317":2,"318":2,"334":1,"335":1,"341":2,"342":1,"344":1}}],["delete",{"1":{"35":11,"36":3,"40":8,"52":1,"62":2,"226":1,"311":6,"312":6,"317":10,"318":6,"320":8,"324":6,"334":1,"335":2,"341":1,"342":2,"344":2,"357":1},"2":{"35":2,"40":3,"62":1}}],["deleting",{"0":{"357":1},"1":{"35":2,"40":2}}],["delivery",{"1":{"302":1}}],["delivered",{"1":{"111":1}}],["deliberate",{"1":{"158":1}}],["delay=10",{"1":{"296":1}}],["delayed",{"1":{"84":1,"329":1}}],["delay",{"1":{"57":1,"60":1,"84":1,"109":2,"112":1,"116":1,"144":2,"289":2,"345":4},"2":{"84":1,"109":1,"112":1,"116":1,"345":1}}],["delays",{"0":{"108":1},"1":{"37":1,"42":1,"108":1,"167":1}}],["decreasing",{"1":{"295":1}}],["decryption",{"1":{"158":1}}],["decrypting",{"1":{"152":1}}],["decrypts",{"1":{"157":1}}],["decrypt",{"1":{"157":3,"372":1}}],["decrypted",{"1":{"154":1,"157":2}}],["decorators",{"1":{"322":1}}],["decorator",{"1":{"89":1,"94":1,"307":8,"322":17}}],["decision",{"0":{"180":1}}],["decisions",{"1":{"85":2,"115":2}}],["decipher",{"1":{"157":4},"2":{"157":3}}],["decided",{"1":{"100":1,"104":1}}],["decides",{"1":{"86":1,"87":1,"101":3}}],["d
ecide",{"1":{"34":1,"87":1,"90":1,"101":1,"104":3,"105":2,"106":1,"117":1,"120":2,"179":1,"322":1}}],["declarative",{"1":{"52":1,"91":1}}],["declaratively",{"1":{"52":1,"121":1,"307":3}}],["declarations",{"1":{"148":1}}],["declaration",{"1":{"50":1,"89":1,"307":1,"320":1,"344":1,"345":1,"346":2}}],["declaring",{"0":{"20":1,"50":1},"1":{"50":1,"81":1,"82":1,"93":1,"112":1,"132":2,"154":1}}],["declared",{"1":{"87":1,"101":1,"104":1,"110":1,"112":1,"113":1,"152":1,"154":1,"330":1}}],["declare",{"1":{"14":1,"20":2,"49":1,"50":2,"52":1,"55":1,"82":1,"88":1,"92":1,"93":2,"94":1,"105":1,"108":1,"110":1,"111":1,"113":2,"117":1,"118":3,"119":1,"124":1,"307":3}}],["detect",{"1":{"362":2}}],["detected",{"1":{"348":1,"376":1}}],["determinism",{"0":{"102":1},"1":{"102":3,"115":1,"123":2}}],["deterministic",{"1":{"100":1,"102":2,"115":1,"123":1}}],["determine",{"1":{"46":1,"52":2,"75":1,"76":1,"307":2,"322":2}}],["determines",{"1":{"39":1,"333":1}}],["determined",{"0":{"98":1},"1":{"34":1,"38":1,"85":1,"87":1,"90":1,"107":1,"319":4,"320":4}}],["detailed",{"1":{"33":1,"279":1}}],["details",{"1":{"16":3,"21":1,"72":2,"110":1,"113":1,"115":1,"214":1,"307":1,"316":22,"317":3,"322":2,"345":3,"370":1}}],["debug",{"1":{"10":1,"21":3,"142":5,"203":1,"289":1},"2":{"142":1}}],["debugging",{"0":{"166":1},"1":{"0":1,"3":1,"10":1,"121":1,"140":1,"205":1,"207":1,"223":1}}],["defer",{"1":{"20":1,"144":1}}],["def",{"1":{"14":1,"20":2,"39":2,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":2,"81":1,"82":2,"89":3,"94":1,"95":2,"96":2,"104":2,"105":5,"109":1,"110":1,"112":1,"113":2,"115":1,"116":1,"117":4,"118":1,"121":2,"132":2,"133":2,"135":1,"136":1,"138":1,"141":1,"142":1,"153":8,"155":1,"160":2,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":1,"322":1,"325":4,"326":6,"328":2,"330":9,"331":1}}],["definitive",{"1":{"188":1}}],["definition",{"0":{"39":1},"1":{"38":2,"39":4,"40":1,"55":1,"58":1,"81":2,"82":3,"108":1,"110":1,"111":1,"113":1,"121":1,"132":1,"133":1,"136":1,"145":1,
"317":1,"332":1,"345":1}}],["definitions",{"1":{"38":1,"39":1,"105":1,"152":1,"174":1,"205":1}}],["defining",{"0":{"14":1,"39":1,"93":1,"94":1,"153":1},"1":{"15":1,"37":1,"38":1,"42":1,"55":1,"79":1,"121":1,"152":1,"155":1,"158":1,"311":2,"345":1,"377":3}}],["defines",{"1":{"94":1,"243":2,"322":2,"324":2,"332":2}}],["define",{"1":{"14":1,"38":2,"39":1,"55":2,"81":1,"82":1,"92":1,"94":1,"105":1,"121":5,"142":1,"153":2,"158":3,"204":1,"238":1,"307":2,"321":1,"322":1,"325":1,"327":1,"328":1,"345":2,"364":1,"366":1,"367":1}}],["defined",{"1":{"2":1,"23":1,"34":1,"35":1,"36":1,"38":1,"39":1,"55":1,"57":1,"75":1,"76":1,"82":4,"94":1,"101":1,"121":1,"141":1,"142":1,"158":1,"188":1,"322":3,"323":2,"324":2,"325":1,"326":2,"330":1,"345":1,"370":1}}],["default`",{"1":{"354":1}}],["defaultadminpassword`",{"1":{"243":1}}],["defaultadminpassword",{"1":{"243":2}}],["defaultadminemail`",{"1":{"243":1}}],["defaultadminemail",{"1":{"243":2}}],["defaults`",{"1":{"307":1}}],["defaults",{"1":{"216":1,"243":1,"252":1,"255":1,"307":5,"309":1,"314":2,"322":4,"376":1}}],["defaultpriority",{"1":{"84":1}}],["defaulting",{"1":{"62":1}}],["defaultfilters",{"1":{"52":1}}],["defaultfilter",{"1":{"52":3,"307":3},"2":{"52":2}}],["default",{"0":{"354":1},"1":{"3":1,"22":2,"52":3,"61":1,"62":6,"64":2,"83":1,"84":11,"129":1,"136":3,"144":5,"145":1,"146":2,"167":3,"176":1,"188":1,"196":1,"209":2,"225":6,"227":4,"231":1,"234":1,"240":1,"243":19,"251":1,"254":3,"255":4,"256":1,"257":31,"258":1,"259":1,"260":1,"261":1,"262":1,"263":1,"264":1,"265":1,"266":6,"267":1,"268":3,"270":1,"271":1,"272":1,"273":1,"274":1,"289":6,"291":3,"293":1,"294":2,"295":1,"296":1,"297":1,"307":8,"309":6,"310":1,"311":8,"312":10,"313":4,"314":2,"315":2,"316":25,"317":13,"318":10,"319":4,"320":8,"322":21,"323":2,"324":22,"326":2,"340":1,"344":2,"353":2,"354":10,"355":1,"357":2,"362":7,"376":1}}],["deprecated",{"1":{"309":2,"376":1}}],["dep=chained",{"1":{"155":2,"330":2}}],["dep=async",{"1":{"155":2,"330":2}}],["dep=sync",{"1":
{"155":2,"330":2}}],["deps",{"1":{"153":4,"155":32,"228":1},"2":{"155":1}}],["deps|",{"1":{"153":2}}],["deps`",{"1":{"152":1,"155":1}}],["dep",{"1":{"153":24,"155":116,"330":34}}],["depths",{"1":{"128":1}}],["depth",{"1":{"90":1,"130":1,"241":1}}],["dependent",{"1":{"301":1}}],["dependents",{"1":{"117":1}}],["dependency",{"0":{"329":1},"1":{"85":1,"87":1,"152":2,"153":12,"154":2,"155":7,"157":2,"176":1,"177":1,"322":1,"323":2,"324":2,"329":3,"330":6,"345":1},"2":{"153":1,"155":1}}],["dependencies",{"0":{"95":1},"1":{"2":1,"4":1,"69":1,"91":2,"92":1,"95":1,"96":1,"98":1,"101":1,"124":1,"127":3,"152":2,"153":2,"154":3,"155":8,"156":2,"173":1,"174":1,"175":1,"182":1,"189":2,"322":1,"323":6,"324":6,"329":7,"330":5,"345":1,"372":1}}],["depends",{"1":{"98":1,"111":1,"122":1,"153":4,"155":6,"177":1,"189":1,"200":1,"225":2,"227":4,"230":2,"323":2,"324":2,"330":10}}],["depend",{"1":{"72":1}}],["depending",{"1":{"46":1,"83":1,"114":1,"117":1,"174":1,"176":1,"279":1}}],["deploy`",{"1":{"220":2}}],["deploying",{"0":{"285":1},"1":{"165":1,"167":1,"223":1,"241":1,"243":2,"268":1,"279":1,"281":1}}],["deployed",{"1":{"5":1,"168":1,"171":1,"198":1,"290":1}}],["deploymentenvfrom",{"1":{"246":2}}],["deployments",{"0":{"202":1},"1":{"130":1,"173":1,"174":1,"177":1,"185":1,"200":1,"231":1,"243":1,"248":1,"253":1,"255":1,"268":1,"276":1,"298":1}}],["deployment",{"0":{"224":1},"1":{"4":1,"130":1,"170":1,"179":1,"180":1,"183":1,"187":1,"189":1,"202":1,"225":4,"226":3,"227":1,"235":1,"246":1,"248":1,"253":1,"278":3,"279":3,"347":1,"348":1,"368":1,"370":1,"371":1}}],["deploy",{"1":{"0":1,"2":1,"72":1,"178":1,"182":1,"184":1,"189":1,"220":5,"225":1,"226":2,"234":1,"249":1,"279":1,"285":1}}],["developing",{"1":{"325":2}}],["develop",{"1":{"206":1}}],["development`",{"1":{"255":1}}],["development",{"1":{"10":1,"21":1,"127":1,"164":2,"171":2,"224":1,"225":1,"237":1,"243":1,"251":1,"253":3,"255":2,"347":1,"351":1,"358":1,"363":1,"366":1,"372":6}}],["developers",{"0":{"164":1},"1":{"55":1,"164":2,
"305":1}}],["developer",{"0":{"203":1},"1":{"0":1,"164":3,"203":1,"374":1}}],["dev`",{"1":{"7":1,"206":1,"348":1,"372":1}}],["dev",{"1":{"0":1,"7":2,"10":1,"16":2,"21":1,"46":2,"55":1,"105":1,"144":2,"158":1,"225":2,"227":4,"231":1,"234":1,"236":1,"237":1,"238":2,"248":1,"249":1,"278":2,"283":1,"284":1,"285":2,"289":5,"305":1,"325":1,"326":1,"349":1,"354":1,"355":1,"364":3,"365":6,"372":3,"374":1},"2":{"234":1,"249":1}}],["a>",{"1":{"332":24,"333":40,"334":5,"335":6,"336":2,"337":8,"338":5,"339":3,"340":10,"341":8,"342":6,"343":6,"344":5,"345":12,"346":3}}],["azure",{"1":{"277":3}}],["aes",{"1":{"157":1}}],["aevy",{"1":{"5":2}}],["aware",{"1":{"217":1}}],["awaits",{"1":{"322":1,"324":1}}],["awaited",{"1":{"154":1}}],["awaitedevent",{"1":{"89":1}}],["await",{"1":{"15":1,"20":2,"24":2,"25":5,"35":6,"40":6,"43":4,"48":1,"55":1,"65":1,"66":1,"68":5,"89":3,"95":1,"96":1,"97":2,"104":4,"105":7,"109":2,"112":5,"113":1,"115":4,"116":2,"117":6,"120":1,"132":1,"133":3,"135":2,"136":2,"142":2,"149":1,"154":1,"156":2,"157":4,"158":1,"160":5,"161":4,"162":3,"294":3,"325":3,"326":1,"345":3}}],["aws",{"1":{"157":3,"175":1,"188":1,"248":1,"277":2,"290":2,"302":1,"305":2},"2":{"157":2}}],["audited",{"0":{"192":1}}],["audit",{"0":{"190":1,"195":1},"1":{"190":4,"191":1,"192":1,"193":1,"194":1,"195":1,"196":2}}],["augmenting",{"1":{"152":1}}],["authority",{"1":{"376":2}}],["authorization",{"1":{"129":2,"148":2,"264":1,"268":1,"374":1}}],["auth`",{"1":{"264":1}}],["authmode",{"1":{"130":3}}],["auth",{"1":{"46":4,"225":6,"227":2,"238":3,"240":2,"241":12,"243":2,"252":1,"253":2,"260":29,"265":2,"268":2,"304":2,"318":2}}],["authenticated",{"1":{"188":1}}],["authenticate",{"1":{"46":1}}],["authenticationref",{"1":{"130":3}}],["authentication",{"0":{"260":1},"1":{"21":1,"46":4,"129":2,"148":2,"154":1,"158":1,"188":1,"217":1,"218":1,"243":4,"251":1,"252":1,"253":1,"265":2,"270":1,"303":1,"305":1,"318":2,"374":1}}],["auto",{"1":{"362":2,"365":2,"369":1,"370":1,"371":1}}],["autovacuum",{"0":{"2
96":1},"1":{"296":10}}],["automated",{"1":{"188":1}}],["automatically",{"1":{"3":1,"10":1,"11":1,"17":1,"37":1,"38":1,"42":1,"57":1,"58":1,"67":1,"73":2,"86":1,"92":1,"101":1,"130":1,"144":1,"145":2,"154":1,"155":1,"156":3,"158":1,"194":1,"219":1,"243":2,"260":1,"278":1,"354":1,"357":1,"362":1,"376":1}}],["automatic",{"1":{"0":1,"158":1,"316":4,"348":1}}],["autoscaling",{"0":{"128":1,"130":1},"1":{"128":2,"130":1},"3":{"19":1,"20":1,"21":1,"22":1}}],["a=str",{"1":{"104":1,"105":2,"294":1,"326":1}}],["a=",{"1":{"104":2,"105":2,"120":1}}],["a2",{"1":{"101":1}}],["a156",{"1":{"129":1,"148":1,"228":1,"243":1,"268":1,"372":1}}],["a1",{"1":{"101":1}}],["ahead",{"1":{"84":1,"85":1}}],["ample",{"1":{"290":1}}],["amqp",{"1":{"225":1,"227":1}}],["among",{"1":{"75":1}}],["amount=amount",{"1":{"214":1}}],["amount",{"1":{"3":1,"27":1,"109":1,"110":1,"214":19,"294":1,"296":1,"301":1,"309":1},"2":{"214":1}}],["amazonaws",{"1":{"305":1}}],["amazonrds",{"1":{"277":1}}],["amazon",{"1":{"73":1,"277":1,"286":1,"290":1,"305":1}}],["axios",{"1":{"68":2},"2":{"68":2}}],["absolutely",{"1":{"252":1}}],["abc",{"1":{"153":2}}],["ability",{"1":{"72":1}}],["abortsignal",{"1":{"68":1}}],["aborting",{"1":{"67":1}}],["aborted",{"1":{"65":2,"66":2},"2":{"65":1,"66":1}}],["abortcontroller",{"1":{"65":2,"66":2,"68":2},"2":{"65":1,"66":1,"68":1}}],["abort",{"1":{"65":2,"66":2,"68":1,"69":1,"158":1}}],["able",{"1":{"55":1,"65":1,"102":1,"137":1,"209":1,"316":4,"323":2,"324":2,"325":1}}],["args=input",{"1":{"220":1}}],["args",{"1":{"220":5,"289":1}}],["arguments",{"1":{"88":1,"94":2,"155":1,"289":1,"330":1}}],["argument`",{"1":{"62":1}}],["argument",{"1":{"52":1,"94":1,"109":1,"154":1,"158":1,"308":1,"309":1,"326":1,"333":1}}],["area",{"1":{"173":1}}],["aren",{"1":{"46":1,"114":1}}],["architecture",{"0":{"172":1,"173":1},"1":{"172":1,"173":1,"184":2}}],["arise",{"1":{"164":1}}],["arbitrarily",{"1":{"118":1}}],["arbitrary",{"1":{"72":1,"149":5,"328":1}}],["arranged",{"1":{"322":2,"345":2}}],["array<",{"1
":{"221":1}}],["array",{"1":{"43":2,"333":3,"345":1}}],["arrived",{"1":{"124":2}}],["arrives|",{"1":{"123":1}}],["arrives",{"1":{"42":1,"46":1,"106":1,"111":1,"112":2,"113":4,"116":1,"118":8,"123":1,"124":1}}],["around",{"1":{"0":1,"88":1,"158":1,"172":1,"288":1,"290":1,"333":1,"375":1}}],["at`",{"1":{"317":4,"322":2,"324":2}}],["attributes",{"1":{"145":4,"307":1,"309":1,"322":1,"326":1}}],["attribute",{"1":{"144":1,"145":2}}],["attaching",{"1":{"158":1}}],["attached",{"1":{"150":2}}],["attach",{"1":{"118":1,"149":2,"153":1,"307":3,"323":2,"324":2}}],["attempted",{"1":{"309":1}}],["attempts",{"1":{"62":2}}],["attempts=5",{"1":{"62":1}}],["attempts`",{"1":{"62":3}}],["attempt",{"1":{"37":1,"42":1,"132":2,"145":1,"309":3,"337":2}}],["attempting",{"1":{"22":1}}],["at=datetime",{"1":{"35":1,"84":1}}],["affects",{"1":{"83":1}}],["affected",{"1":{"72":1,"191":1}}],["affect",{"1":{"34":1,"38":1}}],["affinityworkflow",{"1":{"135":1,"136":2}}],["affinity",{"0":{"134":1},"1":{"17":2,"134":2,"135":6,"136":13,"307":1,"322":2},"2":{"136":1}}],["announced",{"1":{"160":4,"161":1}}],["annotations",{"1":{"155":1,"241":2}}],["annotated",{"1":{"153":4,"155":6,"323":2,"324":2,"330":10}}],["annakarenina",{"1":{"160":4}}],["anna",{"1":{"52":7,"160":4},"2":{"160":1}}],["ansi",{"1":{"127":1}}],["answers",{"1":{"208":1}}],["answer",{"1":{"122":1,"305":1}}],["analytical",{"1":{"300":1}}],["analytics",{"1":{"258":6,"266":1}}],["analyze",{"1":{"115":1,"266":4,"268":1,"296":2}}],["analysis",{"1":{"73":2,"139":1,"268":1,"325":1}}],["anotherdep",{"1":{"153":1}}],["another",{"1":{"25":1,"26":1,"55":2,"78":1,"90":1,"104":1,"132":3,"164":1,"305":1,"333":1}}],["anything",{"1":{"218":1,"330":1}}],["any>",{"1":{"117":1,"157":1}}],["anywhere",{"1":{"55":1}}],["any",{"1":{"11":1,"12":1,"19":1,"22":2,"30":2,"32":1,"34":1,"39":1,"40":1,"44":1,"46":1,"48":1,"51":1,"57":1,"58":1,"69":2,"75":2,"76":1,"82":1,"85":1,"87":1,"88":1,"89":1,"90":1,"100":1,"101":1,"102":1,"103":1,"104":3,"105":2,"110":1,"111":1,"112
":2,"113":4,"115":1,"117":5,"118":1,"121":3,"123":3,"124":2,"135":1,"137":1,"138":1,"144":1,"145":1,"149":1,"152":1,"155":1,"158":1,"161":2,"182":1,"222":1,"240":1,"243":2,"268":1,"277":1,"279":1,"294":2,"302":1,"307":5,"310":1,"314":2,"317":2,"318":5,"322":12,"323":4,"324":4,"325":2,"326":2,"328":2,"329":1,"330":1,"331":1,"333":1,"342":1,"345":1}}],["advisory",{"1":{"254":2}}],["advantages",{"1":{"115":1,"124":1}}],["advance",{"1":{"72":1}}],["advanced",{"0":{"54":1},"1":{"17":1,"63":1,"137":1,"309":2,"322":1,"327":1,"332":1,"333":1}}],["ad",{"1":{"205":1}}],["administrator",{"1":{"243":2,"251":1}}],["admin123",{"1":{"225":1,"227":1,"234":1,"243":3}}],["admins",{"1":{"195":1}}],["admin",{"1":{"189":1,"225":1,"227":3,"228":1,"234":1,"238":2,"243":5,"253":1,"255":3,"372":1,"373":1}}],["adjust",{"1":{"130":1,"231":1,"364":1}}],["adjusting",{"1":{"81":1}}],["adapted",{"1":{"157":1}}],["adapt",{"1":{"71":1}}],["addr=smtp",{"1":{"304":1}}],["addr`",{"1":{"265":1}}],["addresses",{"1":{"374":1}}],["address>",{"1":{"374":1}}],["address=",{"1":{"268":1}}],["address`",{"1":{"230":1,"254":2,"262":1,"264":1,"268":2}}],["address",{"1":{"191":2,"193":1,"225":5,"227":4,"230":2,"240":4,"241":4,"243":1,"254":2,"262":1,"264":1,"265":1,"268":1,"304":1,"305":1,"374":1}}],["addl",{"1":{"150":1}}],["adds",{"1":{"66":1,"72":1,"153":1,"345":4}}],["added",{"1":{"56":1,"66":1,"74":1,"75":1,"149":1,"331":1,"345":5}}],["additive",{"1":{"66":1}}],["addition",{"1":{"16":1,"54":1,"70":1,"74":1}}],["additionaldata",{"1":{"153":1}}],["additionally",{"1":{"58":1,"249":1,"296":1}}],["additionalmetadata",{"1":{"24":1,"149":2,"294":5,"333":3,"334":1,"341":1}}],["additional",{"0":{"70":1,"149":1,"231":1},"1":{"4":1,"21":1,"22":1,"28":1,"35":1,"36":1,"40":2,"41":1,"43":3,"72":3,"84":4,"93":1,"105":2,"121":1,"149":8,"150":2,"151":2,"173":1,"177":1,"243":3,"263":4,"294":1,"307":6,"308":1,"309":3,"311":4,"316":8,"317":6,"322":11,"323":2,"324":13,"325":1,"326":1,"333":3,"338":2}}],["adding",{"1":{"39":4,"46"
:1,"74":1,"81":2,"82":1,"84":2,"106":1,"132":1,"158":1,"161":1}}],["add",{"1":{"7":1,"21":1,"35":1,"39":1,"46":1,"58":2,"62":1,"68":1,"81":2,"89":1,"100":1,"104":1,"105":2,"117":1,"158":1,"164":1,"173":1,"174":1,"214":1,"216":1,"219":1,"220":1,"234":1,"245":1,"249":1,"326":1,"330":1,"331":1,"352":2,"372":1,"374":1},"2":{"104":1,"105":1}}],["add`",{"1":{"7":1,"352":1}}],["average",{"1":{"268":2,"288":6}}],["availability",{"0":{"197":1,"248":1},"1":{"134":1,"135":1,"136":1,"182":2,"197":1,"198":1,"199":1,"200":2,"201":1,"202":2,"248":5,"249":1,"268":1,"278":1,"297":2}}],["available",{"1":{"10":1,"11":2,"12":1,"16":1,"18":1,"25":1,"43":3,"74":4,"75":7,"76":4,"77":1,"87":2,"97":1,"102":1,"103":1,"104":2,"106":1,"115":1,"123":2,"124":1,"129":1,"132":1,"134":1,"136":1,"137":2,"142":1,"145":2,"146":1,"147":1,"148":1,"150":1,"152":1,"153":1,"154":1,"155":1,"179":1,"186":1,"190":1,"197":1,"198":1,"237":1,"248":1,"250":1,"268":2,"269":1,"289":1,"296":1,"306":1,"310":1,"332":3,"368":1,"376":2}}],["avoiding",{"1":{"74":1,"79":1,"279":1}}],["avoid",{"1":{"17":1,"57":1,"66":1,"69":1,"74":3,"75":1,"77":1,"160":2,"189":1,"284":1,"290":1,"325":1,"329":1}}],["ap",{"1":{"198":1}}],["apt",{"1":{"127":8}}],["apache",{"1":{"30":1,"73":1}}],["api`",{"1":{"240":3}}],["apiversion",{"1":{"130":3,"289":1}}],["apirequest",{"1":{"81":1,"82":1}}],["apis",{"1":{"69":1,"80":1,"106":1,"223":1,"301":1}}],["api",{"0":{"129":1,"196":1},"1":{"7":1,"17":1,"21":1,"35":3,"39":1,"40":2,"46":4,"52":1,"61":1,"62":2,"68":1,"72":3,"74":1,"79":1,"82":3,"83":1,"90":1,"102":1,"103":1,"104":2,"108":1,"115":1,"128":1,"129":4,"130":15,"144":1,"148":5,"149":6,"158":1,"166":1,"168":1,"173":9,"174":2,"176":1,"182":1,"188":2,"189":3,"191":1,"192":3,"193":3,"196":2,"201":2,"205":1,"218":4,"219":1,"220":1,"221":1,"223":2,"224":1,"228":4,"230":1,"234":4,"238":4,"240":1,"241":5,"243":1,"246":3,"248":1,"258":6,"268":3,"270":3,"278":1,"297":2,"304":2,"305":2,"307":6,"309":13,"310":1,"312":1,"313":1,"314":1,"315":1,"316":16,"3
17":1,"318":2,"332":4,"334":1,"335":1,"336":1,"338":1,"339":1,"341":1,"352":2,"356":1,"372":1,"374":2,"376":1},"2":{"68":1}}],["apppasswords",{"1":{"305":1}}],["app=hatchet",{"1":{"278":1}}],["app=caddy",{"1":{"234":1}}],["apps",{"1":{"218":2,"374":2}}],["appear",{"1":{"158":1,"214":1}}],["append",{"1":{"104":1,"105":1,"144":1,"160":1}}],["appends",{"1":{"10":1}}],["applies",{"1":{"153":1,"154":1,"158":3,"174":1}}],["applied",{"1":{"93":1,"156":1,"158":1}}],["application",{"0":{"162":1},"1":{"4":1,"16":1,"23":1,"24":1,"25":1,"30":2,"34":3,"85":1,"100":2,"157":1,"162":2,"174":1,"182":1,"204":1,"230":1,"237":1,"246":1,"298":1}}],["applications",{"1":{"0":1,"125":3}}],["apply",{"1":{"52":1,"72":1,"158":2,"181":1,"268":1,"312":2,"335":1}}],["app",{"0":{"218":1},"1":{"34":1,"127":19,"162":2,"173":4,"217":1,"218":2,"219":7,"220":2,"221":2,"234":1,"238":2,"258":2,"305":1,"374":14},"2":{"162":1,"234":1,"238":1}}],["approximated",{"1":{"288":1}}],["approximately",{"1":{"288":1}}],["approve",{"1":{"123":1}}],["approval",{"1":{"86":1}}],["approvals",{"1":{"32":1}}],["appropriately",{"1":{"69":1}}],["appropriate",{"1":{"67":1,"69":1,"73":1,"105":1,"127":1,"243":1}}],["approach",{"1":{"3":1,"55":1,"114":1}}],["aggregate",{"1":{"338":1}}],["aggregates",{"1":{"130":1}}],["aggressively",{"1":{"296":1}}],["ago",{"1":{"196":1,"291":2}}],["again",{"1":{"40":1,"109":1}}],["against",{"0":{"228":1},"1":{"4":1,"113":1,"228":1,"286":1,"309":1,"333":2}}],["agentic",{"1":{"32":1,"86":1,"101":2}}],["agent",{"0":{"10":1},"1":{"0":1,"3":1,"7":2,"9":1,"10":1,"32":1,"85":1,"86":1,"100":1,"101":4,"104":1,"106":2,"107":1,"191":2,"193":1,"207":1}}],["agents",{"0":{"9":1,"207":1},"1":{"0":1,"3":3,"7":1,"9":2,"10":3,"106":1,"107":1,"207":4}}],["acknowledgement",{"1":{"288":1}}],["acquiring",{"1":{"139":1}}],["acquired",{"1":{"21":1}}],["accidentally",{"1":{"333":1}}],["acc",{"1":{"104":2,"105":2}}],["according",{"1":{"101":1,"176":1,"181":1,"316":12}}],["account",{"1":{"78":2,"243":2,"372":2,"374":1},
"2":{"78":1}}],["accessors",{"1":{"332":1,"333":1}}],["accessing",{"0":{"53":1,"59":1,"96":1},"1":{"225":1,"227":1}}],["accessfilterpayload",{"1":{"52":1,"53":1}}],["access",{"0":{"196":1},"1":{"7":1,"9":1,"16":1,"24":1,"25":1,"43":1,"46":1,"53":1,"55":1,"59":1,"74":1,"96":1,"112":1,"113":1,"121":1,"137":1,"139":1,"154":1,"164":1,"182":1,"189":1,"225":1,"227":1,"234":1,"268":2,"282":1,"294":1,"307":1,"323":4,"324":4,"326":1,"328":1,"362":1,"368":1}}],["accepting",{"1":{"88":1,"278":1}}],["accepts",{"1":{"14":1,"192":1,"249":1,"326":2}}],["accept",{"1":{"3":1,"20":1,"62":1,"94":1,"167":1,"374":1}}],["achieve",{"1":{"31":1}}],["acted",{"1":{"191":1}}],["actors",{"1":{"193":1}}],["actor",{"0":{"193":1},"1":{"191":2,"193":1}}],["acts",{"1":{"85":1,"100":1,"289":1}}],["active",{"0":{"171":1},"1":{"79":1,"124":1,"196":1,"237":1,"358":1}}],["activity",{"1":{"38":1,"205":1}}],["actionid",{"1":{"221":2},"2":{"221":1}}],["action=action",{"1":{"221":1}}],["action`",{"1":{"46":1,"216":1}}],["actions",{"0":{"192":1},"1":{"36":5,"69":1,"73":1,"74":1,"119":1,"190":1,"191":1,"192":1,"193":3,"221":11,"322":1,"345":1,"377":3},"2":{"221":1}}],["action",{"1":{"21":1,"34":1,"67":1,"85":1,"121":1,"144":1,"145":2,"191":3,"192":1,"216":7,"221":25},"2":{"221":3}}],["actually",{"1":{"124":1}}],["actual",{"1":{"30":1,"37":1,"42":1,"128":1,"130":1,"173":1,"221":1,"333":1}}],["acyclic",{"1":{"13":1,"85":1,"91":1,"100":1}}],["across",{"1":{"1":1,"5":1,"20":1,"74":2,"75":1,"82":3,"87":1,"101":1,"104":1,"106":3,"107":2,"109":1,"127":1,"129":2,"131":1,"133":1,"187":1,"190":1,"209":1,"248":1,"268":1,"289":1,"296":1,"327":2,"333":2}}],["almost",{"1":{"327":1,"329":1}}],["alpha",{"1":{"284":1}}],["alpine3",{"1":{"127":2}}],["alpine",{"1":{"127":3}}],["aliases",{"0":{"337":1}}],["aliased",{"1":{"144":1}}],["alias",{"1":{"238":1,"326":1,"332":2,"372":1}}],["alive",{"1":{"162":2}}],["alike",{"1":{"160":4,"161":1}}],["alerts",{"1":{"121":1,"302":1}}],["alerting",{"0":{"258":1,"265":1},"1":{"0":1,"258":5,"
265":5}}],["allocated",{"1":{"124":2}}],["allocation",{"1":{"75":1}}],["allowed=true",{"1":{"328":1}}],["allowed",{"1":{"64":3,"307":4}}],["allows",{"1":{"3":1,"38":1,"49":1,"51":1,"52":1,"54":2,"55":1,"56":1,"61":2,"66":1,"69":1,"70":1,"71":1,"72":2,"73":1,"76":1,"79":1,"80":2,"83":1,"102":1,"131":1,"134":1,"138":1,"142":1,"146":1,"149":1,"174":1,"243":7,"298":1,"310":2,"322":1,"327":1,"329":1,"333":2,"356":1,"366":1,"370":1}}],["allow",{"1":{"3":1,"34":1,"44":1,"64":1,"77":1,"107":1,"245":1,"254":8,"307":6,"318":1,"322":4,"323":2,"324":2,"326":2,"342":1,"348":1}}],["allowing",{"1":{"0":1,"20":1,"27":1,"34":1,"55":1,"67":1,"83":1,"139":1}}],["algorithm",{"1":{"46":1,"157":3}}],["alarm",{"1":{"34":2,"257":12}}],["alter",{"1":{"247":1}}],["alternative",{"1":{"237":1}}],["alternately",{"1":{"374":1}}],["alternate",{"1":{"11":1,"12":1}}],["although",{"1":{"26":1}}],["always",{"1":{"25":1,"35":1,"58":2,"74":1,"85":1,"102":2,"121":2,"154":1,"199":1,"225":2,"227":1,"231":1,"280":1,"289":1}}],["along",{"1":{"25":1,"159":1,"301":1,"370":1}}],["already",{"1":{"7":1,"21":1,"35":1,"36":1,"102":1,"154":1,"175":1,"218":1,"225":2,"317":1,"364":1}}],["asked",{"0":{"208":1},"1":{"208":2}}],["asterisk",{"1":{"38":1}}],["associate",{"1":{"312":2}}],["associated",{"1":{"43":2,"52":1,"73":1,"311":2,"316":2,"317":2,"333":3,"356":1}}],["asserting",{"1":{"158":1}}],["assert",{"1":{"72":2}}],["assuming",{"1":{"162":1}}],["assumed",{"1":{"65":1}}],["assumes",{"1":{"34":1,"38":1,"49":1,"241":1}}],["assumption",{"1":{"31":1}}],["assumptions",{"0":{"31":1}}],["assigned`",{"1":{"268":1}}],["assigned",{"1":{"92":1,"124":2,"131":1,"132":8,"134":2,"136":1,"268":19,"307":2,"322":2}}],["assign",{"1":{"83":1,"84":2,"134":1,"307":1}}],["assigning",{"1":{"83":2}}],["assigns",{"1":{"18":1}}],["assignment",{"0":{"131":1,"132":1},"1":{"17":1,"19":1,"124":1,"131":2,"132":4,"134":1,"136":2,"176":1,"268":2}}],["assistants",{"1":{"11":1}}],["assistant",{"1":{"7":1}}],["asyncgenerator",{"1":{"153":2,"328":1,"3
30":2}}],["asynccontextmanager",{"1":{"153":2,"330":2},"2":{"153":1,"330":1}}],["asynchronously",{"1":{"97":2,"322":4,"324":4}}],["asynchronous",{"1":{"69":2,"71":1,"94":1,"97":1,"158":1,"329":1,"330":1}}],["asyncio",{"0":{"325":1},"1":{"68":2,"160":2,"325":9},"2":{"68":1,"160":1,"325":3}}],["async",{"1":{"14":1,"20":1,"24":1,"25":2,"39":1,"40":3,"43":1,"55":1,"58":1,"59":1,"60":1,"65":1,"66":1,"68":3,"72":1,"89":3,"95":3,"96":3,"104":4,"105":10,"109":2,"112":2,"115":1,"116":2,"117":1,"120":1,"121":2,"132":1,"133":2,"135":1,"136":3,"142":2,"153":24,"154":1,"155":66,"156":1,"157":4,"158":1,"160":2,"161":1,"162":2,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2,"323":4,"324":2,"325":10,"326":5,"327":1,"328":2,"330":29,"345":2}}],["as",{"0":{"204":1},"1":{"0":1,"2":1,"11":2,"12":1,"16":1,"18":1,"19":1,"22":1,"24":1,"25":1,"26":2,"27":2,"28":1,"31":1,"32":1,"34":1,"36":1,"37":2,"39":5,"41":1,"42":2,"43":4,"46":5,"49":1,"50":2,"54":1,"55":2,"57":3,"61":1,"63":1,"64":1,"65":3,"66":1,"67":1,"69":1,"71":1,"72":2,"73":1,"74":1,"75":1,"79":1,"80":1,"81":2,"82":3,"85":2,"87":3,"88":1,"89":1,"92":2,"93":1,"94":1,"96":1,"100":1,"101":3,"102":1,"104":2,"105":1,"109":1,"113":1,"116":2,"117":1,"118":6,"120":2,"121":2,"127":4,"132":1,"133":2,"134":2,"136":1,"137":1,"141":1,"144":1,"145":1,"152":3,"153":2,"154":3,"155":2,"157":2,"158":3,"159":2,"160":1,"162":4,"164":1,"175":1,"179":2,"189":1,"192":1,"209":1,"213":1,"221":1,"225":3,"226":2,"230":1,"231":1,"233":1,"234":1,"236":1,"237":1,"246":1,"249":1,"253":1,"268":2,"276":1,"286":1,"288":1,"289":2,"292":1,"293":1,"294":1,"297":2,"307":11,"308":1,"309":2,"314":2,"317":2,"322":16,"324":2,"325":5,"326":2,"328":3,"329":2,"330":4,"331":6,"332":7,"333":4,"334":2,"341":3,"344":1,"345":3,"351":1,"352":1,"354":2,"357":1,"374":3}}],["asindexpage",{"1":{"0":1,"7":1,"211":1}}],["air",{"1":{"175":1,"180":1}}],["aims",{"1":{"175":1,"375":1}}],["aim",{"1":{"69":1}}],["aio",{"1":{"24":1,"25":2,"40":3,"43":1,"55":1,"68":1,"72":3,"89":4,"104":1,"105"
:2,"109":1,"112":2,"116":1,"133":2,"160":1,"161":1,"162":1,"294":1,"310":3,"326":1},"2":{"24":1,"25":2,"40":3,"43":1,"55":1,"68":1,"89":3,"104":1,"105":1,"109":1,"112":1,"116":1,"133":2,"160":1,"161":1,"162":1,"294":1,"326":1}}],["ai",{"1":{"0":1,"3":1,"7":1,"9":1,"10":1,"11":2,"12":1,"85":1,"106":1,"107":1,"135":3,"136":7,"207":1}}],["tmux",{"1":{"372":2}}],["typical",{"1":{"182":1,"286":1}}],["typically",{"1":{"177":1,"243":1,"294":1,"295":2,"296":1,"297":1}}],["typeerror",{"1":{"61":1}}],["type`",{"1":{"46":2,"214":2,"219":1,"221":1}}],["typed",{"1":{"43":1,"55":1,"326":3}}],["types",{"0":{"193":1},"1":{"15":1,"52":1,"55":4,"58":1,"64":1,"69":1,"74":2,"78":2,"81":2,"82":2,"118":1,"130":1,"132":1,"136":5,"154":3,"155":1,"158":5,"174":1,"193":1,"217":1,"220":1,"308":1,"321":1,"328":1,"331":1,"332":8,"333":1,"345":1,"346":4},"2":{"52":1,"74":2,"78":2,"81":2,"82":2,"132":1,"136":3}}],["typescript",{"1":{"0":1,"14":1,"16":3,"20":1,"21":1,"24":1,"25":2,"35":5,"39":1,"40":3,"43":1,"48":1,"50":1,"52":4,"53":1,"55":1,"58":1,"59":1,"60":1,"61":1,"68":1,"74":1,"78":1,"93":1,"94":1,"95":1,"96":1,"97":1,"104":3,"105":5,"109":1,"110":1,"112":3,"113":3,"116":1,"117":3,"118":1,"120":2,"121":1,"125":1,"135":1,"142":1,"144":2,"152":2,"153":1,"154":3,"155":1,"156":1,"157":1,"158":2,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"270":1,"294":2,"332":2,"345":1,"364":1,"377":6}}],["type",{"0":{"337":1},"1":{"14":4,"15":2,"24":1,"25":1,"46":2,"55":13,"61":1,"62":2,"83":1,"90":2,"104":2,"105":8,"129":1,"130":4,"144":2,"145":1,"146":6,"148":1,"152":2,"153":2,"154":4,"155":7,"157":3,"158":1,"162":2,"186":1,"191":2,"192":1,"196":1,"214":5,"216":5,"219":5,"220":2,"221":5,"268":2,"286":1,"290":1,"307":8,"309":30,"310":3,"311":16,"312":20,"313":4,"314":10,"315":4,"316":46,"317":28,"318":20,"319":10,"320":16,"322":46,"323":6,"324":46,"326":5,"328":2,"331":2,"332":10,"333":17,"334":4,"335":5,"336":1,"337":2,"338":2,"339":2,"340":9,"341":7,"342":5,"343":4,"344":4,"345":16,"346"
:3}}],["type=myinput",{"1":{"322":1}}],["type=",{"1":{"11":1,"12":1,"162":1,"369":1,"370":1,"371":1}}],["tp",{"1":{"144":1}}],["td",{"1":{"101":2}}],["tui",{"0":{"368":1},"1":{"348":3,"368":5}}],["tui`",{"1":{"206":1,"348":1}}],["tusamma",{"1":{"325":1}}],["tuples",{"1":{"317":1}}],["tuple",{"1":{"317":1}}],["tutorial",{"1":{"226":1,"329":1}}],["tune",{"1":{"209":1}}],["tuned",{"1":{"63":1}}],["tuning",{"1":{"175":2,"177":3,"292":1,"294":3,"296":1}}],["turning",{"1":{"174":1}}],["turnaround",{"1":{"83":1}}],["tiangolo",{"1":{"325":1,"327":1,"329":1}}],["till",{"1":{"278":1}}],["title",{"1":{"216":12},"2":{"216":2}}],["tight",{"1":{"203":1}}],["ties",{"1":{"109":1}}],["tiers",{"1":{"81":1}}],["tier",{"1":{"78":2,"147":1},"2":{"78":1}}],["timing",{"1":{"62":2,"66":1,"124":1}}],["time`",{"1":{"255":1,"316":4}}],["timeline",{"1":{"188":1}}],["timely",{"1":{"69":1}}],["timer",{"1":{"124":1}}],["timed",{"1":{"65":1,"268":6}}],["timedelta`",{"1":{"65":1,"309":1}}],["timedelta",{"1":{"35":2,"66":1,"72":4,"84":1,"89":1,"109":1,"110":1,"113":1,"116":1,"309":1}}],["timeoutoutput",{"1":{"65":2,"66":3}}],["timeoutinput",{"1":{"65":1,"66":1}}],["timeoutworkflow",{"1":{"65":1},"2":{"65":1}}],["timeout=timedelta",{"1":{"65":2,"66":1,"94":1,"95":1,"96":1,"105":1,"121":1,"294":1,"326":1}}],["timeout`",{"1":{"65":4,"254":1,"265":1,"307":4,"309":2,"322":8}}],["timeout",{"0":{"65":1},"1":{"62":2,"65":16,"66":28,"94":2,"105":1,"113":1,"116":1,"118":2,"121":1,"132":1,"174":1,"225":3,"227":2,"231":1,"241":7,"254":1,"265":1,"268":1,"309":3,"333":2},"2":{"65":1,"66":2}}],["timeouts",{"0":{"64":1,"66":1},"1":{"16":3,"17":2,"64":6,"65":6,"92":2,"132":3,"134":3,"175":1,"182":1,"204":1,"268":1,"309":6,"322":1,"345":1,"377":4}}],["timezone",{"1":{"72":2},"2":{"35":1,"72":1,"84":1}}],["timestamp",{"1":{"39":1,"40":1,"129":1,"191":1,"196":1,"283":1,"333":4},"2":{"39":1}}],["times",{"1":{"5":1,"34":1,"35":1,"38":1,"43":1,"56":1,"57":1,"66":1,"68":1,"83":1,"101":1,"104":1,"105":2,"142":2,"146":1,"307
":2,"309":2,"322":4,"323":2,"324":2,"329":1,"333":1},"2":{"104":1,"105":1}}],["time",{"0":{"295":1},"1":{"0":1,"17":2,"22":1,"23":1,"25":1,"26":1,"27":2,"28":1,"30":1,"34":7,"35":9,"36":1,"37":7,"38":2,"39":8,"40":3,"42":8,"44":1,"48":1,"49":1,"52":2,"60":4,"64":2,"65":5,"66":3,"68":3,"72":3,"74":4,"78":4,"79":1,"81":1,"84":4,"85":1,"88":1,"89":9,"95":1,"96":1,"109":5,"110":2,"112":2,"113":2,"115":1,"116":7,"118":3,"122":1,"123":1,"128":1,"136":1,"138":2,"141":2,"142":1,"156":1,"158":2,"159":1,"160":2,"162":2,"170":1,"195":1,"196":2,"201":1,"209":2,"218":1,"223":1,"255":1,"268":7,"277":1,"288":8,"289":5,"294":2,"295":1,"307":7,"309":1,"313":4,"314":4,"316":16,"318":1,"322":31,"324":10,"325":3,"329":1,"333":2,"338":4,"342":1,"345":2,"348":1,"368":1},"2":{"35":2,"39":4,"40":2,"65":2,"66":2,"68":2,"72":1,"74":3,"78":3,"84":1,"89":2,"95":1,"96":1,"109":4,"110":1,"112":2,"113":2,"116":4,"118":1,"138":2,"141":1,"142":1,"160":3,"162":1,"325":1}}],["twice",{"1":{"59":3}}],["tworkflowinput",{"1":{"307":8,"309":3,"322":20,"323":1,"324":16}}],["two",{"1":{"5":1,"31":1,"40":1,"43":1,"46":1,"52":1,"64":1,"72":1,"80":1,"83":1,"94":1,"95":4,"96":4,"117":11,"118":1,"132":1,"142":1,"146":1,"154":1,"155":1,"193":1,"209":2,"216":1,"228":1,"279":1,"281":1,"288":1,"294":3,"308":1,"321":1,"325":1,"326":1,"330":2,"333":1,"345":1}}],["tz=timezone",{"1":{"35":2,"72":2,"84":1}}],["tssovi",{"1":{"325":1}}],["ts",{"1":{"21":2,"364":1},"2":{"21":1}}],["tls",{"0":{"262":1,"273":1},"1":{"21":2,"62":2,"166":1,"183":1,"188":2,"189":2,"238":1,"241":6,"262":32,"265":2,"273":11,"376":16}}],["therefore",{"1":{"247":1}}],["themselves",{"1":{"19":1,"323":2,"324":2}}],["than`",{"1":{"136":2}}],["thousands",{"1":{"123":1,"177":1}}],["though",{"1":{"61":1,"216":1,"220":1,"288":1}}],["third",{"1":{"44":1,"154":1,"158":1,"187":1,"318":1,"342":1}}],["thing",{"1":{"47":1}}],["things",{"1":{"31":1,"70":1,"83":1,"121":2,"167":1,"219":1,"321":1,"345":1}}],["think",{"1":{"19":1}}],["threshold=25",{"1":{"296":2}}],[
"threshold=200",{"1":{"294":1}}],["threshold=500",{"1":{"294":2}}],["threshold=1000",{"1":{"294":2}}],["threshold`",{"1":{"266":1}}],["threshold",{"1":{"118":1,"146":4,"157":2,"266":1,"272":2,"294":5}}],["thread`",{"1":{"325":2}}],["thread",{"1":{"68":2,"325":2,"377":1},"2":{"325":1}}],["three",{"1":{"2":1,"49":1,"113":1,"117":6,"118":2,"164":1,"173":1,"224":1,"240":1}}],["throughputs",{"1":{"289":1,"290":1}}],["throughput",{"0":{"287":1},"1":{"106":1,"173":1,"174":1,"175":2,"177":2,"223":1,"224":1,"248":1,"268":1,"286":1,"287":2,"288":2,"292":1,"293":1,"294":2,"295":2,"298":1}}],["throughout",{"1":{"69":1,"288":1,"331":1}}],["thrown",{"1":{"133":1}}],["throw",{"1":{"58":1,"59":1,"60":1,"61":1,"65":2,"66":2,"68":1,"121":3,"158":1,"334":1,"341":1}}],["throws",{"1":{"57":1,"121":1,"154":1,"158":2,"333":2,"334":1,"341":1}}],["throttled",{"1":{"74":1}}],["throttle",{"1":{"17":1,"106":1}}],["txt",{"0":{"12":1},"1":{"11":7,"12":7,"127":2},"2":{"127":1},"3":{"9":1,"10":1,"11":1,"12":1}}],["toward",{"1":{"288":1}}],["toweb",{"1":{"162":1},"2":{"162":1}}],["todo",{"1":{"234":2,"238":1,"241":2}}],["today",{"1":{"72":2},"2":{"72":1}}],["toggle",{"1":{"219":1,"221":1}}],["together",{"1":{"118":1,"172":1,"191":1}}],["tostring",{"1":{"157":3},"2":{"157":2}}],["touching",{"1":{"152":1}}],["toml",{"1":{"127":1},"2":{"127":1}}],["tomorrow",{"1":{"35":1,"39":1}}],["total`",{"1":{"268":19}}],["totals",{"1":{"130":1}}],["total",{"1":{"105":1,"109":1,"117":1,"129":4,"130":3,"146":15,"158":1,"268":50,"289":2,"294":1},"2":{"130":3}}],["tolower",{"1":{"50":1,"53":1,"94":1,"95":2,"96":2},"2":{"50":1,"53":1}}],["tolowercase",{"1":{"14":1,"65":1,"66":1,"94":1,"105":3},"2":{"14":1,"65":1,"66":1,"94":1,"105":1}}],["took",{"1":{"34":1}}],["tooling",{"1":{"180":1}}],["tools",{"1":{"32":1,"55":1,"207":1,"268":1}}],["toolbox",{"1":{"29":1}}],["tool",{"1":{"11":1,"12":1,"347":1}}],["token>",{"1":{"21":1}}],["token=your",{"1":{"289":3}}],["token=os",{"1":{"62":1}}],["token=",{"1":{"21":1,"372":1,"373
":1}}],["tokens`",{"1":{"234":1,"238":1}}],["tokens",{"1":{"21":1,"183":1,"188":1,"189":1,"205":1,"228":1,"376":1}}],["token`",{"1":{"21":1,"166":1,"191":1,"192":2,"234":1,"238":1,"270":1,"289":2,"376":1}}],["token",{"1":{"7":1,"21":1,"62":1,"129":3,"130":10,"148":3,"164":2,"166":2,"168":3,"192":2,"228":5,"230":1,"234":5,"238":5,"268":1,"270":5,"289":5,"352":4,"356":1,"372":1,"373":1,"376":2}}],["topk",{"1":{"268":2}}],["topology",{"1":{"177":1}}],["top",{"1":{"4":1,"11":1,"12":1,"36":1,"41":1,"164":1,"249":1,"268":1}}],["t",{"1":{"7":1,"18":1,"21":1,"29":1,"46":1,"61":1,"64":1,"65":1,"85":2,"92":1,"100":2,"104":2,"114":1,"139":1,"170":1,"175":1,"189":1,"204":1,"209":1,"214":1,"216":2,"218":1,"225":6,"227":3,"243":7,"279":1,"333":1,"373":2,"377":1}}],["tail=50",{"1":{"278":1}}],["tackle",{"1":{"211":1}}],["talk",{"1":{"174":1,"325":1}}],["tag`",{"1":{"278":4}}],["tags",{"1":{"278":2,"279":1,"284":2}}],["tag",{"1":{"157":6,"229":1,"278":2,"283":2,"369":1,"370":1,"371":1}}],["tagging",{"1":{"151":1}}],["targeted",{"1":{"279":1}}],["target",{"0":{"283":1},"1":{"278":1,"279":1,"282":2,"283":4,"284":2}}],["targets",{"1":{"146":1,"268":1}}],["targetvalue",{"1":{"130":3}}],["taks",{"1":{"132":1}}],["takes",{"1":{"94":3,"109":1,"214":1,"278":1,"289":1,"358":1}}],["take",{"0":{"277":1},"1":{"46":1,"67":1,"69":1,"73":1,"87":1,"93":1,"123":1,"155":1,"279":1,"329":1,"330":1}}],["tab",{"1":{"65":3,"66":3,"79":1,"81":3,"82":6,"84":9,"132":3,"133":3,"136":6,"138":1,"142":2,"149":6,"166":1,"169":1,"228":1,"234":2,"238":2}}],["tables",{"1":{"266":2,"296":1}}],["table",{"1":{"32":1,"288":1,"296":1,"328":1}}],["tabs",{"1":{"7":1}}],["task<",{"1":{"345":1}}],["taskrunexternalid",{"1":{"333":1}}],["taskrunref",{"1":{"324":2}}],["taskname",{"1":{"333":1}}],["taskname`",{"1":{"145":1}}],["taskworkflowdeclaration",{"1":{"332":8,"333":2,"345":5,"346":4}}],["taskwithmiddleware",{"1":{"155":1,"156":2}}],["taskoneoutput",{"1":{"136":4}}],["taskoutput",{"1":{"25":1,"55":4,"121":1,"155":1,"328":
3},"2":{"25":1}}],["taskconditionworkflow",{"1":{"110":1,"113":2,"117":4,"118":1},"2":{"110":1,"113":1,"117":1,"118":1}}],["task`",{"1":{"88":1,"89":1,"130":1,"307":2,"321":1,"322":8,"331":1}}],["task1",{"1":{"82":5}}],["task2",{"1":{"81":5}}],["taskinput",{"1":{"55":3,"155":2}}],["task",{"0":{"14":1,"39":1,"58":1,"59":1,"84":1,"89":1,"94":1,"95":1,"96":1,"123":1,"129":1,"167":1,"210":1,"261":1,"323":1},"1":{"0":1,"2":1,"3":2,"7":1,"8":1,"10":1,"13":6,"14":13,"15":8,"16":4,"17":6,"18":4,"19":2,"20":3,"24":15,"25":6,"27":1,"29":1,"30":5,"31":2,"32":4,"34":7,"35":2,"37":4,"38":9,"39":11,"40":4,"42":6,"43":16,"48":2,"49":9,"50":3,"51":1,"52":2,"53":6,"55":18,"56":3,"57":11,"58":15,"59":7,"60":8,"61":16,"62":7,"63":2,"64":4,"65":22,"66":15,"67":5,"68":22,"69":6,"71":1,"72":19,"73":4,"74":8,"75":8,"76":7,"77":1,"78":4,"79":2,"80":1,"81":7,"82":3,"83":5,"84":2,"85":4,"87":10,"88":3,"89":11,"90":6,"91":1,"92":4,"93":4,"94":9,"95":12,"96":13,"97":1,"100":6,"101":15,"102":8,"103":2,"104":17,"105":26,"106":3,"108":2,"109":12,"110":11,"111":3,"112":17,"113":23,"115":8,"116":9,"117":41,"118":17,"119":2,"120":5,"121":17,"122":1,"123":18,"124":14,"127":1,"128":2,"129":12,"130":10,"131":1,"132":15,"133":11,"134":3,"135":1,"136":11,"137":6,"138":6,"141":1,"142":7,"144":1,"145":15,"149":4,"150":3,"151":3,"152":5,"153":6,"154":15,"155":28,"156":3,"157":4,"158":19,"159":1,"160":8,"161":3,"162":3,"167":2,"169":3,"173":1,"174":1,"176":4,"212":1,"214":6,"216":5,"217":1,"219":4,"220":4,"221":4,"223":1,"228":1,"234":1,"238":1,"254":1,"257":6,"266":2,"267":2,"268":9,"294":2,"300":1,"301":2,"307":49,"308":1,"309":49,"313":8,"314":30,"316":71,"321":1,"322":75,"323":28,"324":41,"326":7,"327":1,"328":5,"329":3,"330":5,"331":4,"332":11,"333":18,"336":2,"338":8,"340":14,"345":49,"346":4,"372":4,"374":1},"2":{"14":1,"24":4,"25":1,"39":1,"43":5,"52":1,"53":2,"55":3,"58":3,"59":3,"60":2,"61":2,"65":2,"66":2,"68":2,"74":1,"78":1,"81":2,"82":2,"84":1,"89":2,"94":2,"95":3,"96":3,"104":1,"105":7,"109":1
,"110":3,"112":1,"113":3,"115":1,"116":2,"117":4,"118":3,"120":1,"121":5,"130":1,"132":2,"133":3,"135":1,"136":3,"138":1,"141":1,"142":2,"155":4,"160":1,"161":2,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":2,"322":1,"326":5,"328":1,"330":1,"331":1,"345":2},"3":{"13":1,"14":1,"15":1,"16":1,"17":1,"18":1}}],["taskstat",{"1":{"314":2}}],["tasks`",{"1":{"56":2,"268":2,"316":2,"322":2,"324":2}}],["tasks",{"0":{"13":1,"18":1,"23":1,"55":1,"86":1,"90":1,"102":1,"104":1,"105":1,"120":1,"121":1,"133":1,"155":1,"169":1,"228":1},"1":{"0":1,"2":5,"5":1,"8":1,"13":3,"15":2,"17":2,"18":2,"19":3,"20":3,"22":4,"23":5,"25":1,"26":2,"29":1,"30":1,"31":2,"32":2,"34":1,"35":1,"36":1,"38":2,"39":1,"40":2,"41":1,"43":5,"48":2,"49":5,"50":1,"52":1,"55":4,"56":2,"63":4,"64":1,"65":1,"67":2,"68":2,"69":3,"70":1,"71":2,"72":4,"74":6,"76":1,"78":1,"83":5,"84":2,"85":3,"87":3,"89":3,"90":3,"91":1,"92":6,"93":1,"94":3,"95":2,"96":2,"98":1,"100":2,"101":3,"102":3,"103":7,"104":4,"105":9,"106":2,"107":1,"108":3,"110":2,"111":4,"113":3,"114":2,"115":3,"116":1,"117":4,"118":1,"119":3,"120":2,"121":2,"122":1,"123":10,"124":6,"129":3,"130":2,"131":3,"132":2,"133":3,"134":1,"136":1,"137":3,"138":2,"139":3,"142":1,"143":2,"152":3,"155":3,"156":1,"158":6,"159":1,"167":3,"169":4,"170":2,"174":3,"175":1,"176":1,"177":2,"179":1,"181":2,"204":1,"209":1,"210":1,"212":1,"222":1,"223":1,"228":1,"246":1,"268":60,"271":1,"289":1,"294":1,"301":5,"306":1,"307":6,"308":5,"321":2,"322":9,"323":10,"324":12,"325":1,"326":2,"327":4,"329":4,"330":2,"331":2,"332":6,"333":7,"340":1,"345":6,"348":1,"368":1}}],["terraform",{"1":{"248":1}}],["terminated",{"1":{"137":1}}],["terminate",{"1":{"69":1,"138":1,"189":1}}],["terminating",{"1":{"67":1}}],["termination",{"1":{"67":1,"69":1,"71":1,"106":1,"171":1}}],["terminal",{"1":{"11":1,"206":2,"348":1,"360":1,"368":2}}],["term",{"1":{"29":1,"73":1}}],["tens",{"1":{"177":1}}],["tenancy",{"1":{"164":3}}],["tenant`",{"1":{"254":1}}],["tenantid",{"1":{"129":1,"148":1,"332":1}
}],["tenants`",{"1":{"265":1}}],["tenants",{"1":{"46":1,"81":1,"82":1,"129":2,"130":3,"148":4,"196":1,"205":1,"206":1,"265":1,"268":4,"332":1,"348":1,"351":1}}],["tenant",{"0":{"148":1,"265":1},"1":{"7":6,"46":3,"129":1,"130":3,"145":2,"147":1,"154":1,"164":4,"166":1,"168":1,"188":4,"192":2,"195":1,"196":1,"225":1,"228":1,"243":4,"251":1,"254":7,"255":6,"257":2,"265":5,"268":73,"291":2,"302":1,"307":1,"312":4,"314":16,"318":4,"319":4,"320":4,"332":5,"338":4,"339":2,"340":1,"342":1,"343":1,"344":1,"372":2,"373":1,"376":3},"2":{"332":1}}],["tenacity",{"1":{"62":3}}],["tenacity=tenacityconfig",{"1":{"62":1}}],["tenacityconfig",{"1":{"62":1}}],["tell",{"1":{"46":1,"214":1}}],["template",{"1":{"345":1}}],["template=feature",{"1":{"305":1}}],["temporary",{"1":{"57":1,"63":1}}],["temp",{"1":{"39":1,"40":1}}],["text",{"1":{"11":1,"12":1,"121":3,"148":2,"160":2,"161":1,"162":4,"219":8,"220":12,"268":1,"314":6,"338":1},"2":{"219":1,"220":1}}],["teardown",{"1":{"307":1}}],["team",{"1":{"78":2,"179":1,"294":1}}],["teams",{"1":{"0":1,"164":1,"175":1}}],["teach",{"1":{"10":1}}],["testevent",{"1":{"294":2},"2":{"294":1}}],["tested",{"1":{"1":1,"5":1,"188":1,"286":1}}],["test3",{"1":{"294":1}}],["test2",{"1":{"294":1}}],["test1",{"1":{"294":1}}],["testing2",{"1":{"294":1}}],["testing",{"1":{"164":1,"187":1,"188":1,"224":1,"243":1,"289":1,"294":1,"329":1,"358":1,"363":1,"366":1,"372":1}}],["tests",{"1":{"55":1,"158":1,"206":1,"288":1,"290":1}}],["test",{"1":{"2":1,"21":1,"24":2,"82":3,"127":1,"135":1,"136":2,"142":2,"149":2,"158":4,"214":3,"216":1,"225":3,"227":2,"231":1,"278":1,"280":1,"289":4,"294":6,"323":4,"324":4,"326":1,"328":1,"372":1}}],["tr",{"1":{"238":1}}],["troubleshooting",{"0":{"165":1},"1":{"165":1}}],["treat",{"1":{"189":1}}],["treated",{"1":{"64":1,"81":1,"307":3,"322":4}}],["tree",{"1":{"101":3,"106":2,"158":1,"283":1}}],["trusted",{"1":{"189":1}}],["trust",{"0":{"186":1},"1":{"182":2,"186":2,"188":2}}],["truth",{"1":{"82":1,"173":1}}],["true",{"1":{"0":1,"7":1,"52
":9,"55":1,"65":1,"66":1,"68":2,"89":1,"113":2,"118":2,"121":1,"132":1,"133":3,"136":4,"142":1,"153":1,"211":1,"219":3,"241":5,"243":4,"309":8,"333":2,"343":1,"364":3}}],["try",{"0":{"120":1},"1":{"68":1,"104":2,"105":2,"119":1,"120":3,"153":4,"154":1,"162":1,"330":4,"333":1,"373":1}}],["trailing",{"1":{"376":1}}],["trap",{"1":{"238":1}}],["tradeoffs",{"0":{"175":1},"1":{"184":1}}],["traditional",{"1":{"0":1,"30":1}}],["tracerprovider`",{"1":{"144":1}}],["tracerprovider",{"1":{"144":1}}],["tracer",{"1":{"144":2}}],["traces",{"1":{"143":1,"144":3,"166":1}}],["trace",{"0":{"301":1},"1":{"142":3,"144":2,"264":2,"301":1}}],["tracks",{"1":{"109":1,"375":1}}],["trackable",{"1":{"107":1}}],["tracked",{"1":{"92":1,"181":1,"188":1}}],["tracking",{"1":{"82":1,"287":1}}],["track",{"1":{"73":1}}],["traffic",{"1":{"74":1,"188":1}}],["transactionally",{"1":{"176":1}}],["transit",{"1":{"188":1}}],["transitions",{"1":{"173":1,"174":1,"176":2}}],["transient",{"1":{"17":1,"57":1,"63":1,"176":1,"333":2}}],["transform",{"1":{"307":4,"322":8}}],["transforms",{"1":{"158":2}}],["transformedoutput",{"1":{"74":2,"78":2}}],["transformedmessage",{"1":{"14":1,"24":1,"43":5,"55":1,"65":1,"66":1,"94":1,"95":1,"96":1,"105":4,"294":3},"2":{"24":1,"95":1,"96":1}}],["transformed",{"1":{"14":3,"95":1,"96":1}}],["transport",{"1":{"62":13}}],["trigger`",{"1":{"206":1,"366":1,"367":2}}],["triggerauthentication",{"1":{"130":1}}],["triggerat`",{"1":{"341":2}}],["triggerat",{"1":{"35":2,"341":2}}],["triggerworkflowoptions",{"1":{"84":3,"105":1,"133":1,"149":1},"2":{"84":1,"105":1,"133":1,"149":1}}],["triggerpayload",{"1":{"52":1}}],["triggerscope",{"1":{"52":2}}],["triggers",{"0":{"40":1,"41":1,"50":1},"1":{"36":1,"38":3,"40":5,"41":2,"42":1,"47":1,"48":1,"49":1,"50":1,"130":3,"145":2,"182":1,"205":1,"216":1,"307":6,"311":4,"322":6,"324":7,"332":3,"333":3,"334":2,"341":1,"345":4,"366":1,"367":1}}],["triggering",{"0":{"28":1,"366":1},"1":{"10":2,"23":1,"24":1,"25":2,"40":1,"55":1,"74":1,"75":1,"121":1,"149"
:1,"154":1,"157":1,"158":1,"173":1,"193":1,"207":1,"288":1,"314":2,"331":1,"333":1,"338":1,"340":1,"366":1}}],["trigger",{"0":{"49":1},"1":{"7":4,"10":1,"18":1,"24":1,"25":1,"34":1,"35":4,"38":1,"39":2,"40":24,"42":1,"44":1,"46":4,"48":1,"49":3,"50":2,"51":1,"52":2,"55":5,"81":1,"84":3,"119":1,"121":1,"145":2,"149":1,"174":1,"184":1,"204":1,"206":1,"212":1,"213":1,"214":3,"216":1,"311":26,"316":8,"317":9,"318":1,"322":5,"324":5,"334":6,"342":1,"366":1,"367":5},"2":{"40":1},"3":{"7":1,"8":1,"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["triggeredbyevent",{"1":{"333":1}}],["triggered",{"1":{"7":1,"15":1,"24":1,"25":3,"27":1,"35":1,"36":1,"39":1,"42":1,"47":1,"49":2,"50":1,"51":1,"52":1,"53":1,"55":3,"67":1,"74":2,"75":2,"76":2,"83":2,"84":1,"145":1,"192":1,"309":3,"316":4,"317":5,"318":4,"322":7,"324":6,"329":1,"330":1,"333":2,"338":1,"345":2}}],["hl=en",{"1":{"305":1}}],["html",{"1":{"277":2,"293":1,"303":1,"305":1,"325":1}}],["httpmethod",{"1":{"62":4},"2":{"62":2}}],["http",{"1":{"44":1,"62":7,"158":1,"162":7,"173":1,"191":2,"225":4,"227":2,"243":6,"268":4,"272":1,"318":1,"332":1,"342":1},"2":{"162":7}}],["https",{"1":{"0":2,"4":1,"7":2,"11":3,"12":3,"16":1,"29":1,"30":1,"38":2,"43":1,"46":2,"68":1,"74":1,"88":1,"90":1,"112":1,"113":1,"118":1,"127":1,"129":1,"130":4,"143":1,"147":1,"148":1,"152":1,"153":1,"154":1,"155":1,"156":1,"158":1,"175":2,"177":3,"182":2,"186":1,"188":1,"189":1,"198":1,"199":1,"201":1,"214":2,"216":1,"218":3,"219":1,"220":1,"221":1,"225":3,"227":1,"228":1,"234":2,"236":1,"237":2,"238":1,"240":2,"241":3,"243":1,"246":1,"248":1,"249":1,"268":1,"277":4,"283":1,"284":1,"293":1,"303":1,"305":6,"325":4,"326":1,"327":1,"329":1,"332":1,"349":1,"359":1,"374":2}}],["hints",{"1":{"326":1,"331":1}}],["histogram",{"1":{"268":5}}],["historical",{"1":{"182":1}}],["history",{"1":{"167":1,"181":1,"201":1,"205":1}}],["hipaa",{"1":{"186":1}}],["him",{"1":{"160":4,"161":1}}],["highly",{"1":{"325":1,"326":1}}],["highlights",{"1":{"185":1}}],["highest",{"1":{"136"
:3}}],["higher",{"1":{"83":2,"136":1,"174":1,"175":1,"177":1,"289":1,"290":1,"294":1,"295":2,"307":3}}],["high",{"0":{"248":1,"294":1},"1":{"83":1,"84":11,"170":1,"173":1,"175":2,"177":1,"182":2,"202":2,"210":1,"223":1,"248":6,"249":1,"268":1,"278":1,"293":1,"294":5,"296":1,"297":2,"298":1,"301":1},"2":{"84":1}}],["hctx",{"1":{"104":3,"105":3,"120":1}}],["h",{"1":{"87":4,"95":1,"96":1,"129":1,"148":1,"289":1,"362":2}}],["hmac",{"1":{"46":2,"318":2}}],["husband",{"1":{"160":8,"161":2}}],["human",{"1":{"32":2,"86":1,"111":1,"118":1,"123":2,"145":1}}],["hundreds",{"1":{"5":1,"177":1,"292":1}}],["her",{"1":{"160":4,"161":1}}],["heres",{"1":{"30":1}}],["hex",{"1":{"46":1,"89":1,"116":1,"157":1}}],["helm",{"0":{"242":1,"249":1},"1":{"224":2,"234":2,"240":1,"242":1,"243":1,"249":7,"278":4}}],["held",{"1":{"123":1,"169":1}}],["hello",{"1":{"24":5,"25":4,"35":3,"40":1,"43":10,"48":2,"52":2,"55":5,"97":1,"105":6,"112":2,"113":2,"149":3,"294":5,"322":1,"323":2,"324":2,"326":3,"331":3},"2":{"25":3,"331":1}}],["helping",{"1":{"56":1}}],["helpful",{"1":{"19":1,"35":1,"55":1,"209":1}}],["helpers",{"1":{"345":2}}],["helper",{"1":{"16":1,"43":1,"157":1,"307":3,"308":1,"309":2,"333":3}}],["help",{"1":{"3":1,"22":1,"57":1,"74":4,"146":6,"148":1,"161":1,"189":1,"208":1,"209":1,"211":1,"289":2,"316":4,"326":1,"348":1,"362":4,"368":1}}],["helps",{"1":{"0":1,"74":1,"75":1,"176":1,"179":1}}],["headroom",{"1":{"290":1}}],["head",{"1":{"278":1,"374":1}}],["headers>",{"1":{"372":1}}],["headers=<optional",{"1":{"372":1}}],["headers",{"1":{"46":3,"162":1,"216":2,"338":1}}],["header",{"1":{"11":1,"12":1,"46":3,"162":3,"216":1,"241":1,"264":1},"2":{"162":1}}],["heavily",{"1":{"177":1,"209":1,"326":1}}],["heavier",{"1":{"158":1}}],["heavy",{"1":{"74":1,"297":4,"298":1,"325":1}}],["healthy",{"1":{"146":5,"166":1,"225":2,"227":3,"278":1}}],["health`",{"1":{"146":3,"272":2}}],["healthcheck`",{"1":{"254":1}}],["healthcheck",{"0":{"272":1},"1":{"146":5,"225":3,"227":2,"231":1,"254":3,"272":5}}],["healt
h",{"0":{"146":1},"1":{"146":8,"205":1,"210":1,"266":2}}],["heartbeats",{"0":{"210":1},"1":{"168":1,"210":2}}],["heartbeat",{"1":{"21":1,"166":1,"210":2},"3":{"208":1,"209":1,"210":1}}],["hood",{"1":{"307":1}}],["hooks",{"1":{"152":3,"153":2,"154":8,"156":1,"158":8,"345":1}}],["hook",{"1":{"152":1,"153":8,"154":6,"155":1,"157":3,"158":8}}],["hoc",{"1":{"205":1}}],["house",{"1":{"160":8,"161":2}}],["hour`",{"1":{"266":1}}],["hours",{"1":{"38":1,"65":1,"86":1,"88":1,"89":2,"109":4,"123":2,"124":1,"147":1,"177":1,"196":1}}],["hours=2",{"1":{"35":1}}],["hours=1",{"1":{"35":1}}],["hour",{"1":{"5":1,"38":1,"65":1,"79":1,"88":1,"89":1,"109":1,"266":1}}],["homepage",{"1":{"374":1}}],["home",{"1":{"158":1,"228":1,"234":1,"238":1,"306":1,"309":3,"332":1}}],["homebrew",{"1":{"7":1,"349":2}}],["holds",{"1":{"123":1}}],["hold",{"1":{"101":1,"103":1,"104":1,"123":3,"124":1}}],["holding",{"1":{"86":2}}],["however",{"1":{"55":1,"83":1,"93":1,"133":1,"158":1,"292":1,"297":1,"325":1}}],["hot",{"1":{"21":1,"177":1,"206":1,"373":1}}],["horizontally",{"1":{"209":1,"297":1}}],["horizontal",{"1":{"20":1,"176":1},"3":{"19":1,"20":1,"21":1,"22":1}}],["host`",{"1":{"255":1,"258":1,"358":2}}],["host=your",{"1":{"252":1}}],["hosts",{"1":{"241":4}}],["hostname",{"1":{"225":1,"227":2,"230":1,"231":1}}],["hosted",{"0":{"183":1,"189":1,"202":1},"1":{"7":1,"21":2,"173":2,"174":1,"179":1,"180":1,"182":1,"185":1,"187":1,"200":1,"222":1,"223":1,"268":1,"275":1}}],["host",{"1":{"4":1,"7":1,"189":1,"202":1,"223":1,"224":1,"241":2,"245":1,"252":2,"253":1,"255":2,"258":1,"270":4,"304":1,"358":1,"376":3},"2":{"252":1}}],["hosting",{"0":{"4":1,"223":1},"1":{"0":2,"4":3,"6":2,"7":1,"21":2,"164":1,"167":1,"175":2,"178":2,"182":3,"184":2,"189":2,"202":3,"222":2,"224":3,"225":2,"226":1,"248":1,"253":1,"268":1,"279":1,"280":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["hasworkflow",{"1":{"333":1}}],["hash",{"1":{"153":1,"154":1,"155":1,"261":2}}],["hatfield",{"1":{"325":1}}],["hatch",{"1":{"316":2,"317"
:2}}],["hatchetworker",{"1":{"332":1}}],["hatchetwithmiddlewarechaining",{"1":{"153":1}}],["hatchetwithmiddleware",{"1":{"153":1,"155":2,"156":2},"2":{"155":1,"156":1}}],["hatchetv1",{"1":{"332":1}}],["hatchetmiddleware",{"1":{"153":4,"157":3}}],["hatchetclient",{"1":{"153":3,"154":1,"157":3,"332":2},"2":{"153":1,"154":1,"157":1}}],["hatchetcontext",{"1":{"74":1,"78":1,"132":2,"133":1,"136":2,"294":1},"2":{"74":1,"78":1,"132":1,"133":1,"136":1,"294":1}}],["hatchetotel",{"1":{"144":2},"2":{"144":1}}],["hatchetinstrumentor",{"1":{"144":5,"341":1}}],["hatchet",{"0":{"0":1,"36":1,"41":1,"126":1,"188":1,"201":1,"225":1,"228":1,"268":1,"297":1,"307":1,"358":1,"360":1,"361":1,"364":1,"368":1},"1":{"0":8,"2":2,"3":5,"4":4,"5":4,"6":1,"7":26,"9":3,"10":4,"11":10,"12":6,"13":2,"14":6,"15":3,"16":4,"18":4,"19":3,"20":9,"21":6,"22":2,"23":1,"24":1,"25":5,"26":2,"27":1,"29":3,"31":1,"34":6,"35":18,"36":1,"37":3,"38":10,"39":5,"40":8,"41":1,"42":3,"43":4,"44":1,"46":5,"48":5,"50":7,"51":2,"52":17,"53":1,"55":20,"56":1,"57":2,"58":4,"59":3,"60":5,"61":5,"62":9,"63":2,"64":4,"65":4,"66":2,"67":3,"68":4,"69":1,"70":1,"71":2,"72":28,"73":3,"74":8,"78":5,"79":2,"80":1,"81":4,"82":9,"83":3,"84":7,"85":1,"87":3,"89":1,"90":1,"91":1,"92":2,"93":3,"94":3,"95":3,"102":2,"104":3,"105":19,"106":1,"109":4,"110":3,"111":1,"112":12,"113":17,"116":5,"117":10,"118":5,"120":1,"121":4,"122":1,"123":5,"124":2,"125":2,"126":3,"127":7,"128":1,"130":9,"132":5,"133":7,"135":4,"136":6,"137":1,"138":4,"140":1,"141":12,"142":12,"143":2,"144":15,"145":2,"146":9,"147":3,"149":6,"150":1,"151":1,"152":2,"153":2,"154":2,"155":7,"156":1,"157":3,"158":6,"159":3,"160":5,"161":5,"162":9,"163":1,"164":8,"165":2,"166":2,"167":4,"168":3,"170":1,"171":1,"172":1,"173":7,"174":4,"175":3,"176":2,"177":4,"178":2,"179":3,"180":2,"181":1,"182":9,"184":1,"185":2,"186":3,"187":2,"188":7,"189":6,"190":2,"195":1,"197":1,"198":1,"199":1,"200":1,"201":5,"202":1,"203":1,"204":1,"206":2,"207":2,"208":1,"209":2,"210":2,"211":1,"212":
2,"213":1,"214":13,"215":1,"216":9,"217":1,"218":2,"219":7,"220":7,"221":6,"222":5,"223":1,"224":7,"225":51,"226":5,"227":62,"228":6,"230":3,"231":4,"234":15,"238":14,"240":7,"241":16,"243":2,"244":1,"246":4,"247":8,"248":8,"249":9,"250":1,"251":1,"252":1,"253":5,"256":1,"268":45,"269":1,"275":2,"276":1,"278":22,"279":6,"280":1,"281":2,"283":6,"284":4,"285":9,"286":4,"288":4,"289":26,"290":3,"291":1,"292":2,"294":7,"297":6,"298":4,"301":2,"302":1,"304":1,"305":2,"306":2,"307":20,"308":1,"309":13,"310":1,"311":1,"312":1,"313":1,"314":1,"315":2,"316":1,"317":1,"318":2,"319":1,"320":1,"321":1,"322":18,"323":2,"324":2,"325":2,"326":11,"327":3,"328":6,"329":2,"330":1,"331":4,"332":7,"333":1,"334":1,"335":1,"336":1,"338":1,"339":2,"340":1,"341":1,"342":2,"343":1,"344":1,"345":7,"346":3,"347":5,"348":3,"349":5,"350":2,"351":2,"352":3,"353":1,"354":4,"355":2,"356":1,"357":1,"358":4,"359":1,"360":2,"361":2,"362":22,"363":2,"365":4,"367":1,"368":4,"369":1,"370":1,"371":1,"372":2,"373":2,"374":6,"376":1},"2":{"7":1,"14":2,"20":2,"25":1,"35":7,"39":5,"40":6,"43":2,"48":2,"50":5,"52":10,"53":1,"55":7,"58":3,"59":3,"60":4,"61":2,"62":1,"65":4,"66":5,"68":3,"72":3,"74":2,"78":2,"81":3,"82":6,"84":3,"89":1,"93":1,"94":1,"95":2,"104":1,"105":6,"109":2,"110":4,"112":5,"113":10,"116":3,"117":4,"118":7,"120":1,"121":2,"132":4,"133":5,"135":5,"136":1,"138":2,"141":1,"142":2,"144":2,"149":3,"153":1,"155":3,"157":1,"160":2,"161":3,"162":3,"214":3,"216":3,"219":3,"220":3,"221":3,"225":2,"227":1,"240":2,"241":2,"294":3,"322":1,"326":4,"328":2,"330":1,"331":1,"345":2,"349":1}}],["ha",{"0":{"249":1},"1":{"248":2,"249":3,"268":1,"278":2}}],["ha`",{"1":{"243":1,"245":1}}],["happening",{"1":{"205":1,"219":1}}],["happens",{"1":{"49":1,"120":1,"158":1}}],["happy",{"1":{"160":4,"161":1}}],["having",{"1":{"83":2,"106":1,"329":1}}],["haven",{"1":{"7":1}}],["hardware",{"1":{"286":1}}],["hard",{"1":{"32":1}}],["handy",{"1":{"206":1}}],["handling",{"0":{"119":1},"1":{"57":1,"60":1,"63":1,"69":1,"71":2,"
73":1,"104":2,"105":2,"119":2,"120":4,"145":1,"221":1,"326":1}}],["handlegithubpr",{"1":{"216":1}}],["handlefunc",{"1":{"162":1},"2":{"162":1}}],["handler",{"1":{"121":2}}],["handled=true",{"1":{"219":1}}],["handled",{"1":{"69":1,"130":1,"158":1,"219":6}}],["handleslackinteraction",{"1":{"221":1}}],["handleslackcommand",{"1":{"220":1}}],["handleslackmention",{"1":{"219":1}}],["handlestripepayment",{"1":{"214":1}}],["handles",{"1":{"21":1,"124":1,"145":1,"157":1,"169":1,"170":1,"204":1,"212":1,"217":1,"219":1}}],["handle",{"1":{"3":2,"17":1,"56":1,"58":1,"63":1,"68":2,"69":1,"72":1,"73":1,"104":1,"105":1,"115":1,"120":4,"121":3,"122":1,"126":1,"132":1,"170":1,"177":1,"212":1,"214":6,"216":7,"219":5,"220":5,"221":5,"292":1,"316":4,"376":1}}],["hand",{"1":{"25":1,"154":1,"209":1}}],["c1",{"1":{"238":1}}],["cncf",{"1":{"237":1}}],["cnf",{"1":{"118":1}}],["cycles",{"1":{"170":1}}],["cm",{"1":{"153":20,"155":40,"330":22}}],["cmd`",{"1":{"365":1}}],["cmd",{"1":{"127":9,"225":3,"227":2,"231":1,"253":1,"283":1,"364":1,"365":1,"372":1,"373":1}}],["cmdutils",{"1":{"20":1},"2":{"20":1}}],["cpython",{"1":{"146":1}}],["cpu",{"0":{"294":1},"1":{"139":1,"170":1,"209":4,"210":1,"286":1,"287":2,"289":1,"290":3,"292":1,"294":5}}],["c$",{"1":{"142":1}}],["c`",{"1":{"118":1}}],["cd",{"1":{"98":1}}],["cipher",{"1":{"157":4},"2":{"157":3}}],["ciphertext",{"1":{"157":6}}],["ci`",{"1":{"127":1}}],["ci",{"1":{"98":1,"127":2}}],["circular",{"1":{"91":1}}],["circumstances",{"1":{"71":1}}],["circumvent",{"1":{"61":1}}],["circuit",{"1":{"63":1}}],["c",{"1":{"87":1,"101":1,"105":1,"118":3,"225":2,"227":1,"231":1,"252":1,"289":1,"294":2,"374":1},"2":{"294":1}}],["cert`",{"1":{"262":2,"376":1}}],["cert",{"1":{"241":7,"262":4,"273":1,"376":1}}],["certificates",{"1":{"241":1}}],["certificate",{"1":{"241":2,"262":2,"273":1,"376":3}}],["certs",{"1":{"227":8}}],["certain",{"1":{"34":1,"51":1,"62":3,"79":1,"131":1,"132":1,"134":1,"154":1,"158":1}}],["central",{"1":{"222":1}}],["centralized",{"1":{"188":1
}}],["center",{"0":{"186":1},"1":{"182":1,"186":1,"188":1}}],["cel",{"1":{"46":4,"81":7,"88":2,"112":3,"113":3,"217":1,"220":1,"318":8,"333":2}}],["ctx|",{"1":{"14":1,"39":1,"52":1,"53":1,"58":1,"59":1,"60":1,"61":3,"65":1,"66":1,"68":2,"74":1,"78":2,"81":1,"82":1,"84":1,"94":2,"95":2,"96":2,"105":3,"109":1,"110":1,"112":2,"113":2,"116":2,"117":4,"118":1,"121":2,"132":2,"133":2,"136":1,"138":1,"142":2,"155":5,"160":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["ctx",{"1":{"14":2,"39":3,"43":4,"50":1,"52":1,"53":7,"55":2,"58":3,"59":7,"60":4,"61":2,"65":4,"66":8,"68":10,"74":1,"78":1,"81":2,"82":2,"84":1,"89":6,"94":2,"95":12,"96":11,"97":2,"104":1,"105":17,"109":7,"110":2,"112":11,"113":4,"115":5,"116":7,"117":42,"118":2,"121":10,"132":11,"133":9,"135":1,"136":16,"138":5,"141":1,"142":11,"153":22,"155":26,"160":6,"161":2,"162":2,"214":2,"216":2,"219":2,"220":2,"221":2,"294":5,"322":1,"326":6,"328":3,"330":9,"331":1,"333":1},"2":{"43":1,"52":1,"53":3,"59":2,"60":1,"66":2,"68":5,"84":1,"89":3,"95":2,"96":2,"105":3,"109":3,"112":3,"115":1,"116":3,"117":4,"121":6,"132":3,"133":2,"136":6,"138":2,"142":2,"155":1,"160":3,"294":2,"326":1,"328":1,"333":1}}],["cto",{"1":{"5":1}}],["chunking",{"1":{"316":4}}],["chunk",{"1":{"160":7,"161":3,"316":4}}],["chunks",{"1":{"160":11,"316":16},"2":{"160":1}}],["chugs",{"1":{"159":1}}],["chap",{"1":{"277":1}}],["chart",{"0":{"242":1,"249":1},"1":{"240":1,"242":1,"249":3}}],["charts",{"1":{"224":1,"234":1,"243":1,"245":1,"249":2,"278":3}}],["character",{"1":{"52":12}}],["channel",{"1":{"219":8,"220":1},"2":{"219":1}}],["channel`",{"1":{"219":1}}],["channels`",{"1":{"219":1}}],["channels",{"1":{"219":1}}],["changing",{"1":{"58":1,"71":1,"102":1}}],["changed",{"1":{"123":1,"243":1}}],["change",{"1":{"35":1,"55":1,"72":1,"102":2,"115":1,"131":1,"134":1,"152":1,"176":1,"210":1,"238":1,"243":5,"247":1,"254":1,"277":1,"327":1,"329":1,"356":1}}],["changes",{"0":{"182":1},"1":{"21":1,"55":1,"106":1,"136":2,"212":1,"254":1,"268":1,"279":2,"281":1
,"298":1,"347":1,"348":1,"365":1}}],["challenge",{"1":{"164":1,"219":1}}],["chains",{"1":{"158":1}}],["chain",{"1":{"153":1}}],["chaining",{"1":{"153":2,"154":1}}],["chained",{"1":{"153":12,"155":38,"330":12}}],["chat",{"1":{"76":1}}],["chosen",{"1":{"290":2}}],["choosing",{"0":{"100":1},"1":{"216":1}}],["choose",{"0":{"209":1},"1":{"27":1,"72":1,"100":1,"104":1,"180":2,"216":1,"218":1,"252":1}}],["choice",{"1":{"32":1,"158":1}}],["checkbox",{"1":{"374":1}}],["checker",{"1":{"326":1}}],["checked",{"1":{"15":1,"374":1}}],["checkmark",{"1":{"219":1}}],["checklist",{"0":{"166":1}}],["checking",{"1":{"68":3,"117":1,"294":1,"309":2,"328":1,"350":1}}],["checks",{"0":{"146":1},"1":{"55":2,"69":1,"71":1,"146":1,"158":1,"166":1,"205":1,"333":1,"374":2}}],["check",{"0":{"256":1},"1":{"33":1,"67":1,"68":2,"69":1,"75":1,"76":1,"117":2,"146":3,"158":2,"166":3,"169":2,"170":2,"238":1,"248":1,"256":4,"278":1,"284":1,"306":1,"309":5,"325":1,"332":1,"333":1,"343":2}}],["checkpointed",{"1":{"102":1}}],["checkpoints",{"1":{"32":1,"85":1,"86":1,"87":2,"90":1,"101":2,"102":1,"104":1,"115":1,"123":1}}],["checkpoint",{"1":{"3":1,"30":2,"87":5,"102":5,"112":1,"115":1,"123":3}}],["checkpointing",{"1":{"2":1,"88":1,"124":1}}],["childkey",{"1":{"333":1}}],["childindex",{"1":{"333":1}}],["childinput",{"1":{"104":6,"105":16,"120":2,"133":1,"294":2,"326":3}}],["childstep1",{"1":{"136":2}}],["child3",{"1":{"105":3}}],["child2",{"1":{"105":4}}],["child1",{"1":{"105":4}}],["childoutput",{"1":{"104":5,"105":13,"133":3},"2":{"105":1,"133":1}}],["childworkflows",{"1":{"294":3}}],["childworkflow",{"1":{"104":3,"105":3,"120":1,"133":1,"135":1,"136":3},"2":{"104":1,"105":1,"120":1,"133":1,"135":1,"136":2}}],["childresult",{"1":{"104":1,"105":2,"133":2},"2":{"105":1,"133":1}}],["childres",{"1":{"104":6,"105":6,"120":2},"2":{"104":2,"105":2,"120":1}}],["children",{"1":{"32":2,"85":1,"86":1,"87":5,"90":2,"100":1,"101":3,"103":1,"104":7,"106":2,"115":1,"120":1,"123":6,"141":1,"145":1,"149":1,"301":1,"322":2,
"326":1,"333":3}}],["child",{"0":{"90":1,"103":1,"133":1},"1":{"13":2,"20":3,"24":5,"31":2,"32":1,"43":7,"85":2,"87":7,"88":1,"90":4,"92":1,"101":1,"102":1,"103":3,"104":27,"105":70,"106":8,"115":6,"120":12,"122":1,"123":1,"131":1,"132":2,"133":18,"135":1,"136":3,"141":1,"145":9,"158":4,"294":12,"301":2,"309":1,"314":2,"326":12,"333":13},"2":{"24":4,"43":4,"104":7,"105":10,"115":1,"120":3,"132":1,"133":4,"294":2,"326":3}}],["cut",{"1":{"238":1}}],["cutting",{"1":{"158":1}}],["customized",{"1":{"322":1,"345":1}}],["customize",{"1":{"180":1}}],["customer=customer",{"1":{"214":1}}],["customer",{"1":{"32":1,"34":3,"35":2,"40":8,"46":2,"162":1,"214":19},"2":{"214":1}}],["customers",{"1":{"17":1,"27":1,"35":1,"38":1,"40":1,"83":1}}],["custom",{"1":{"10":1,"130":1,"141":1,"144":2,"145":1,"146":2,"175":1,"237":1,"268":1,"362":4,"377":1}}],["curr",{"1":{"104":2,"105":2},"2":{"104":1,"105":1}}],["currently",{"1":{"40":1,"62":1,"67":1,"74":1,"76":1,"131":1,"134":1,"142":1,"192":1,"198":1,"224":1,"233":1,"236":1,"333":2,"341":1,"347":1}}],["current",{"0":{"198":1},"1":{"3":1,"36":1,"59":3,"123":1,"129":1,"134":1,"138":2,"145":1,"154":5,"182":1,"189":1,"233":1,"236":1,"268":2,"277":1,"282":2,"284":2,"293":1,"307":1,"309":22,"314":6,"333":14,"338":2,"339":2,"340":1,"342":1,"358":1}}],["curious",{"1":{"29":1}}],["cursortablabel",{"1":{"9":1}}],["cursormcpconfig",{"1":{"9":1}}],["cursordeeplinkbutton",{"1":{"9":1}}],["cursor",{"1":{"7":1,"11":1},"3":{"9":1,"10":1,"11":1,"12":1}}],["curl",{"1":{"7":1,"127":2,"129":1,"146":2,"148":1,"349":1}}],["cross",{"1":{"158":1,"177":1,"268":1}}],["cronclient",{"1":{"332":1,"334":1}}],["cronlist",{"1":{"40":1}}],["cronid",{"1":{"40":2}}],["croninput",{"1":{"39":2}}],["cronoutput",{"1":{"39":2}}],["cronworkflowslist",{"1":{"334":1}}],["cronworkflows",{"1":{"311":2,"334":4}}],["cronworkflowlistparams",{"1":{"40":1},"2":{"40":1}}],["cronworkflow",{"1":{"39":3}}],["cron`",{"1":{"39":3,"322":4,"324":4}}],["cron",{"0":{"38":1,"39":1,"40":1,"41":1,"42"
:1,"266":1,"311":1},"1":{"26":3,"38":19,"39":18,"40":50,"41":5,"42":4,"55":2,"83":1,"84":12,"174":1,"257":4,"266":9,"307":5,"311":41,"322":19,"324":16,"332":3,"334":12,"341":1,"345":8},"2":{"39":1,"40":11,"55":1,"84":2,"345":1},"3":{"23":1,"24":1,"25":1,"26":1,"27":1,"28":1}}],["crons`",{"1":{"307":3}}],["crons=",{"1":{"39":2}}],["crons",{"0":{"26":1,"334":1},"1":{"23":1,"26":1,"39":3,"40":6,"84":1,"332":3},"2":{"40":3,"84":1,"332":1}}],["cryptokeys",{"1":{"372":1}}],["crypto",{"1":{"153":2,"157":1},"2":{"153":1}}],["crawlers",{"1":{"106":1}}],["crashing",{"1":{"167":1}}],["crash",{"1":{"86":1,"166":1,"210":1}}],["crashes",{"1":{"30":1,"85":1}}],["credentials",{"1":{"183":1,"189":3,"225":1,"227":1,"234":1,"246":1,"252":1,"259":2,"268":1,"372":1}}],["credits`",{"1":{"49":1}}],["creation",{"1":{"52":1,"213":1,"254":1,"374":1}}],["creating",{"0":{"35":1,"40":1,"46":1,"352":1},"1":{"7":2,"10":1,"21":1,"34":1,"35":2,"38":1,"40":3,"47":1,"52":1,"55":1,"65":1,"74":1,"105":1,"126":1,"144":2,"158":1,"212":1,"216":2,"227":1,"326":1,"330":1,"332":15,"334":1,"346":1,"362":1,"372":1,"374":3}}],["createtaskworkflow",{"1":{"346":1}}],["createtaskopts",{"1":{"333":1}}],["createworkflow",{"1":{"332":1,"346":1}}],["createchunks",{"1":{"160":4}}],["createcipheriv",{"1":{"157":2}}],["createcrontrigger",{"1":{"40":1,"84":1},"2":{"40":1,"84":1}}],["createratelimitopts",{"1":{"82":1},"2":{"82":1}}],["create`",{"1":{"50":1,"311":2,"312":2,"316":2,"317":2,"318":2}}],["createdurabletaskworkflow",{"1":{"346":1}}],["createdworkflows",{"1":{"294":1}}],["createdecipheriv",{"1":{"157":2}}],["created`",{"1":{"46":2,"54":1,"214":3}}],["createdcron",{"1":{"40":2},"2":{"40":1}}],["created",{"1":{"35":2,"36":1,"39":1,"46":4,"47":1,"52":1,"54":1,"105":1,"144":1,"145":1,"155":1,"156":1,"158":1,"192":2,"214":3,"216":1,"218":1,"219":1,"225":1,"227":1,"268":12,"291":2,"307":8,"311":2,"312":2,"317":3,"318":2,"322":4,"324":4,"334":1,"335":1,"341":1,"342":1,"364":1}}],["createscheduledruntrigger",{"1":{"35":1
,"84":1},"2":{"35":1,"84":1}}],["creates",{"1":{"10":1,"30":1,"87":1,"101":1,"106":1,"144":2,"145":2,"307":2,"317":4,"322":5,"332":6,"334":1,"335":1,"341":1,"342":1,"345":1,"346":3}}],["create",{"1":{"7":2,"8":1,"20":1,"28":1,"34":2,"35":4,"36":2,"38":1,"40":7,"41":2,"43":4,"46":7,"48":4,"50":4,"52":10,"55":3,"66":1,"72":1,"78":1,"81":1,"82":1,"84":4,"104":2,"105":5,"107":1,"112":4,"113":4,"117":1,"125":1,"127":1,"130":2,"136":2,"141":1,"142":3,"144":3,"145":1,"149":4,"153":1,"158":1,"160":2,"161":1,"162":1,"164":2,"214":3,"216":3,"218":1,"219":1,"220":2,"221":1,"225":1,"227":1,"228":4,"234":1,"238":4,"247":4,"253":1,"254":1,"277":1,"294":7,"307":11,"311":4,"312":4,"318":4,"322":14,"324":14,"326":2,"331":1,"332":1,"334":1,"335":2,"341":1,"342":2,"352":1,"364":1,"372":4,"373":1,"374":4},"2":{"40":2,"43":2,"52":2,"84":1,"104":2,"105":2,"127":1,"294":1,"326":1}}],["criteria",{"1":{"34":1,"40":1,"134":3,"135":1,"136":1,"151":1,"311":4,"312":2,"318":2}}],["critical",{"1":{"0":1,"3":1,"275":1,"276":1,"279":1,"280":1,"282":1}}],["cluster",{"1":{"233":1,"234":1,"236":1,"241":2,"248":1,"290":1}}],["cleared",{"1":{"357":1}}],["clear",{"1":{"162":1}}],["cleans",{"1":{"327":1}}],["clean",{"1":{"69":1,"127":2,"238":1,"247":1,"328":1}}],["cleaning",{"1":{"67":1,"328":1}}],["cleanup",{"1":{"39":4,"40":4,"69":1,"119":1,"120":1,"121":6,"137":1,"238":3}}],["closing",{"1":{"167":1}}],["closed",{"1":{"255":1}}],["close",{"1":{"37":1,"42":1,"167":1,"176":2,"197":1}}],["closer",{"1":{"30":1,"177":1}}],["clock",{"1":{"115":1}}],["cloudkms",{"1":{"252":3,"259":3,"372":5}}],["cloud",{"0":{"179":1,"183":1,"188":1,"201":1},"1":{"0":2,"4":2,"5":1,"6":1,"7":10,"38":1,"46":1,"129":1,"130":3,"147":1,"148":1,"167":1,"173":1,"175":4,"179":2,"180":1,"182":1,"185":1,"187":1,"188":4,"197":1,"198":3,"200":1,"201":2,"210":1,"222":1,"237":1,"241":1,"248":1,"252":1,"259":3,"268":1,"277":2},"2":{"129":1,"130":1,"148":1,"241":1,"268":1},"3":{"7":1,"8":1}}],["classes",{"1":{"308":1,"331":2,"333":1}}],["class
def",{"1":{"173":3}}],["class",{"1":{"14":2,"50":1,"55":4,"74":1,"78":1,"89":1,"93":1,"105":2,"141":3,"142":1,"173":3,"214":4,"216":4,"219":3,"220":2,"221":4,"307":1,"308":1,"322":1,"326":3,"328":2,"331":2,"333":3}}],["claudecodetablabel",{"1":{"9":1}}],["claudecodecommand",{"1":{"9":1}}],["claude",{"1":{"7":1,"11":2},"3":{"9":1,"10":1,"11":1,"12":1}}],["clicked",{"1":{"221":4}}],["click",{"1":{"36":1,"41":1,"72":3,"218":1,"228":2,"234":2,"238":2}}],["clientconfig",{"1":{"62":1,"141":2}}],["clients",{"1":{"52":1,"74":2,"75":1,"144":1,"183":1,"307":1,"332":12}}],["client",{"0":{"62":1,"306":1,"307":1,"332":1},"1":{"14":2,"20":2,"21":5,"35":5,"39":1,"40":3,"48":2,"50":3,"52":10,"55":6,"58":1,"59":1,"60":1,"61":1,"62":14,"66":1,"72":1,"74":3,"78":1,"81":1,"82":4,"84":4,"93":1,"105":8,"109":1,"111":1,"112":3,"113":2,"116":1,"132":3,"133":4,"135":1,"141":2,"142":2,"144":1,"145":6,"146":4,"149":3,"152":2,"153":1,"154":1,"155":2,"156":2,"157":1,"158":9,"161":3,"162":3,"166":1,"214":1,"216":1,"219":1,"220":1,"221":1,"225":1,"227":1,"238":1,"241":3,"251":2,"253":1,"260":8,"262":18,"265":4,"270":5,"272":3,"273":5,"274":6,"289":10,"294":3,"307":18,"311":3,"312":3,"313":3,"314":3,"315":2,"316":3,"317":3,"318":2,"319":7,"320":7,"332":32,"334":3,"335":3,"336":3,"338":3,"339":1,"340":3,"341":2,"342":2,"343":3,"344":3,"345":1,"346":3,"373":1,"374":4,"376":15},"2":{"14":1,"20":1,"35":1,"39":1,"40":1,"48":1,"50":2,"52":6,"55":2,"58":1,"59":1,"60":1,"61":1,"66":1,"72":1,"74":1,"78":1,"81":1,"82":2,"84":4,"93":1,"105":2,"109":1,"112":2,"113":1,"116":1,"132":2,"133":2,"135":1,"141":2,"142":1,"149":2,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":3}}],["cli",{"0":{"206":1},"1":{"7":4,"9":4,"10":4,"11":1,"21":3,"206":4,"207":1,"224":2,"225":2,"228":1,"347":3,"348":3,"349":1,"350":1,"351":1,"355":1,"358":1,"362":4,"363":1,"368":1,"369":1,"370":1,"371":1,"373":2}}],["calculates",{"1":{"268":1}}],["callback`",{"1":{"374":1}}],["callback",{"1":{"374":1}}],["callable",{"1":{"152
":1,"153":1,"307":6,"322":4}}],["calls",{"1":{"61":1,"62":5,"74":1,"79":1,"89":1,"102":1,"123":1,"153":1,"158":1,"268":1}}],["calling",{"1":{"35":2,"48":1,"94":1,"109":1,"126":1,"158":1,"307":1,"377":2}}],["call",{"1":{"15":1,"24":4,"25":4,"43":2,"66":2,"87":1,"90":1,"102":5,"104":1,"108":1,"111":1,"120":1,"144":1,"154":1,"159":1,"160":1,"162":1,"307":3,"309":4,"321":1,"334":1,"341":1,"345":1,"373":1}}],["caller",{"1":{"154":2,"155":1,"157":2,"158":1}}],["callers",{"1":{"15":1,"154":4,"158":1}}],["called",{"1":{"14":2,"94":1,"110":1,"113":1,"121":1,"138":1,"145":1,"171":1,"238":1,"241":1,"307":2,"323":4}}],["callout",{"1":{"0":1,"7":1}}],["ca",{"1":{"262":6,"265":2,"273":2,"376":3}}],["ca`",{"1":{"262":2,"376":1}}],["caddy",{"1":{"234":1},"2":{"234":1}}],["cat",{"1":{"238":1,"372":1}}],["categorizing",{"1":{"151":1}}],["catch",{"0":{"120":1},"1":{"57":1,"68":1,"104":1,"105":1,"119":1,"120":3,"154":1,"162":1,"326":1,"333":1}}],["captures",{"1":{"191":1}}],["captured",{"1":{"141":1}}],["capacity",{"1":{"82":1,"123":1,"124":1,"296":2}}],["capabilities",{"1":{"63":1,"100":1,"134":1,"135":1,"322":1}}],["causing",{"1":{"167":1}}],["causes",{"1":{"277":1,"279":1,"292":1}}],["caused",{"1":{"167":1,"325":1}}],["cause",{"1":{"73":1,"166":1,"170":1,"278":1,"294":1,"307":6,"322":4,"329":1}}],["caution",{"1":{"137":1,"309":2}}],["carrier",{"1":{"158":1}}],["carrying",{"1":{"160":4,"161":1}}],["carry",{"1":{"145":1}}],["careful",{"1":{"325":1}}],["care",{"1":{"136":1,"216":1}}],["cards",{"1":{"7":1,"211":1}}],["card",{"1":{"7":1}}],["cache",{"1":{"127":1,"162":4,"251":1,"255":1}}],["cached",{"1":{"30":1,"92":1,"100":1}}],["came",{"1":{"46":1}}],["canonical",{"1":{"106":1}}],["cannot",{"1":{"30":1,"32":1,"55":1,"61":2,"73":1}}],["canceled",{"1":{"67":1,"68":2,"322":10}}],["cancels",{"1":{"67":1,"77":1,"145":1,"340":1}}],["canceling",{"1":{"67":1,"76":1}}],["cancelling",{"1":{"68":2,"72":3,"145":1,"377":1}}],["cancelled",{"1":{"64":1,"65":5,"66":1,"68":14,"76":1,"113":4,"117":2,"13
4":1,"268":5,"301":1,"309":3,"314":4,"338":1,"340":1},"2":{"68":1}}],["cancellationoutput",{"1":{"68":3}}],["cancellationinput",{"1":{"68":1}}],["cancellation",{"0":{"67":1,"68":1,"69":1},"1":{"16":1,"65":2,"67":4,"68":7,"69":12,"70":2,"71":5,"113":1,"117":1,"268":1,"308":1,"333":2,"377":1},"2":{"68":1}}],["cancellations",{"0":{"72":1},"1":{"16":1,"70":1,"72":1,"213":1,"301":1}}],["cancel`",{"1":{"50":1,"72":3,"309":2,"316":6}}],["cancel",{"0":{"76":1,"77":1},"1":{"20":2,"40":1,"67":1,"68":6,"70":2,"72":31,"74":4,"76":2,"117":1,"120":1,"145":2,"154":2,"309":8,"316":20,"340":2},"2":{"68":2,"72":1}}],["cast",{"1":{"309":1,"326":1,"328":3}}],["case",{"1":{"46":1,"49":1,"118":1,"121":2,"136":1,"168":1,"209":2,"294":1,"325":2,"326":1,"331":1}}],["cases",{"0":{"3":1,"107":1,"139":1,"151":1},"1":{"22":1,"26":1,"32":1,"34":1,"38":1,"46":1,"49":1,"51":1,"52":1,"55":1,"58":1,"61":1,"66":1,"72":1,"75":1,"83":1,"84":1,"85":1,"121":1,"137":1,"139":1,"151":1,"161":1,"162":2,"209":1,"225":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["cask",{"1":{"7":1,"349":1}}],["cost",{"1":{"296":2}}],["coordinated",{"1":{"322":1,"345":1}}],["coordinates",{"1":{"222":1}}],["cookies",{"1":{"243":2}}],["cookie",{"1":{"225":4,"227":2,"238":3,"240":2,"241":8,"243":2,"252":1,"253":1,"260":8}}],["cookbooks",{"1":{"45":3,"211":2}}],["columns",{"1":{"279":1}}],["color",{"1":{"173":3}}],["collision",{"1":{"46":1}}],["collecting",{"1":{"266":3}}],["collection",{"1":{"264":1}}],["collections",{"1":{"146":5}}],["collected",{"1":{"146":7}}],["collector`",{"1":{"144":1}}],["collector",{"1":{"143":2,"144":8,"264":5}}],["collects",{"1":{"101":1}}],["collect",{"1":{"24":1,"32":1,"105":2,"106":1,"120":1,"266":2,"312":2,"318":2}}],["couple",{"1":{"83":1,"105":2}}],["counter",{"1":{"146":3,"148":1,"268":24,"327":2}}],["count`",{"1":{"145":1,"254":1,"309":1,"323":2,"324":2}}],["counts",{"1":{"129":1,"338":2}}],["count",{"0":{"59":1},"1":{"16":1,"22":1,"59":8,"60":2,"105":4,"129":1,"169":1,"209":3,"254":1,"2
66":4,"268":3,"307":2,"309":3,"333":2},"2":{"59":1,"60":1,"105":1}}],["covered",{"1":{"94":1}}],["cover",{"1":{"33":1,"93":1}}],["covers",{"1":{"9":1,"165":1,"275":1,"280":1}}],["coroutinelike",{"1":{"307":4,"322":4}}],["correlates",{"1":{"294":1}}],["correlation",{"1":{"191":1,"193":1}}],["correct",{"1":{"278":1}}],["correctly",{"1":{"29":1,"30":2,"102":1,"279":1,"284":1,"350":1}}],["corresponds",{"1":{"54":2,"246":1,"295":1,"309":2,"345":2}}],["corresponding",{"1":{"25":1,"84":1,"214":1,"268":1,"301":1}}],["cores",{"1":{"294":1}}],["core",{"0":{"31":1,"174":1,"176":1},"1":{"29":2,"31":1,"62":1,"103":1,"104":1,"181":1,"223":1,"254":1}}],["copy",{"1":{"7":2,"10":2,"11":2,"46":1,"47":1,"127":20,"214":1,"216":1,"219":1,"220":1,"225":4,"227":1,"231":1,"238":1,"372":1}}],["copied",{"1":{"7":1,"220":1}}],["coding",{"0":{"9":1,"207":1},"1":{"7":2,"9":2,"10":1,"11":1,"207":5}}],["codebase",{"1":{"15":1,"187":1}}],["code",{"0":{"204":1},"1":{"1":1,"2":1,"4":1,"7":2,"11":2,"21":1,"35":1,"36":1,"55":5,"57":2,"58":1,"61":1,"62":1,"72":1,"73":1,"82":1,"87":1,"106":1,"112":1,"115":1,"123":2,"126":1,"158":2,"169":1,"173":1,"174":1,"176":1,"179":1,"181":1,"182":1,"183":1,"203":1,"204":1,"317":1,"325":2,"348":1},"2":{"112":1},"3":{"9":1,"10":1,"11":1,"12":1}}],["co",{"1":{"5":1}}],["conjunction",{"1":{"322":2,"324":2}}],["conjunctive",{"1":{"118":2}}],["conns=50",{"1":{"298":1}}],["conns=200",{"1":{"298":1}}],["conns`",{"1":{"255":8,"293":1,"295":1,"299":2}}],["conn",{"1":{"255":2,"328":2},"2":{"328":1}}],["connects",{"1":{"170":1,"253":1}}],["connectivity",{"1":{"166":1,"170":1}}],["connecting",{"0":{"230":1,"245":1},"1":{"95":1,"215":1}}],["connectionpool",{"1":{"328":2}}],["connections`",{"1":{"293":1}}],["connections=1000",{"1":{"227":1,"231":1}}],["connections=200",{"1":{"225":2}}],["connections",{"1":{"82":1,"188":1,"255":8,"278":1,"295":1,"299":2}}],["connection",{"0":{"293":1},"1":{"62":2,"82":1,"137":1,"144":1,"162":2,"166":1,"167":2,"177":1,"210":1,"216":1,"251":1,"252":2
,"255":4,"264":1,"293":2,"299":3,"304":1,"327":1,"328":1},"2":{"328":1}}],["connected",{"1":{"21":1,"174":1,"205":1,"371":1}}],["connect",{"1":{"7":1,"174":2,"179":1,"181":1,"222":1,"223":1,"234":1,"238":1,"241":1,"245":2,"284":1,"376":1}}],["cond",{"1":{"110":1,"113":2,"117":3,"118":1}}],["conditionally",{"1":{"254":1}}],["conditional",{"1":{"114":1,"333":1}}],["conditions`",{"1":{"310":1}}],["conditions",{"0":{"110":1,"113":1,"114":1,"117":1},"1":{"74":3,"88":3,"89":2,"110":8,"111":1,"113":8,"114":2,"116":2,"117":3,"118":11,"124":6,"160":2,"309":2,"310":2,"322":6,"333":6}}],["condition",{"1":{"66":1,"101":1,"106":1,"108":1,"110":3,"113":7,"117":10,"118":8,"133":1,"145":1,"225":2,"227":8,"230":1,"310":2,"322":6,"333":2},"2":{"110":1,"113":1,"117":1,"118":1}}],["convert",{"1":{"268":1}}],["converted",{"1":{"25":1,"268":1}}],["convenience",{"1":{"88":1}}],["confused",{"1":{"297":1}}],["confusion",{"1":{"160":4,"161":1}}],["confusing",{"1":{"29":1}}],["conf",{"1":{"225":3,"227":3},"2":{"225":2,"227":2}}],["conflicts",{"1":{"164":1}}],["confluent",{"1":{"30":1}}],["confirm",{"1":{"166":1,"169":1,"278":1,"279":1,"284":1}}],["confirming",{"1":{"21":1,"219":1}}],["configdict",{"1":{"326":2,"328":1}}],["configmaps",{"1":{"246":1}}],["config`",{"1":{"226":1,"230":1,"326":1}}],["configs",{"1":{"146":2,"268":2}}],["config=clientconfig",{"1":{"62":1,"141":2}}],["config",{"0":{"243":1},"1":{"62":1,"127":2,"144":1,"154":1,"225":6,"227":16,"228":3,"243":1,"253":1,"262":1,"293":1,"307":1,"319":4,"320":4,"323":6,"324":6,"326":1,"328":1,"373":1},"2":{"62":1}}],["configurable",{"1":{"62":1,"92":1,"188":1,"294":3}}],["configurations",{"1":{"307":2,"322":4}}],["configuration",{"0":{"17":1,"126":1,"231":1,"250":1,"253":1,"254":1,"255":1,"256":1,"257":1,"258":1,"259":1,"260":1,"261":1,"262":1,"263":1,"264":1,"265":1,"266":1,"267":1,"269":1,"270":1,"271":1,"273":1,"274":1,"299":1,"304":1},"1":{"7":1,"21":2,"46":1,"55":1,"57":1,"58":2,"62":1,"75":1,"76":1,"81":2,"82":3,"93":1,"94":1,"126":
1,"146":1,"155":1,"156":2,"166":1,"167":1,"169":1,"176":1,"181":1,"189":1,"205":1,"225":2,"227":1,"240":1,"245":1,"246":1,"248":1,"249":3,"250":1,"251":5,"268":1,"269":1,"278":2,"285":2,"290":1,"294":1,"305":1,"318":2,"330":1,"332":9,"345":11,"346":2,"358":1,"364":1,"375":1}}],["configures",{"1":{"164":1}}],["configure",{"1":{"46":1,"62":1,"144":1,"146":1,"216":1,"240":1,"243":1,"246":1,"248":1,"249":1,"268":1,"291":1,"301":1,"302":1}}],["configured",{"1":{"17":1,"22":1,"25":1,"46":1,"61":1,"62":1,"126":1,"129":1,"158":1,"169":1,"170":1,"220":1,"241":1,"243":1,"250":1,"269":1,"290":1,"291":1,"353":2}}],["configuring",{"0":{"242":1},"1":{"39":1,"62":1,"218":1,"240":2,"242":1,"248":2,"249":1}}],["consolidate",{"1":{"375":1}}],["console",{"1":{"24":1,"25":2,"35":2,"43":2,"53":1,"59":1,"68":1,"81":1,"82":1,"109":2,"112":1,"116":1,"121":2,"135":2,"136":2,"153":3,"155":4,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1},"2":{"24":1,"25":1,"35":1,"43":1,"53":1,"59":1,"68":1,"81":1,"82":1,"109":1,"112":1,"116":1,"121":1,"135":1,"136":1,"153":1,"155":1,"214":1,"216":1,"219":1,"220":1,"221":1}}],["consecutive",{"1":{"144":1}}],["consumption",{"1":{"82":2,"162":1,"268":1}}],["consuming",{"0":{"161":1},"1":{"81":1,"82":1,"161":1}}],["consumer",{"1":{"145":3,"159":1,"160":1,"161":1,"309":2}}],["consumes",{"1":{"90":1,"104":1,"123":1,"124":1}}],["consumed",{"1":{"82":2,"108":1,"124":2,"160":1,"268":1}}],["consume",{"1":{"64":1,"79":1,"92":1,"123":3,"161":1}}],["consistency",{"1":{"131":1,"133":1}}],["consistent",{"1":{"127":2,"176":1}}],["consistently",{"1":{"69":1}}],["consists",{"1":{"38":1}}],["considers",{"1":{"210":1}}],["considered",{"1":{"64":2}}],["consider",{"1":{"37":1,"42":1,"55":1,"69":3,"118":1,"132":1,"158":1,"167":2,"295":1,"326":1}}],["considerations",{"0":{"37":1,"42":1},"1":{"4":1,"37":1,"42":1,"62":1,"185":1}}],["constructed",{"1":{"255":1}}],["constructor",{"1":{"126":1,"142":2}}],["constraint",{"1":{"40":1}}],["constraints",{"1":{"34":1,"38":1}}],["con
stant",{"1":{"46":2,"307":3,"322":4}}],["const",{"1":{"14":1,"20":1,"24":1,"25":3,"35":4,"39":1,"40":3,"43":3,"48":1,"50":2,"52":1,"55":1,"58":1,"59":2,"60":1,"61":1,"65":2,"66":2,"68":3,"74":1,"78":1,"81":1,"82":2,"84":5,"93":1,"94":1,"95":1,"96":1,"97":3,"104":9,"105":15,"109":1,"110":1,"112":4,"113":3,"116":2,"117":9,"118":1,"120":2,"121":1,"132":2,"133":2,"135":3,"136":4,"142":4,"144":1,"149":2,"153":5,"154":1,"155":1,"156":1,"157":21,"160":4,"161":3,"162":3,"214":2,"216":4,"219":2,"220":1,"221":2,"294":3,"345":3}}],["contributing",{"0":{"372":1},"1":{"372":1}}],["contributors",{"1":{"237":1}}],["controlling",{"1":{"307":6,"322":4}}],["controllers`",{"1":{"254":1}}],["controllers",{"1":{"249":1,"254":1,"297":1}}],["controller",{"1":{"65":2,"66":2}}],["controls",{"1":{"106":1,"188":1,"254":1,"322":1,"345":1,"369":1,"370":1,"371":1}}],["control",{"1":{"3":1,"4":1,"10":1,"22":2,"62":3,"64":1,"67":1,"72":1,"74":9,"78":2,"79":1,"101":1,"115":1,"118":1,"120":1,"140":1,"157":1,"162":2,"180":2,"182":1,"222":2,"223":1,"224":1}}],["contents",{"1":{"374":1}}],["contention",{"1":{"290":1,"294":3}}],["contenttype",{"1":{"157":1}}],["content",{"1":{"46":1,"160":12,"161":4,"162":4,"216":2,"217":1},"2":{"160":2}}],["context`",{"1":{"358":1}}],["contextworker",{"1":{"333":3}}],["contextmanager",{"1":{"153":2,"330":2},"2":{"153":1,"330":1}}],["context<any",{"1":{"117":1}}],["context",{"0":{"16":1,"88":1,"308":1,"309":1,"333":1},"1":{"11":1,"14":4,"16":8,"24":1,"25":1,"39":3,"40":3,"43":2,"50":1,"53":2,"55":5,"58":2,"59":3,"60":2,"61":2,"65":2,"66":3,"68":3,"73":1,"81":2,"82":2,"89":1,"94":5,"95":3,"96":3,"104":1,"105":5,"110":2,"112":1,"113":4,"117":8,"118":2,"121":5,"132":2,"133":2,"136":1,"138":3,"141":2,"142":7,"144":1,"145":1,"153":10,"154":1,"155":1,"158":2,"160":2,"161":1,"162":1,"207":1,"214":2,"216":2,"219":2,"220":2,"221":2,"233":1,"236":1,"294":2,"307":2,"308":5,"322":3,"326":5,"328":2,"330":9,"331":1,"333":11,"358":2},"2":{"14":1,"24":1,"25":1,"35":1,"39":1,"40":1,"43"
:1,"48":1,"50":1,"52":1,"53":1,"55":2,"58":1,"59":1,"60":1,"61":1,"65":1,"66":1,"68":1,"81":1,"82":1,"84":1,"94":1,"95":1,"105":1,"110":1,"112":1,"113":2,"117":1,"118":1,"121":1,"138":1,"142":1,"144":1,"149":1,"160":1,"161":1,"162":1,"214":1,"216":1,"219":1,"220":1,"221":1,"294":1}}],["contact",{"1":{"38":1,"175":2,"177":2,"189":2,"190":1,"199":2,"304":1}}],["containerport",{"1":{"234":2,"238":1}}],["container",{"1":{"224":1,"234":2,"238":1,"246":1,"255":2,"289":7}}],["containers",{"1":{"174":1,"231":1,"234":2,"238":1,"278":1,"289":3,"362":4},"2":{"234":1,"238":1}}],["contains",{"1":{"61":1,"94":1,"250":1,"269":1}}],["contain",{"1":{"38":1,"322":1,"345":1}}],["containing",{"1":{"35":1,"40":1,"46":2,"129":1,"310":1,"317":3,"332":1,"333":6,"345":2}}],["continuing",{"1":{"111":1,"298":1}}],["continuously",{"1":{"32":1}}],["continues",{"1":{"57":1,"87":1,"112":1,"123":1,"138":1}}],["continue",{"1":{"8":1,"24":1,"25":1,"57":1,"89":1,"101":1,"106":2,"138":1,"307":2,"322":4}}],["concern",{"1":{"158":1}}],["concepts",{"0":{"2":1},"1":{"2":1,"33":1},"3":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1}}],["concept",{"1":{"0":1,"17":1,"64":1,"73":1}}],["concatenate",{"1":{"307":4,"322":4}}],["concat",{"1":{"157":1},"2":{"157":1}}],["conclusion",{"0":{"63":1,"71":1}}],["concurrent`",{"1":{"266":1}}],["concurrently",{"1":{"22":3,"74":2,"137":2,"209":1}}],["concurrent",{"1":{"20":1,"74":3,"82":1,"261":2,"266":1,"271":2}}],["concurrency=",{"1":{"78":1}}],["concurrency=concurrencyexpression",{"1":{"74":1}}],["concurrencyworkflowlevel",{"1":{"78":2}}],["concurrencyexpression",{"1":{"74":1,"78":4,"307":6,"322":4},"2":{"74":1,"78":1}}],["concurrencyinput",{"1":{"74":1,"78":1}}],["concurrencylimitstrategy",{"1":{"74":1,"78":2},"2":{"74":1,"78":1}}],["concurrencydemoworkflowrr",{"1":{"74":2}}],["concurrency",{"0":{"74":1,"78":1},"1":{"3":1,"17":2,"34":1,"37":2,"38":1,"42":2,"67":3,"74":23,"75":1,"76":2,"78":12,"106":2,"129":6,"137":1,"139":1,"169":3,"174":1,"176":2,"181":1,"182":1,"204":1,"2
54":6,"289":2,"307":9,"322":9,"325":1,"327":1,"345":1},"2":{"74":2,"78":2},"3":{"19":1,"20":1,"21":1,"22":1}}],["com`",{"1":{"241":2,"255":1}}],["combining",{"1":{"110":2,"113":1,"118":1}}],["combinations",{"1":{"118":1}}],["combination",{"1":{"88":1,"114":1,"118":2}}],["combines",{"1":{"216":1}}],["combined",{"1":{"110":1,"113":1,"118":2}}],["combine",{"1":{"78":1,"89":1,"105":1,"106":1,"116":1}}],["comes",{"1":{"31":1,"37":1,"89":1,"95":1,"110":1,"113":1,"116":1,"118":2,"140":1}}],["coming",{"1":{"16":1,"43":1,"144":1,"152":1,"153":1,"154":1,"155":1,"156":1,"188":1}}],["communication",{"1":{"223":1,"268":1}}],["community",{"1":{"177":1}}],["comments",{"1":{"55":1,"148":1,"374":1}}],["committing",{"1":{"189":1}}],["commit",{"1":{"10":1,"374":1}}],["commontasks",{"1":{"277":1}}],["commonly",{"1":{"73":1,"144":1,"243":1}}],["common",{"0":{"106":1},"1":{"7":1,"9":1,"17":1,"24":1,"26":1,"32":1,"45":1,"49":1,"55":1,"81":1,"83":1,"102":1,"117":1,"118":2,"120":1,"123":1,"139":1,"151":1,"152":1,"157":1,"162":1,"165":1,"177":1,"207":1,"208":1,"210":1,"211":1,"292":1,"305":1}}],["command=input",{"1":{"220":1}}],["command`",{"1":{"220":2}}],["commands`",{"1":{"220":1}}],["commands",{"0":{"220":1},"1":{"9":1,"45":1,"212":1,"217":2,"220":4,"234":1,"247":1,"348":1,"354":1,"355":1,"358":1,"363":1,"364":1,"368":1,"373":2}}],["command",{"1":{"7":5,"10":1,"11":3,"127":1,"206":2,"220":30,"225":3,"227":6,"228":2,"229":1,"231":1,"234":1,"238":1,"241":2,"282":1,"284":1,"289":2,"347":1,"348":3,"352":2,"353":1,"354":1,"356":1,"357":1,"360":1,"361":1,"362":2,"365":2,"366":2,"367":2,"372":1,"373":1},"2":{"220":1}}],["comprehensive",{"1":{"250":1}}],["compensating",{"1":{"119":1,"121":1}}],["computation",{"1":{"90":1,"104":1,"107":1}}],["computations",{"1":{"74":1,"106":1}}],["computed",{"1":{"154":1}}],["compute",{"1":{"86":1,"137":1,"333":1}}],["computing",{"1":{"82":1}}],["complicated",{"1":{"310":1,"333":2}}],["compliant",{"1":{"186":1}}],["compliance",{"1":{"74":1,"182":1,"186":1,"188":
1,"199":1}}],["complementary",{"1":{"117":1}}],["complete",{"1":{"24":1,"31":2,"65":1,"66":2,"76":1,"77":1,"87":1,"92":1,"95":1,"110":3,"158":1,"170":1,"284":1,"289":1,"322":26,"324":18,"333":2,"345":1}}],["completes",{"1":{"18":1,"24":5,"30":1,"75":1,"87":1,"101":1,"123":2,"124":1,"138":1,"288":1}}],["completed",{"1":{"8":1,"32":1,"65":2,"66":2,"68":3,"81":1,"82":1,"85":1,"87":1,"92":1,"105":2,"121":1,"123":1,"124":3,"137":1,"138":1,"142":2,"160":1,"161":1,"174":1,"227":5,"291":1,"314":4,"325":1,"338":1}}],["completion",{"1":{"10":1,"124":1,"268":1,"322":2,"324":2,"332":1,"345":2}}],["complexity",{"1":{"100":1}}],["complex",{"1":{"0":1,"57":1,"63":1,"78":1,"80":1,"81":1,"86":1,"115":1,"118":3,"177":1,"322":1,"345":1}}],["company",{"1":{"186":1}}],["companies",{"1":{"5":1}}],["comparator",{"1":{"136":1}}],["comparatorptr",{"1":{"136":1},"2":{"136":1}}],["comparator=workerlabelcomparator",{"1":{"136":1}}],["comparison",{"1":{"136":1,"268":1}}],["compared",{"1":{"5":1}}],["compatible",{"1":{"131":1,"166":1}}],["compatibility",{"0":{"377":1},"1":{"102":1}}],["component",{"1":{"250":1,"278":1}}],["components",{"0":{"174":1,"221":1},"1":{"0":3,"7":4,"9":2,"45":1,"107":1,"172":1,"173":1,"174":1,"177":1,"182":1,"211":1,"217":2,"221":1,"278":1}}],["composition",{"1":{"13":1}}],["compose`",{"1":{"230":1}}],["composed",{"1":{"31":1}}],["compose",{"0":{"226":1},"1":{"2":1,"13":1,"224":2,"225":10,"226":3,"227":5,"228":1,"229":2,"231":1,"253":3,"255":1,"278":7,"279":2,"285":4},"2":{"227":1}}],["com",{"1":{"0":1,"16":1,"55":1,"68":1,"88":1,"112":1,"113":1,"144":1,"158":1,"214":2,"216":1,"218":4,"219":1,"220":1,"221":1,"225":2,"227":2,"234":1,"237":1,"240":12,"241":16,"243":3,"248":1,"277":3,"283":1,"284":1,"304":4,"305":9,"325":3,"327":1,"329":1,"359":1},"2":{"55":1,"68":1,"144":1,"225":1,"227":1,"234":1,"240":4,"241":3,"243":1,"283":1,"304":2}}],["🪓",{"0":{"0":1},"1":{"21":4}}]],"serializationVersion":2}