diff --git a/.agents/tasks/2025/08/21-0939-codetype-interface b/.agents/tasks/2025/08/21-0939-codetype-interface
index 2814139..b47493a 100644
--- a/.agents/tasks/2025/08/21-0939-codetype-interface
+++ b/.agents/tasks/2025/08/21-0939-codetype-interface
@@ -40,3 +40,61 @@ Implement the CodeObjectWrapper as designed. Update the Tracer trait as well as
There is an issue in the current implementation. We don't use caching effectively, since we create a new CodeObjectWrapper at each callback_xxx call. We need a global cache, probably keyed by the code object id. Propose design changes and update the design documents. Don't implement the changes themselves before I approve them.
--- FOLLOW UP TASK ---
Implement the global code object registry.
+
+--- FOLLOW UP TASK ---
+Take a look at agents.just and scripts/. Also at the design docs for jj-workspaces. This is a proof-of-concept solution for defining and running agent workflows defined by the user. I want to create a real product based on this proof of concept.
+
+Users should be able to define their own workflows. They should also be able to publish workflows and fetch them from a central repository. It should be possible to parallelize work - be able to work on several workflows at once.
+
+Create a technical specification for the components that need to be developed for this product. We will use Rust as our main programming language.
+--- FOLLOW UP TASK ---
+In previous turns, the user had the following interaction with the agent:
+
+
+
+Take a look at agents.just and scripts/. Also at the design docs for jj-workspaces. This is a proof-of-concept solution for defining and running agent workflows defined by the user. I want to create a real product based on this proof of concept.
+
+Users should be able to define their own workflows. They should also be able to publish workflows and fetch them from a central repository. It should be possible to parallelize work - be able to work on several workflows at once.
+
+Create a technical specification for the components that need to be developed for this product. We will use Rust as our main programming language
+
+
+
+**Summary**
+* Authored a comprehensive workflow automation platform specification covering product goals, requirements, and a full Rust-based architecture spanning workflow definitions, execution engine, workspace manager, CLI, registry services, observability, and packaging considerations.
+
+
+* Documented migration steps from the existing proof of concept along with open questions to guide the transition to the production platform.
+
+
+
+**Testing**
+* ⚠️ Not run (not requested for documentation-only change).
+
+
+
+
+
+The user was unsatisfied with the code that the agent previously produced, which can be found as the latest commit in the git history.
+
+
+docs: add workflow automation platform spec
+
+
+
+## Summary
+- add a technical specification for the workflow automation product
+- describe requirements, system architecture, and crate breakdown for a Rust implementation
+- cover registry service, CLI, executor, workspace management, observability, and migration steps
+
+## Testing
+- not run (documentation-only change)
+
+
+
+
+diff --git a/.agents/tasks/2025/08/21-0939-codetype-interface b/.agents/tasks/2025/08/21-0939-codetype-interface
+(File changed with 3 additions, 0 deletions)
+diff --git a/None b/design-docs/workflow-platform-spec.md
+(File changed with 283 additions, 0 deletions)
+
+
+Please address any inline comments on the diff, as well as any additional instructions below.
+
+Now write a PR/FAQ document for this product. If there are important questions whose answers are unclear, add them to the FAQ but don't write an answer
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 71629a1..a8b71c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@
**/target/
build
*~
-.idea/
\ No newline at end of file
+.idea/
+.agent-tools/
diff --git a/AGENTS.md b/AGENTS.md
index c56a4c1..bb2fb0a 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -17,6 +17,30 @@ Then to run the tests do
just test
```
+## Agent Workspaces
+
+Automation now runs inside dedicated Jujutsu workspaces that live outside of the
+repository tree. Use the helper recipes to inspect and control them:
+
+- `just agents::consolidate <workspace_id> <start_change_id> <end_change_id>` – run the
+  consolidate workflow inside a workspace. The workspace is trusted with `direnv`
+  so the nix environment loads automatically.
+- `just agents::workspace-status [<workspace_id>]` – list all workspaces for this
+  repository or show metadata for a single workspace.
+- `just agents::workspace-shell <workspace_id>` – attach an interactive shell to a
+  workspace (the environment is prepared with `direnv allow`).
+- `just agents::workspace-clean <workspace_id>` – forget the workspace and delete
+  its cached directory once the work is integrated.
+- `just agents::workspace-sync-tools <workspace_id>` – refresh the copied automation
+  bundle inside a workspace without re-running the workflow.
+
+Workspaces are stored under `${AI_WORKSPACES_ROOT:-$XDG_CACHE_HOME/ai-workspaces}`
+using a repository-specific namespace. Each workspace contains a `.agent-tools/`
+directory with the current automation (`agents.just`, `scripts/`, `rules/`). The helper
+copies these files before every run so workflows see the latest tooling even when the
+target change is older. See `design-docs/jj-workspaces.md` for full rationale and
+lifecycle details.
+
# Code quality guidelines
- Strive to achieve high code quality.
diff --git a/agents.just b/agents.just
index a164392..c43b8cb 100644
--- a/agents.just
+++ b/agents.just
@@ -1,14 +1,26 @@
-consolidate start_change_id end_change_id:
+# Runs the given task in a workspace
+
+workspace_just := "just -d . --justfile .agent-tools/agents.just"
+
+[private]
+run workspace_id workflow inner_workflow *ARGS:
+ bash scripts/agent-workspace.sh run {{workspace_id}} --workflow {{workflow}} -- {{workspace_just}} {{inner_workflow}} {{ARGS}}
+
+
+codex := "codex exec --skip-git-repo-check --full-auto --config model_reasoning_effort=high"
+
+consolidate workspace_id start_change_id end_change_id: (run workspace_id "consolidate" "consolidate-inner" start_change_id end_change_id)
+
+consolidate-inner start_change_id end_change_id:
#!/usr/bin/env sh
-
read -r -d '' INSTRUCTIONS <<-EOF
You will be given a diff with 'jj diff --from {{start_change_id}} --to {{end_change_id}}'. The task is to write a specification which describes the introduced changes.
- You need to produce a full detailed specification in the folder 'specs/'. Imagine that the changes that the diff shows disappeared and we had to implement the functionality from scratch. The specification must contain all necessary details in order to be ample to implement equivalent functionality.
+ You need to produce a full detailed specification in the folder 'specs/'. Imagine that the changes that the diff shows disappeared and we had to implement the functionality from scratch. The specification must contain all necessary details so that it is possible to implement equivalent functionality.
Abide by the following rules:
- `cat rules/spec.md`
+ `cat "${AGENT_TOOL_COPY_ROOT:-.}/rules/spec.md"`
Here is a log of the changes in question:
@@ -23,18 +35,106 @@ consolidate start_change_id end_change_id:
jj new -r {{start_change_id}} -m "Consolidation in progress"
- echo "$INSTRUCTIONS" | codex exec --full-auto --config model_reasoning_effort=high
+ echo "$INSTRUCTIONS" | {{codex}}
+
+workspace-status workspace_id='':
+ @if [ -z "{{workspace_id}}" ]; then \
+ scripts/agent-workspace.sh status; \
+ else \
+ scripts/agent-workspace.sh status {{workspace_id}}; \
+ fi
+
+workspace-shell workspace_id:
+ scripts/agent-workspace.sh shell {{workspace_id}}
+
+workspace-clean workspace_id:
+ scripts/agent-workspace.sh clean {{workspace_id}}
+
+workspace-sync-tools workspace_id:
+ scripts/agent-workspace.sh sync-tools {{workspace_id}}
+
+questions-for-pm workspace_id rev='@': (run workspace_id "questions-for-pm" "questions-for-pm-inner" rev)
+
+questions-for-pm-inner rev='@':
+ #!/usr/bin/env sh
+ CURRENT_CHANGE=`jj log -r @ --template 'change_id' --no-graph`
+
+ read -r -d '' INSTRUCTIONS_1 <<-EOF
+ 1. Read the current state of open issues in issues.md
+ 2. Read relevant state of the code base
+ 3. Read 'pm-faq.md' which contains previous questions asked to the product manager and her answers.
+ 4. Create questions to the product manager to clarify unclear design choices, priorities and so on. If everything is clear and no questions are needed do nothing.
+ 5. Append the new questions to the end of 'pm-faq.md' file
+ EOF
+
+ jj new -r {{rev}} -m "Questions for the PM"
+ PM_CHANGE=`jj log -r @ --template 'change_id' --no-graph`
+
+ # Add a dated header once
+ echo "# Questions from `date -I`" >> pm-faq.md
+
+ # Initialize iteration counter for commit messages
+ ITERATION=1
+
+ # Keep asking until no further changes are made to pm-faq.md
+ while true; do
+ PREV_HASH=`[ -f pm-faq.md ] && sha256sum pm-faq.md | cut -d' ' -f1 || echo ""`
+
+ echo "$INSTRUCTIONS_1" | {{codex}}
+
+ CURR_HASH=`[ -f pm-faq.md ] && sha256sum pm-faq.md | cut -d' ' -f1 || echo ""`
+ [ "$CURR_HASH" = "$PREV_HASH" ] && break
+
+ # Optional manual edits
+ jj edit $CURRENT_CHANGE && {{workspace_just}} edit-inner $PM_CHANGE
+ # Update issues based on PM responses
+ {{workspace_just}} pm-flow-update-inner $PM_CHANGE
+
+ # Start a new iteration change, include a counter in the message
+ jj new -r @ -m "Questions for the PM (iteration ${ITERATION})"
+ PM_CHANGE=`jj log -r @ --template 'change_id' --no-graph`
+ ITERATION=$((ITERATION + 1))
+ done
+
+ # Abandon the last PM_CHANGE since it must have been empty
+ jj abandon $PM_CHANGE
+
+ # Return to original change
+ jj edit $CURRENT_CHANGE
+
+
+pm-flow-update workspace_id rev='@': (run workspace_id "pm-flow-update" "pm-flow-update-inner" rev)
-next-issue:
+pm-flow-update-inner rev='@':
+ #!/usr/bin/env sh
+ read -r -d '' INSTRUCTIONS_2 <<-EOF
+ 1. Read the current state of open issues in 'issues.md'
+ 2. Read the conversation with the product manager so far in 'pm-faq.md'
+ 3. If necessary read relevant parts of the code
+ 4. Update the issues database in 'issues.md' to match the instructions provided by the product manager.
+ EOF
+
+ CURRENT_CHANGE=`jj log -r @ --template 'change_id' --no-graph`
+
+ jj new -r {{rev}} -m "Updating Issue database"
+ UPDATE_CHANGE=`jj log -r @ --template 'change_id' --no-graph`
+
+ echo "$INSTRUCTIONS_2" | {{codex}}
+
+
+next-issue workspace_id: (run workspace_id "next-issue" "next-issue-inner")
+
+next-issue-inner:
#!/usr/bin/env sh
[ -f next-issue.md ] || exit 1
- codex exec --full-auto --config model_reasoning_effort=high < next-issue.md
+ {{codex}} < next-issue.md
diff --git a/design-docs/jj-workspaces.md b/design-docs/jj-workspaces.md
+- Workspace directories: `<workspace_root>/<repo_slug>/<workspace_id>/`.
+- Metadata file: `.agent-tools/.agent-workflow.json` within every workspace, recording the workflow
+ name, status, timestamps, the command that is running, and the copied tooling hash.
+
+Workspaces are never created under the repository itself. This keeps the main tree
+clean and prevents the permission issues we ran into when nesting workspaces inside
+tracking directories.
+
+## Helper Script Responsibilities
+
+`scripts/agent-workspace.sh` centralises workspace management. Key behaviours:
+
+- `run`: Create or reattach to the workspace, call `jj workspace add` if needed,
+ optionally pin the workspace to a starting change, run `direnv allow`, and then
+ execute the specified command via `direnv exec` so the nix shell is active.
+- Metadata updates before and after the command capture runtime details. Failures are
+ recorded as `status: "error"` for easier triage.
+- `status`: Summarise all known workspaces or dump a single metadata file for inspection.
+- `shell`: Attach an interactive shell to an existing workspace (after running
+ `direnv allow`). This is handy for manual interventions mid-workflow.
+- `sync-tools`: Refresh the copied tooling bundle inside a workspace.
+- `clean`: Remove the workspace after telling Jujutsu to forget it.
+
+### Tool Copy Bundle
+
+Each workspace receives a `.agent-tools/` directory containing the automation files
+needed for the workflows (currently `agents.just`, `scripts/`, and `rules/`). The helper
+copies these from the repository root (or from `AGENT_TOOLS_SOURCE`) before every run:
+
+- The copy lives inside the workspace so the workflow sees consistent tooling even when
+ the target change predates recent automation changes.
+- The directory is ignored via `.gitignore` so `jj status` stays clean.
+- `AGENT_TOOL_COPY_ROOT` points to the copy (defaults to `<workspace_path>/.agent-tools`).
+- `AGENT_TOOLS_VERSION` exposes the hash of the copied bundle. Metadata stores this
+ together with `tools_source` and `tools_copy` for traceability.
+
+Every command run inside a workspace receives environment variables describing where it
+is running: `AGENT_WORKSPACE_ID`, `AGENT_WORKSPACE_PATH`, `AGENT_WORKSPACE_METADATA`,
+`AGENT_WORKSPACE_REPO_ROOT`, `AGENT_TOOL_COPY_ROOT`, and `AGENT_TOOLS_VERSION`.
+
+## Using the Workflows
+
+- Most automation recipes now take `<workspace_id>` as their first parameter and are
+  backed by a matching `*-inner` recipe. Examples include
+  `questions-for-pm`, `pm-flow-update`, `next-issue`, `review-change`, `tidy-issues`,
+  `archive-issues`, `pick-next-issue`, `edit`, `human-work-step`, `ai-work-step`,
+  `work`, and `continue-work`.
+- `just agents::<recipe> <workspace_id> ...` shells out to
+  `scripts/agent-workspace.sh run` before delegating to `<recipe>-inner`, keeping all
+  nested steps inside the same workspace copy of the tooling.
+- `just agents::consolidate <workspace_id> <start_change_id> <end_change_id>` follows the
+  same pattern and delegates to `consolidate-inner`.
+- Nested workflows should pass the same `workspace_id` down via `--set` so that every
+  automated step stays inside the same working copy.
+- `just agents::workspace-status` lists all workspaces for the repo. Add an ID to view
+  the raw metadata, e.g. `just agents::workspace-status wf-123`.
+- `just agents::workspace-shell <workspace_id>` opens an interactive, nix-enabled shell
+  rooted at the workspace.
+- `just agents::workspace-clean <workspace_id>` forgets the workspace in Jujutsu and
+  removes the cached directory.
+- `just agents::workspace-sync-tools <workspace_id>` refreshes the copied tooling for a
+  workspace without relaunching a workflow.
+
+The helper does not auto-clean finished workspaces so that results can be inspected or
+rebased manually. Once the work is integrated, run the cleanup recipe to delete the
+working copy.
diff --git a/design-docs/workflow-platform-prfaq.md b/design-docs/workflow-platform-prfaq.md
new file mode 100644
index 0000000..d1fca5a
--- /dev/null
+++ b/design-docs/workflow-platform-prfaq.md
@@ -0,0 +1,74 @@
+# Workflow Automation Platform PR/FAQ
+
+**Date:** August 21, 2025 \
+**Prepared by:** Workflow Platform Team
+
+## Press Release (Internal)
+
+**For Immediate Release — Codetracer Announces the Workflow Automation Platform**
+
+San Francisco, CA — Codetracer today unveiled the Workflow Automation Platform, a Rust-powered system that lets technical teams define, execute, and share complex AI-assisted development workflows with confidence. Building on the success of our internal proof of concept, the new platform introduces production-grade workflow definitions, a secure sharing registry, and an execution engine that scales across concurrent projects.
+
+Teams can now author declarative `workflow.toml` files, validate them locally, and execute them through a streamlined CLI that provisions isolated workspaces automatically. Runs can be parallelized safely, enabling multiple initiatives—such as code migrations, documentation sprints, or large-scale refactors—to advance simultaneously without resource conflicts.
+
+The Workflow Registry makes collaboration effortless. Customers publish vetted workflows with versioned metadata, discover community best practices, and fetch updates directly into their local environments. Integrated authentication and provenance tracking ensure organizations only run trusted automation.
+
+"Our customers told us they want to move faster without sacrificing control," said Taylor Morgan, Codetracer Head of Product. "By combining declarative authoring, repeatable execution, and visibility into every run, we are giving engineering teams a workflow copilot they can rely on."
+
+Early adopters report dramatic improvements in delivery cadence. During pilot programs, teams reduced workflow setup time by 60%, increased cross-team reuse of automation, and gained observability across dozens of simultaneous runs. With OpenTelemetry-powered instrumentation and actionable CLI insights, stakeholders see real-time progress, logs, and artifacts for every workflow.
+
+The Workflow Automation Platform enters private beta this quarter with a general availability target in early 2026. Interested teams can request access at codetracer.ai/workflows. Beta participants receive migration tooling from existing `agents.just` scripts, guided onboarding, and direct input into roadmap priorities like workflow daemons, artifact retention policies, and enterprise-grade access controls.
+
+## FAQ
+
+### Customer Experience
+
+**Q: Who is the primary customer for the Workflow Automation Platform?**
+A: Software engineering teams and AI operations groups that orchestrate repeatable automation across repositories, particularly those already using Codetracer tooling and seeking stronger governance.
+
+**Q: How do users author and validate workflows?**
+A: Customers describe workflows in declarative `workflow.toml` files, then rely on the CLI to parse, lint, and surface validation errors before execution. Validation covers dependency graphs, parameter types, and required tooling.
+
+**Q: What does execution look like for a developer?**
+A: Developers invoke `workflow run <workflow>` from the CLI, which provisions an isolated workspace, resolves dependencies, and streams logs, status updates, and artifacts back to the terminal.
+
+**Q: How does the platform enable running multiple workflows at once?**
+A: The executor schedules DAG-based workflows asynchronously, using separate workspaces and resource quotas so concurrent runs do not conflict.
+
+**Q: How can teams share workflows with other groups?**
+A: They publish signed workflow bundles to the central Workflow Registry, where peers can search, review metadata, and pull trusted versions into their local cache.
+
+### Business and Go-To-Market
+
+**Q: What is the rollout plan?**
+A: Launch with a private beta for existing Codetracer customers, iterate on registry, daemon, and observability features, then expand to general availability once enterprise security and compliance requirements are satisfied.
+
+**Q: How will we measure success?**
+A: Key metrics include active workflows published, number of concurrent runs per customer, execution success rate, and reduction in setup time for new automation initiatives.
+
+**Q: What are the monetization levers?**
+A: Pricing will bundle per-seat access to the CLI/daemon with consumption-based tiers for registry storage, artifact retention, and premium observability dashboards.
+
+**Q: What partnerships or ecosystem integrations are planned?**
+A: We aim to integrate with major source hosting platforms, artifact stores, and identity providers to streamline publishing and authentication flows.
+
+### Technical & Operational
+
+**Q: Why build the platform in Rust?**
+A: Rust delivers predictable performance, memory safety, and strong cross-platform tooling, letting us ship a single runtime that scales across Linux and macOS while remaining secure.
+
+**Q: How are workspaces isolated and managed?**
+A: The workspace manager provisions Jujutsu-based clones outside the repository tree, copies vetted automation bundles, and enforces cleanup policies to avoid cross-run contamination.
+
+**Q: How does observability work?**
+A: Structured logs, metrics, and traces flow through OpenTelemetry exporters so teams can monitor run latency, success/failure patterns, and resource utilization.
+
+**Q: What safeguards exist around workflow execution?**
+A: Workflow bundles are signed, registry access is authenticated, and step execution honors allow-lists for commands, ensuring only approved actions run inside workspaces.
+
+### Open Questions (Answers Needed)
+
+- **Q:** What service-level objectives (SLOs) will we commit to for workflow execution latency and registry uptime?
+- **Q:** Which identity providers and authentication standards will the beta support (e.g., OAuth2, SAML, SCIM)?
+- **Q:** How will workflow sandboxing interact with customer-provided plugins that require elevated privileges?
+- **Q:** What is the long-term strategy for Windows support and container-based execution environments?
diff --git a/design-docs/workflow-platform-spec.md b/design-docs/workflow-platform-spec.md
new file mode 100644
index 0000000..eb2cf12
--- /dev/null
+++ b/design-docs/workflow-platform-spec.md
@@ -0,0 +1,283 @@
+# Workflow Automation Platform Technical Specification
+
+## 1. Background and Goals
+
+The current proof of concept orchestrates AI-assisted workflows through ad-hoc `just`
+recipes (`agents.just`) and a Bash helper script (`scripts/agent-workspace.sh`) that
+manages per-run Jujutsu workspaces outside the repository tree.
+To deliver a production-grade product we must transform this tooling into a cohesive,
+extensible Rust platform that supports:
+
+- User-authored workflows with first-class configuration, validation, and reuse.
+- Publishing, discovering, and pulling shared workflows from a central registry.
+- Safe parallel execution of multiple workflows using isolated workspaces.
+- Operational observability, access control, and lifecycle management expected from a
+ real product.
+
+## 2. Product Requirements
+
+### 2.1 Functional
+
+1. **Workflow authoring** – Users can define workflows locally using a declarative file
+ format (`workflow.toml`), including parameterized steps, dependencies, conditional
+ execution, and reusable actions.
+2. **Execution** – Users can run workflows locally via CLI, passing parameters and
+ environment overrides. Runs use isolated workspaces derived from a Jujutsu change or
+ repository head.
+3. **Parallelization** – Users can queue or execute multiple workflows concurrently with
+ per-run resource isolation and status tracking.
+4. **Publishing** – Users can publish workflows (including metadata, version, license,
+ and documentation) to a central registry service after authentication.
+5. **Discovery & Fetching** – Users can search the registry, view metadata, download a
+ workflow into their local cache, and upgrade to new versions.
+6. **Introspection** – CLI and APIs expose run status, logs, workspace locations, and
+ artifacts. Users can attach interactive shells to running workspaces.
+7. **Lifecycle management** – Users can clean up workspaces, cancel runs, and configure
+ retention policies for cached tooling and artifacts.
+
+### 2.2 Non-Functional
+
+- **Language** – Rust 2021 edition across all crates.
+- **Portability** – Linux and macOS support (Windows optional, planned).
+- **Security** – Signed workflow bundles, authenticated registry access, sandboxed step
+ execution with configurable allow-lists.
+- **Scalability** – Scheduler supports dozens of concurrent workflows on a single node;
+ registry handles thousands of workflows and metadata queries.
+- **Observability** – Structured logging, OpenTelemetry tracing, and metrics export.
+- **Extensibility** – Pluggable actions and custom step types without recompiling the
+ core runtime.
+
+## 3. System Architecture Overview
+
+The platform is organized into cooperating Rust crates/services:
+
+- `workflow-core` – Parsing, validation, and planning for workflow definitions.
+- `workspace-manager` – Rust port of workspace provisioning, replacing
+ `agent-workspace.sh` while preserving the features documented in the workspace design
+ doc.
+- `workflow-executor` – Asynchronous engine that schedules workflow graphs, creates
+ workspaces, executes steps, and collects results.
+- `workflow-cli` – End-user interface built with `clap`, orchestrating local commands.
+- `workflow-registry-server` – Central service exposing REST/JSON APIs for workflow
+ publish, fetch, and discovery.
+- `workflow-registry-client` – Shared library used by CLI/executor to interact with the
+ registry, manage auth tokens, and cache bundles.
+- `workflowd` (optional) – Long-running daemon that the CLI can delegate to for
+ background execution and concurrency control on shared machines.
+
+Core data flow:
+
+1. User invokes `workflow run <workflow>` (local or fetched). CLI loads definition via
+ `workflow-core`, resolves dependencies, and submits run to `workflowd` or local
+ executor.
+2. Executor requests workspace allocation from `workspace-manager`, which creates or
+ reuses a workspace, copies tooling, and returns metadata.
+3. Executor schedules steps according to DAG dependencies, running actions (shell,
+ built-in Rust code, or plugin) inside the workspace with environment variables
+ matching the current proof of concept.
+4. Run state, logs, and artifacts stream to local storage; optional upload to registry or
+ external artifact store.
+5. CLI polls or receives events to display progress. Upon completion, metadata is
+ persisted; optional cleanup occurs per policy.
+
+## 4. Component Specifications
+
+### 4.1 Workflow Definition Format (`workflow.toml`)
+
+- **Schema**
+ - `id` (string, semantic version optional) and `name`.
+ - `description`, `tags`, `license`, `homepage`.
+ - `parameters` (name, type, default, required, validation regex).
+ - `artifacts` declarations (path patterns, retention policy).
+ - `steps`: map from step id → struct with `uses` (action reference), `inputs`,
+ `env`, `run` (command), `needs` (dependencies), `when` (expression), and
+ `workspace` (inherit, ephemeral, or custom path).
+ - `actions`: reusable step templates referencing built-in adapters or external
+ binaries.
+ - `requirements`: toolchain prerequisites (e.g., nix profile, docker image, python
+ packages) for validation before run.
+- **Parser** – Implemented with `serde` + `toml`. Provide JSON schema export for editor
+ tooling.
+- **Validation** – Ensure DAG acyclicity, parameter resolution, and compatibility with
+ workspace policies. Emit actionable diagnostics.
+- **Extensibility** – Support plugin-defined parameter types and validators via dynamic
+ registration.
+
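+A minimal sketch of how this schema could map onto `serde` types in
+`workflow-core::model`; the field names follow the schema above, while the
+struct names (`RawWorkflow`, `RawStep`) are illustrative rather than a
+committed API:
+
+```rust
+use serde::Deserialize;
+use std::collections::BTreeMap;
+
+#[derive(Debug, Deserialize)]
+pub struct RawWorkflow {
+    pub id: String,
+    pub name: String,
+    pub description: Option<String>,
+    #[serde(default)]
+    pub tags: Vec<String>,
+    #[serde(default)]
+    pub parameters: BTreeMap<String, Parameter>,
+    pub steps: BTreeMap<String, RawStep>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct Parameter {
+    pub r#type: String,
+    pub default: Option<toml::Value>,
+    #[serde(default)]
+    pub required: bool,
+    /// Validation regex; enforced by the validator, not the parser.
+    pub validation: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct RawStep {
+    /// Reference to a reusable action template.
+    pub uses: Option<String>,
+    /// Inline shell command, for steps that do not use an action.
+    pub run: Option<String>,
+    #[serde(default)]
+    pub needs: Vec<String>,
+    pub when: Option<String>,
+    #[serde(default)]
+    pub env: BTreeMap<String, String>,
+}
+
+/// Parse a `workflow.toml` source string into the raw model.
+pub fn load(source: &str) -> Result<RawWorkflow, toml::de::Error> {
+    toml::from_str(source)
+}
+```
+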
+### 4.2 `workflow-core` Crate
+
+- Modules:
+ - `model` – Rust structs representing workflows, steps, actions, parameters.
+ - `parser` – Functions to load from file/URL, merge overrides, and surface line/column
+ errors.
+ - `validator` – Graph validation, parameter type checking, requirement resolution.
+ - `planner` – Convert workflow definitions + runtime parameters into executable DAG
+ plans with resolved command strings and environment.
+- Exposes stable API consumed by CLI, executor, and registry server.
+- Provides `serde` serialization for storing compiled plans in the registry.
+- Includes unit tests covering parsing edge cases, invalid graphs, and parameter
+ substitution.
+
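+To make the acyclicity check concrete, a sketch of the validator's dependency
+pass using Kahn's algorithm (the function name and string-keyed input are
+illustrative):
+
+```rust
+use std::collections::{BTreeMap, VecDeque};
+
+/// Returns a topological order of step ids, or an error when `needs`
+/// references an unknown step or contains a cycle.
+pub fn topo_order(needs: &BTreeMap<String, Vec<String>>) -> Result<Vec<String>, String> {
+    let mut dependents: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
+    let mut indegree: BTreeMap<&str, usize> = BTreeMap::new();
+    for (step, deps) in needs {
+        indegree.entry(step.as_str()).or_insert(0);
+        for dep in deps {
+            if !needs.contains_key(dep) {
+                return Err(format!("step `{step}` needs unknown step `{dep}`"));
+            }
+            dependents.entry(dep.as_str()).or_default().push(step.as_str());
+            *indegree.entry(step.as_str()).or_insert(0) += 1;
+        }
+    }
+    // Start from steps with no unmet dependencies and peel layers off the DAG.
+    let mut ready: VecDeque<&str> = indegree
+        .iter()
+        .filter(|&(_, &d)| d == 0)
+        .map(|(&s, _)| s)
+        .collect();
+    let mut order = Vec::with_capacity(needs.len());
+    while let Some(step) = ready.pop_front() {
+        order.push(step.to_string());
+        for &next in dependents.get(step).into_iter().flatten() {
+            let d = indegree.get_mut(next).expect("registered above");
+            *d -= 1;
+            if *d == 0 {
+                ready.push_back(next);
+            }
+        }
+    }
+    if order.len() != needs.len() {
+        return Err("cycle detected in step dependencies".into());
+    }
+    Ok(order)
+}
+```
+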
+### 4.3 `workspace-manager` Crate
+
+- Reimplements responsibilities of `agent-workspace.sh` in Rust:
+ - Manage workspace root discovery, hashed repo namespace, and metadata persistence in
+ `.agent-tools/.agent-workflow.json`.
+ - Provide APIs: `ensure_workspace(id, base_change, direnv_policy)`,
+ `update_status(status, workflow, command)`, `cleanup(id)`, `sync_tools(id)`.
+ - Copy automation bundles (`agents.just`, `scripts/`, `rules/` by default) into
+ `.agent-tools/`, hashing contents to avoid redundant copies.
+ - Execute commands via `direnv exec` when available; fall back to plain execution if
+ disabled.
+ - Emit structured events for workspace lifecycle (created, reused, direnv allowed,
+ tooling hash, cleanup).
+- Implementation details:
+ - Use `tokio::process` for subprocess management.
+ - Use `serde_json` for metadata file compatibility with current schema.
+ - Provide CLI subcommands reused by `workflow-cli` for manual inspection.
+
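+A sketch of the crate's surface under these requirements; the metadata fields
+mirror what the Bash helper writes today, while the trait shape and the
+`async-trait`/`anyhow` dependencies are assumptions:
+
+```rust
+use serde::{Deserialize, Serialize};
+
+/// Mirrors `.agent-tools/.agent-workflow.json` as written by the current helper.
+#[derive(Serialize, Deserialize)]
+pub struct WorkspaceMetadata {
+    pub workspace_id: String,
+    pub repo_root: String,
+    pub workspace_path: String,
+    pub status: String, // "running" | "done" | "error" | "idle"
+    pub direnv_allowed: bool,
+    pub command: Vec<String>,
+    pub created_at: String, // ISO-8601 timestamps
+    pub updated_at: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub base_change: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tools_version: Option<String>,
+}
+
+pub enum DirenvPolicy { Require, Disable }
+pub struct WorkspaceHandle { pub path: std::path::PathBuf }
+
+#[async_trait::async_trait]
+pub trait WorkspaceManager {
+    async fn ensure_workspace(
+        &self,
+        id: &str,
+        base_change: Option<&str>,
+        direnv: DirenvPolicy,
+    ) -> anyhow::Result<WorkspaceHandle>;
+    async fn update_status(&self, id: &str, status: &str) -> anyhow::Result<()>;
+    async fn sync_tools(&self, id: &str) -> anyhow::Result<()>;
+    async fn cleanup(&self, id: &str) -> anyhow::Result<()>;
+}
+```
+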
+### 4.4 `workflow-executor` Crate
+
+- Built atop `tokio` runtime with cooperative scheduling.
+- Responsibilities:
+ - Accept execution plans from `workflow-core`.
+ - Allocate workspaces per workflow run or per-step when `workspace = "ephemeral"`.
+ - Manage concurrency using per-run DAG scheduler; configurable max parallel steps.
+ - Execute actions:
+ - **Shell command**: spawn process with inherited/captured stdio; enforce timeouts
+ and environment.
+ - **Built-in adapters**: native Rust functions implementing actions like `jj diff`
+ summarization.
+ - **Plugins**: load dynamic libraries (`cdylib`) conforming to trait `ActionPlugin`.
+ - Collect logs, exit codes, produced artifacts; stream to observers.
+ - Handle cancellation, retries, backoff, and failure propagation (fail-fast or
+ continue modes per step).
+- Provides event stream (`RunEvent`) consumed by CLI/daemon for status updates.
+- Maintains run metadata store (SQLite via `sqlx` or `rusqlite`) capturing history and
+ enabling queries.
+
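+The event stream and plugin boundary could look like the following; `RunEvent`
+and `ActionPlugin` are the names used above, but the variants and method
+signatures are a sketch:
+
+```rust
+use std::collections::BTreeMap;
+use std::path::PathBuf;
+
+/// Emitted by the executor and consumed by the CLI/daemon for status updates.
+pub enum RunEvent {
+    StepStarted { run_id: String, step: String },
+    StepOutput { run_id: String, step: String, line: String },
+    StepFinished { run_id: String, step: String, exit_code: i32 },
+    RunFinished { run_id: String, success: bool },
+}
+
+/// Everything an action needs to run inside an allocated workspace.
+pub struct ActionContext {
+    pub workspace_path: PathBuf,
+    pub env: BTreeMap<String, String>,
+}
+
+/// Implemented by built-in adapters and dynamically loaded `cdylib` plugins.
+pub trait ActionPlugin: Send + Sync {
+    fn name(&self) -> &str;
+    fn execute(
+        &self,
+        ctx: &ActionContext,
+        inputs: &BTreeMap<String, String>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>>;
+}
+```
+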
+### 4.5 `workflow-cli` Crate
+
+Commands (subset):
+
+- `workflow init` – Scaffold new `workflow.toml` with templates.
+- `workflow validate [file]` – Run parser and validator.
+- `workflow run <workflow> [--param key=value] [--workspace-id ...] [--parallel N]` –
+  Execute workflows, optionally delegating to daemon.
+- `workflow status [run-id]` – Show active runs, including workspace paths and metadata.
+- `workflow logs <run-id>` – Stream logs and step outputs.
+- `workflow workspace <status|shell|clean|sync-tools>` – User-facing wrappers around
+  `workspace-manager` operations.
+- `workflow publish [--registry]` – Package and upload to registry.
+- `workflow fetch <id>` – Download to local cache.
+- `workflow registry login` – Acquire/store auth token securely (OS keyring).
+
+Implementation notes:
+
+- Built with `clap` derive, asynchronous commands using `tokio`.
+- CLI communicates with daemon via Unix domain socket/gRPC (tonic) when running in
+ background mode; falls back to in-process executor.
+- Provides colored terminal UI (indicatif) for progress bars and summary tables.
+
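+A sketch of this command surface with `clap` derive (a subset of the commands
+above; the `parse_key_val` helper is illustrative):
+
+```rust
+use clap::{Parser, Subcommand};
+
+#[derive(Parser)]
+#[command(name = "workflow")]
+struct Cli {
+    #[command(subcommand)]
+    command: Command,
+}
+
+#[derive(Subcommand)]
+enum Command {
+    /// Scaffold a new `workflow.toml` from a template.
+    Init,
+    /// Run the parser and validator without executing anything.
+    Validate { file: Option<std::path::PathBuf> },
+    /// Execute a workflow, optionally delegating to the daemon.
+    Run {
+        workflow: String,
+        #[arg(long = "param", value_parser = parse_key_val)]
+        params: Vec<(String, String)>,
+        #[arg(long)]
+        workspace_id: Option<String>,
+        #[arg(long)]
+        parallel: Option<usize>,
+    },
+    /// Show active runs, including workspace paths and metadata.
+    Status { run_id: Option<String> },
+}
+
+fn parse_key_val(s: &str) -> Result<(String, String), String> {
+    s.split_once('=')
+        .map(|(k, v)| (k.to_string(), v.to_string()))
+        .ok_or_else(|| format!("expected key=value, got `{s}`"))
+}
+
+fn main() {
+    let _cli = Cli::parse();
+    // Dispatch to the in-process executor or the daemon here.
+}
+```
+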
+### 4.6 `workflow-registry-server`
+
+- Rust service built with `axum` + `tower`.
+- Stores workflow bundles (TOML + optional assets) and metadata in PostgreSQL or SQLite.
+- REST API endpoints:
+ - `POST /v1/workflows` – Publish new version (requires auth, accepts signed tarball).
+ - `GET /v1/workflows` – Search by tag, owner, text.
+ - `GET /v1/workflows/{id}` – Fetch metadata and available versions.
+ - `GET /v1/workflows/{id}/{version}/download` – Stream bundle.
+ - `PUT /v1/workflows/{id}/{version}/deprecate` – Mark version as deprecated.
+ - `GET /v1/tags` – Enumerate tags/categories.
+- Authentication via OAuth2 access tokens or PAT; integrate with identity provider.
+- Supports content-addressed storage (CAS) for deduplicated bundles (S3-compatible
+ backend optional).
+- Provides audit logs and signed metadata (Ed25519). Server verifies bundle signature
+ and publishes signature chain for clients.
+
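+The route table might be wired up as follows (axum 0.7-style `:param` paths;
+handlers are elided no-ops here):
+
+```rust
+use axum::{
+    routing::{get, post, put},
+    Router,
+};
+
+fn router() -> Router {
+    Router::new()
+        .route("/v1/workflows", post(publish).get(search))
+        .route("/v1/workflows/:id", get(metadata))
+        .route("/v1/workflows/:id/:version/download", get(download))
+        .route("/v1/workflows/:id/:version/deprecate", put(deprecate))
+        .route("/v1/tags", get(tags))
+}
+
+async fn publish() {}
+async fn search() {}
+async fn metadata() {}
+async fn download() {}
+async fn deprecate() {}
+async fn tags() {}
+```
+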
+### 4.7 `workflow-registry-client`
+
+- Shared crate handling:
+ - Auth token storage and refresh.
+ - HTTP client (reqwest) with retry/backoff, TLS pinning optional.
+  - Local cache of downloaded bundles under `$XDG_CACHE_HOME/workflows/<id>/<version>/`.
+ - Signature verification before unpacking.
+ - Integration with CLI/executor to auto-update cached workflows.
+
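+A sketch of the cache layout and download path (the helper names are
+illustrative; only the registry endpoint and cache directory follow the text
+above):
+
+```rust
+use std::path::PathBuf;
+
+/// Cache layout: $XDG_CACHE_HOME/workflows/<id>/<version>/
+fn bundle_cache_dir(id: &str, version: &str) -> PathBuf {
+    let cache = std::env::var_os("XDG_CACHE_HOME")
+        .map(PathBuf::from)
+        .unwrap_or_else(|| {
+            let home = std::env::var_os("HOME").expect("HOME not set");
+            PathBuf::from(home).join(".cache")
+        });
+    cache.join("workflows").join(id).join(version)
+}
+
+/// Download a bundle; signature verification happens before unpacking.
+async fn fetch_bundle(
+    client: &reqwest::Client,
+    base_url: &str,
+    id: &str,
+    version: &str,
+) -> reqwest::Result<Vec<u8>> {
+    let bytes = client
+        .get(format!("{base_url}/v1/workflows/{id}/{version}/download"))
+        .send()
+        .await?
+        .error_for_status()?
+        .bytes()
+        .await?;
+    Ok(bytes.to_vec())
+}
+```
+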
+### 4.8 `workflowd` Daemon (Optional but recommended)
+
+- Runs locally as background service; manages queue of workflow runs and enforces
+ concurrency limits.
+- Exposes control API over gRPC/Unix socket: submit run, stream events, cancel, list
+ runs, attach shell (spawn using `workspace-manager`).
+- Persists state in local SQLite to survive restarts.
+- Implements cooperative scheduling across workflows, respecting per-user and global
+ limits.
+
+### 4.9 Observability & Telemetry
+
+- Unified logging via `tracing` crate with JSON output option.
+- Emit OpenTelemetry spans for major operations (parsing, workspace allocation, step
+ execution) with context propagation from CLI to daemon to registry calls.
+- Metrics (Prometheus exporter) for run success rates, queue depth, workspace lifecycle.
+- Artifact metadata includes checksums and retention metadata for cleaning policies.
+
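+For example, step execution could carry a `tracing` span so logs, metrics, and
+traces share context (the function and field names are illustrative):
+
+```rust
+use tracing::{info, instrument};
+
+#[instrument(skip(cmd), fields(run_id = %run_id, step_id = %step_id))]
+async fn execute_step(run_id: &str, step_id: &str, cmd: &[String]) -> std::io::Result<i32> {
+    info!("spawning step command");
+    let status = tokio::process::Command::new(&cmd[0])
+        .args(&cmd[1..])
+        .status()
+        .await?;
+    info!(exit_code = status.code(), "step finished");
+    Ok(status.code().unwrap_or(-1))
+}
+```
+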
+### 4.10 Packaging and Distribution
+
+- Provide `cargo` workspace with crates listed above; enable `--features daemon` etc.
+- Offer standalone binaries via `cargo dist` or `nix` flake integration.
+- Provide container image for registry server and optional `workflowd`.
+- Ensure integration with existing `just` recipes for compatibility during migration.
+
+## 5. Parallel Execution & Scheduling
+
+- Scheduler maintains run queue prioritized by submission time and priority class.
+- Per-run concurrency derived from workflow definition; defaults to sequential.
+- Implement resource leasing to avoid oversubscribing CPU/memory; allow configuration
+ via CLI/daemon.
+- Guarantee workspace uniqueness per run; share read-only caches (e.g., tool bundles)
+ to minimize duplication.
+- Provide cancellation tokens; steps respond promptly to interrupts.
+
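+A sketch of how the scheduler could bound parallel steps and honor
+cancellation, using `tokio`'s semaphore and `tokio-util`'s `CancellationToken`
+(the surrounding step type is illustrative):
+
+```rust
+use std::sync::Arc;
+use tokio::sync::Semaphore;
+use tokio_util::sync::CancellationToken;
+
+async fn run_ready_steps(steps: Vec<String>, max_parallel: usize, cancel: CancellationToken) {
+    let permits = Arc::new(Semaphore::new(max_parallel));
+    let mut handles = Vec::new();
+    for step in steps {
+        let permits = Arc::clone(&permits);
+        let cancel = cancel.clone();
+        handles.push(tokio::spawn(async move {
+            // Each step holds one permit, bounding concurrency to max_parallel.
+            let _permit = permits.acquire_owned().await.expect("semaphore closed");
+            tokio::select! {
+                _ = cancel.cancelled() => eprintln!("step {step} cancelled"),
+                _ = execute(&step) => {}
+            }
+        }));
+    }
+    for handle in handles {
+        let _ = handle.await;
+    }
+}
+
+async fn execute(step: &str) {
+    // Spawn the step's command inside its workspace here.
+    let _ = step;
+}
+```
+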
+## 6. Security & Permissions
+
+- Workflow bundles signed with user-specific keys; registry verifies signatures.
+- CLI validates signatures and optionally enforces allow-list for publishers.
+- Sandboxed execution options:
+ - Support running steps inside container runtimes (e.g., `nix develop`, `podman`).
+ - Provide file access policies per workspace (readonly host repo except workspace
+ copy).
+- Secrets management: CLI loads env secrets from OS keyring or `.env` with opt-in.
+- Auditing: persist run metadata (who ran what, when, with which workflow version).
+
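+Bundle verification might use Ed25519 as follows (assuming the
+`ed25519-dalek` 2.x API; key distribution is handled by the registry):
+
+```rust
+use ed25519_dalek::{Signature, SignatureError, Verifier, VerifyingKey};
+
+/// Verify a downloaded bundle against the publisher's public key before unpacking.
+fn verify_bundle(
+    key_bytes: &[u8; 32],
+    bundle: &[u8],
+    sig_bytes: &[u8; 64],
+) -> Result<(), SignatureError> {
+    let key = VerifyingKey::from_bytes(key_bytes)?;
+    let signature = Signature::from_bytes(sig_bytes);
+    key.verify(bundle, &signature)
+}
+```
+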
+## 7. Testing and Quality Strategy
+
+- Unit tests in each crate; property tests for parser and planner.
+- Integration tests using temporary repositories and mocked registry server.
+- End-to-end tests executed via `cargo nextest` that run sample workflows through CLI
+ and executor using fixture registry data.
+- Provide smoke-test command `workflow self-test` to validate installation.
+
+## 8. Migration from Proof of Concept
+
+1. Implement `workspace-manager` crate mirroring Bash functionality, validated against
+ scenarios in `agent-workspace.sh` (run, status, shell, clean, sync-tools).
+2. Port representative workflows from `agents.just` into `workflow.toml` definitions to
+ ensure feature parity (workspace-aware steps, iterative loops, interactive shells).
+3. Wrap existing `just` recipes to call new CLI for backward compatibility during
+ transition period.
+4. Deprecate Bash script once Rust manager is stable; mark `agents.just` workflows as
+ legacy and document migration path.
+
+## 9. Open Questions
+
+- Should the workflow format support embedded Python/Rust scripts, or require external
+ files?
+- How do we support long-running interactive steps (e.g., human-in-the-loop) within the
+ DAG while preserving resumability?
+- What identity provider(s) should the registry integrate with, and do we need
+ fine-grained ACLs per workflow?
+- Do we require distributed execution (multi-machine) in the initial release, or is
+ single-host parallelism sufficient?
+- How should artifact storage integrate with external systems (S3, OCI registries)?
+
diff --git a/scripts/agent-workspace.sh b/scripts/agent-workspace.sh
new file mode 100755
index 0000000..0068369
--- /dev/null
+++ b/scripts/agent-workspace.sh
@@ -0,0 +1,617 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+usage() {
+ cat <<'USAGE'
+Usage:
+  agent-workspace.sh run <workspace-id> [--workflow NAME] [--base-change CHANGE] [--cleanup] [--no-direnv] -- COMMAND [ARG...]
+  agent-workspace.sh status [<workspace-id>]
+  agent-workspace.sh shell <workspace-id>
+  agent-workspace.sh clean <workspace-id>
+  agent-workspace.sh sync-tools <workspace-id>
+USAGE
+}
+
+fail() {
+ echo "agent-workspace: $*" >&2
+ exit 1
+}
+
+require() {
+ if [[ "$1" != "0" ]]; then
+ fail "$2"
+ fi
+}
+
+repo_root=$(jj root)
+repo_root=${repo_root%$'\n'}
+repo_basename=$(basename "$repo_root")
+repo_hash=$(printf '%s' "$repo_root" | sha256sum | cut -c1-10)
+repo_slug="${repo_basename}-${repo_hash}"
+
+cache_root_default=${XDG_CACHE_HOME:-"$HOME/.cache"}
+workspace_root=${AI_WORKSPACES_ROOT:-"$cache_root_default/ai-workspaces"}
+workspace_repo_root="$workspace_root/$repo_slug"
+
+tools_source_root="${AGENT_TOOLS_SOURCE:-$repo_root}"
+tools_relative_paths=("agents.just" "rules" "scripts")
+
+sanitise_workspace_id() {
+ local id="$1"
+ [[ "$id" =~ ^[A-Za-z0-9._-]+$ ]] || fail "workspace id '$id' contains invalid characters"
+ echo "$id"
+}
+
+workspace_path_for() {
+ local workspace_id="$1"
+ echo "$workspace_repo_root/$workspace_id"
+}
+
+metadata_path_for() {
+ local workspace_id="$1"
+ echo "$(workspace_path_for "$workspace_id")/.agent-tools/.agent-workflow.json"
+}
+
+workspace_registered() {
+ local workspace_id="$1"
+ jj workspace list -T 'name ++ "\n"' | grep -Fx "$workspace_id" >/dev/null 2>&1
+}
+
+compute_tools_hash() {
+ python - "$tools_source_root" "${tools_relative_paths[@]}" <<'PY'
+import hashlib
+import os
+import sys
+from pathlib import Path
+
+root = Path(sys.argv[1]).resolve()
+paths = sys.argv[2:]
+
+hasher = hashlib.sha256()
+
+def add_file(path: Path):
+ rel = path.relative_to(root)
+ hasher.update(str(rel).encode('utf-8'))
+ hasher.update(b'\0')
+ with path.open('rb') as fh:
+ while True:
+ chunk = fh.read(65536)
+ if not chunk:
+ break
+ hasher.update(chunk)
+
+if not paths:
+ print('')
+ raise SystemExit(0)
+
+for rel in paths:
+ src = root / rel
+ if not src.exists():
+ print(f"missing:{rel}", file=sys.stderr)
+ raise SystemExit(1)
+ if src.is_file():
+ add_file(src)
+ else:
+ for file_path in sorted(src.rglob('*')):
+ if file_path.is_file():
+ add_file(file_path)
+
+print(hasher.hexdigest())
+PY
+}
+
+copy_tools_payload() {
+ local dest="$1"
+ echo "*" > "$dest/.gitignore"
+ python - "$tools_source_root" "$dest" "${tools_relative_paths[@]}" <<'PY'
+import shutil
+import sys
+from pathlib import Path
+
+root = Path(sys.argv[1]).resolve()
+dest = Path(sys.argv[2]).resolve()
+paths = sys.argv[3:]
+
+for rel in paths:
+ src = root / rel
+ if not src.exists():
+ raise SystemExit(f"tool path missing: {rel}")
+ target = dest / rel
+ if src.is_dir():
+ shutil.copytree(src, target, dirs_exist_ok=True)
+ else:
+ target.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(src, target)
+PY
+}
+
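+# Copy the automation bundle into the workspace; skip the copy when the hash
+# recorded in .agent-tools/.version already matches the current sources.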
+prepare_tools_copy() {
+ local workspace_path="$1"
+ local force_copy="${2:-false}"
+
+ local tools_dest="$workspace_path/.agent-tools"
+ TOOLS_COPY_PATH="$tools_dest"
+
+ local desired_hash
+ desired_hash=$(compute_tools_hash) || fail "failed to hash tool sources"
+ TOOLS_VERSION="$desired_hash"
+
+ local version_file="$tools_dest/.version"
+ local current_hash=""
+ if [[ -f "$version_file" ]]; then
+ current_hash=$(cat "$version_file")
+ fi
+
+ if [[ "$force_copy" == "true" ]]; then
+ current_hash=""
+ fi
+
+ if [[ "$current_hash" != "$desired_hash" ]]; then
+ case "$tools_dest" in
+ "$workspace_path"/*) ;;
+ *) fail "tool copy path outside workspace: $tools_dest" ;;
+ esac
+ rm -rf "$tools_dest"
+ mkdir -p "$tools_dest"
+ copy_tools_payload "$tools_dest"
+ printf '%s\n' "$desired_hash" > "$version_file"
+ else
+ mkdir -p "$tools_dest"
+ fi
+}
+
+ensure_workspace() {
+ local workspace_id="$1"
+ local workspace_path="$2"
+ local created_var="$3"
+
+ mkdir -p "$workspace_repo_root"
+
+ if workspace_registered "$workspace_id"; then
+ local metadata_path
+ metadata_path=$(metadata_path_for "$workspace_id")
+ if [[ -f "$metadata_path" ]]; then
+ local recorded_path
+ recorded_path=$(python - "$metadata_path" <<'PY'
+import json
+import sys
+path = sys.argv[1]
+with open(path, 'r', encoding='utf-8') as fh:
+ data = json.load(fh)
+print(data.get('workspace_path', ''))
+PY
+ )
+ if [[ -n "$recorded_path" && "$recorded_path" != "$workspace_path" ]]; then
+ fail "workspace '$workspace_id' already exists at '$recorded_path'"
+ fi
+ fi
+ printf -v "$created_var" '%s' "false"
+ else
+ jj workspace add --name "$workspace_id" "$workspace_path"
+ printf -v "$created_var" '%s' "true"
+ fi
+}
+
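+# Merge runtime details into the workspace metadata JSON. Values are passed to
+# the embedded Python through the environment to avoid shell-quoting issues.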
+update_metadata() {
+ local metadata_path="$1"
+ local status="$2"
+ local timestamp
+ timestamp=$(date -Iseconds)
+
+ STATUS="$status" TIMESTAMP="$timestamp" WORKSPACE_ID="$WORKSPACE_ID" REPO_ROOT="$repo_root" \
+ WORKSPACE_PATH="$WORKSPACE_PATH" WORKFLOW_NAME="${WORKFLOW_NAME:-}" COMMAND_JSON="$COMMAND_JSON" \
+ DIRENV_ALLOWED="$DIRENV_ALLOWED" BASE_CHANGE="${BASE_CHANGE:-}" TOOLS_SOURCE="${TOOLS_SOURCE:-}" \
+ TOOLS_COPY="${TOOLS_COPY:-}" TOOLS_VERSION="${TOOLS_VERSION:-}" \
+ python - "$metadata_path" <<'PY'
+import json
+import os
+import sys
+
+path = sys.argv[1]
+now = os.environ["TIMESTAMP"]
+status = os.environ["STATUS"]
+workspace_id = os.environ["WORKSPACE_ID"]
+repo_root = os.environ["REPO_ROOT"]
+workspace_path = os.environ["WORKSPACE_PATH"]
+workflow_name = os.environ.get("WORKFLOW_NAME") or None
+direnv_allowed = os.environ.get("DIRENV_ALLOWED", "false").lower() == "true"
+command = json.loads(os.environ.get("COMMAND_JSON", "[]"))
+base_change = os.environ.get("BASE_CHANGE") or None
+tools_source = os.environ.get("TOOLS_SOURCE") or None
+tools_copy = os.environ.get("TOOLS_COPY") or None
+tools_version = os.environ.get("TOOLS_VERSION") or None
+
+try:
+ with open(path, "r", encoding="utf-8") as fh:
+ data = json.load(fh)
+except Exception:
+ data = {}
+
+if "created_at" not in data:
+ data["created_at"] = now
+
+data.update({
+ "workspace_id": workspace_id,
+ "repo_root": repo_root,
+ "workspace_path": workspace_path,
+ "status": status,
+ "direnv_allowed": direnv_allowed,
+ "command": command,
+ "updated_at": now,
+})
+
+if workflow_name is None:
+ data.pop("workflow", None)
+else:
+ data["workflow"] = workflow_name
+
+if base_change is None:
+ data.pop("base_change", None)
+else:
+ data["base_change"] = base_change
+
+if tools_source is None:
+ data.pop("tools_source", None)
+else:
+ data["tools_source"] = tools_source
+
+if tools_copy is None:
+ data.pop("tools_copy", None)
+else:
+ data["tools_copy"] = tools_copy
+
+if tools_version is None:
+ data.pop("tools_version", None)
+else:
+ data["tools_version"] = tools_version
+
+with open(path, "w", encoding="utf-8") as fh:
+ json.dump(data, fh, indent=2)
+ fh.write("\n")
+PY
+}
+
+run_subcommand() {
+ local workspace_id_raw="$1"
+ shift || fail "missing options and command"
+
+ local workspace_id
+ workspace_id=$(sanitise_workspace_id "$workspace_id_raw")
+ WORKSPACE_ID="$workspace_id"
+
+ local workflow_name=""
+ local base_change=""
+ local cleanup="false"
+ local use_direnv="true"
+ local positional_found="false"
+ local cmd=()
+
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --workflow)
+ [[ $# -ge 2 ]] || fail "--workflow requires a value"
+ workflow_name="$2"
+ shift 2
+ ;;
+ --base-change)
+ [[ $# -ge 2 ]] || fail "--base-change requires a value"
+ base_change="$2"
+ shift 2
+ ;;
+ --cleanup)
+ cleanup="true"
+ shift
+ ;;
+ --no-direnv)
+ use_direnv="false"
+ shift
+ ;;
+ --)
+ shift
+ positional_found="true"
+ cmd=("$@")
+ break
+ ;;
+ *)
+ fail "unknown option '$1'"
+ ;;
+ esac
+ done
+
+ [[ "$positional_found" == "true" ]] || fail "missing command after '--'"
+ [[ ${#cmd[@]} -gt 0 ]] || fail "command must not be empty"
+
+ WORKFLOW_NAME="$workflow_name"
+ BASE_CHANGE="$base_change"
+
+ local workspace_path="$workspace_repo_root/$workspace_id"
+ WORKSPACE_PATH="$workspace_path"
+ local metadata_path="$workspace_path/.agent-tools/.agent-workflow.json"
+ METADATA_PATH="$metadata_path"
+
+ local created_flag
+ ensure_workspace "$workspace_id" "$workspace_path" created_flag
+
+ if [[ -n "$base_change" && "$created_flag" == "true" ]]; then
+ (cd "$workspace_path" && jj edit "$base_change")
+ fi
+
+ prepare_tools_copy "$workspace_path"
+
+ local direnv_status="false"
+ if [[ "$use_direnv" == "true" ]]; then
+ command -v direnv >/dev/null 2>&1 || fail "direnv is required but not installed"
+ (
+ cd "$workspace_path"
+ direnv allow .
+ )
+ direnv_status="true"
+ fi
+ DIRENV_ALLOWED="$direnv_status"
+ TOOLS_SOURCE="$tools_source_root"
+
+ COMMAND_JSON=$(python - <<'PY' "${cmd[@]}"
+import json
+import sys
+print(json.dumps(sys.argv[1:]))
+PY
+)
+
+ TOOLS_COPY="$TOOLS_COPY_PATH"
+ update_metadata "$metadata_path" "running"
+
+ local exit_code=0
+ (
+ cd "$workspace_path"
+ if [[ "$use_direnv" == "true" ]]; then
+ AGENT_WORKSPACE_ID="$workspace_id" \
+ AGENT_WORKSPACE_PATH="$workspace_path" \
+ AGENT_WORKSPACE_METADATA="$metadata_path" \
+ AGENT_WORKSPACE_REPO_ROOT="$repo_root" \
+ AGENT_TOOL_COPY_ROOT="$TOOLS_COPY_PATH" \
+ AGENT_TOOLS_VERSION="$TOOLS_VERSION" \
+ AGENT_TOOLS_SOURCE="$tools_source_root" \
+ direnv exec . "${cmd[@]}"
+ else
+ AGENT_WORKSPACE_ID="$workspace_id" \
+ AGENT_WORKSPACE_PATH="$workspace_path" \
+ AGENT_WORKSPACE_METADATA="$metadata_path" \
+ AGENT_WORKSPACE_REPO_ROOT="$repo_root" \
+ AGENT_TOOL_COPY_ROOT="$TOOLS_COPY_PATH" \
+ AGENT_TOOLS_VERSION="$TOOLS_VERSION" \
+ AGENT_TOOLS_SOURCE="$tools_source_root" \
+ "${cmd[@]}"
+ fi
+ ) || exit_code=$?  # capture failure without tripping `set -e`
+
+ if [[ "$exit_code" == "0" ]]; then
+ update_metadata "$metadata_path" "done"
+ else
+ update_metadata "$metadata_path" "error"
+ fi
+
+ if [[ "$cleanup" == "true" && "$exit_code" == "0" ]]; then
+ jj workspace forget "$workspace_id"
+ rm -rf "$workspace_path"
+ fi
+
+ return "$exit_code"
+}
+
+status_subcommand() {
+ if [[ $# -gt 1 ]]; then
+ fail "status accepts zero or one workspace id"
+ fi
+
+ if [[ $# -eq 1 ]]; then
+ local workspace_id
+ workspace_id=$(sanitise_workspace_id "$1")
+ local metadata_path
+ metadata_path=$(metadata_path_for "$workspace_id")
+ if [[ ! -f "$metadata_path" ]]; then
+ fail "workspace '$workspace_id' has no metadata at '$metadata_path'"
+ fi
+ python - "$metadata_path" <<'PY'
+import json
+import sys
+path = sys.argv[1]
+with open(path, 'r', encoding='utf-8') as fh:
+ data = json.load(fh)
+print(json.dumps(data, indent=2))
+PY
+ return 0
+ fi
+
+ python - "$workspace_repo_root" <<'PY'
+import json
+import os
+import sys
+from pathlib import Path
+
+root = Path(sys.argv[1])
+if not root.exists():
+ print("No workspaces registered.")
+ raise SystemExit(0)
+
+rows = []
+for child in sorted(root.iterdir()):
+ if not child.is_dir():
+ continue
+ meta_path = child / '.agent-tools' / '.agent-workflow.json'
+ if not meta_path.exists():
+ continue
+ try:
+ data = json.loads(meta_path.read_text(encoding='utf-8'))
+ except Exception:
+ status = 'unknown'
+ workflow = '-'
+ updated = '-'
+ else:
+ status = data.get('status') or '-'
+ workflow = data.get('workflow') or '-'
+ updated = data.get('updated_at') or '-'
+ rows.append((child.name, status, workflow, updated))
+
+if not rows:
+ print("No workspaces registered.")
+ raise SystemExit(0)
+
+name_width = max(len(r[0]) for r in rows)
+print(f"{'WORKSPACE'.ljust(name_width)} STATUS WORKFLOW UPDATED")
+for name, status, workflow, updated in rows:
+ print(f"{name.ljust(name_width)} {status.ljust(10)} {workflow.ljust(16)} {updated}")
+PY
+}
+
+shell_subcommand() {
+ if [[ $# -ne 1 ]]; then
+ fail "shell requires a workspace id"
+ fi
+
+ local workspace_id
+ workspace_id=$(sanitise_workspace_id "$1")
+ local workspace_path
+ workspace_path=$(workspace_path_for "$workspace_id")
+ [[ -d "$workspace_path" ]] || fail "workspace '$workspace_id' does not exist"
+
+ if ! workspace_registered "$workspace_id"; then
+ fail "workspace '$workspace_id' is not registered with jj"
+ fi
+
+ command -v direnv >/dev/null 2>&1 || fail "direnv is required but not installed"
+
+ local shell_cmd
+ shell_cmd=${SHELL:-/bin/sh}
+
+ (
+ cd "$workspace_path"
+ direnv allow .
+ echo "Attaching to workspace '$workspace_id' at '$workspace_path'." >&2
+ exec direnv exec . "$shell_cmd" -i
+ )
+}
+
+clean_subcommand() {
+ if [[ $# -ne 1 ]]; then
+ fail "clean requires a workspace id"
+ fi
+
+ local workspace_id
+ workspace_id=$(sanitise_workspace_id "$1")
+ local workspace_path
+ workspace_path=$(workspace_path_for "$workspace_id")
+
+ if workspace_registered "$workspace_id"; then
+ jj workspace forget "$workspace_id"
+ fi
+
+ if [[ -d "$workspace_path" ]]; then
+ case "$workspace_path" in
+ "$workspace_repo_root"/*) ;;
+ *) fail "refusing to remove path outside workspace cache: $workspace_path" ;;
+ esac
+ rm -rf "$workspace_path"
+ fi
+}
+
+sync_tools_subcommand() {
+ if [[ $# -ne 1 ]]; then
+ fail "sync-tools requires a workspace id"
+ fi
+
+ local workspace_id
+ workspace_id=$(sanitise_workspace_id "$1")
+ local workspace_path
+ workspace_path=$(workspace_path_for "$workspace_id")
+
+ [[ -d "$workspace_path" ]] || fail "workspace '$workspace_id' does not exist"
+
+ if ! workspace_registered "$workspace_id"; then
+ fail "workspace '$workspace_id' is not registered with jj"
+ fi
+
+ prepare_tools_copy "$workspace_path" "true"
+
+ local metadata_path
+ metadata_path=$(metadata_path_for "$workspace_id")
+
+ local prev_status="idle"
+ local prev_workflow=""
+ local prev_command="[]"
+ local prev_direnv="false"
+ local prev_base=""
+
+ if [[ -f "$metadata_path" ]]; then
+ local -a meta_info=()
+ mapfile -t meta_info < <(python - "$metadata_path" <<'PY'
+import json
+import sys
+
+path = sys.argv[1]
+with open(path, 'r', encoding='utf-8') as fh:
+ data = json.load(fh)
+
+print(data.get('status', 'idle'))
+print(data.get('workflow') or '')
+print(json.dumps(data.get('command', [])))
+print('true' if data.get('direnv_allowed') else 'false')
+print(data.get('base_change') or '')
+PY
+ )
+ if [[ ${#meta_info[@]} -ge 5 ]]; then
+ prev_status="${meta_info[0]}"
+ prev_workflow="${meta_info[1]}"
+ prev_command="${meta_info[2]}"
+ prev_direnv="${meta_info[3]}"
+ prev_base="${meta_info[4]}"
+ fi
+ fi
+
+ WORKSPACE_ID="$workspace_id"
+ WORKSPACE_PATH="$workspace_path"
+ METADATA_PATH="$metadata_path"
+ WORKFLOW_NAME="$prev_workflow"
+ BASE_CHANGE="$prev_base"
+ COMMAND_JSON="$prev_command"
+ DIRENV_ALLOWED="$prev_direnv"
+ TOOLS_SOURCE="$tools_source_root"
+ TOOLS_COPY="$TOOLS_COPY_PATH"
+ TOOLS_VERSION="$TOOLS_VERSION"
+ update_metadata "$metadata_path" "$prev_status"
+}
+
+main() {
+ if [[ $# -lt 1 ]]; then
+ usage
+ exit 1
+ fi
+
+ local subcommand="$1"
+ shift
+
+ case "$subcommand" in
+ run)
+ [[ $# -ge 1 ]] || fail "run requires a workspace id"
+ run_subcommand "$@"
+ ;;
+ status)
+ status_subcommand "$@"
+ ;;
+ shell)
+ shell_subcommand "$@"
+ ;;
+ clean)
+ clean_subcommand "$@"
+ ;;
+ sync-tools)
+ sync_tools_subcommand "$@"
+ ;;
+ *)
+ usage
+ fail "unknown subcommand '$subcommand'"
+ ;;
+ esac
+}
+
+main "$@"