hello world

This commit is contained in:
kuberwastaken 2026-04-01 01:20:27 +05:30
commit c99507ca1e
84 changed files with 54252 additions and 0 deletions

View file

@ -0,0 +1,33 @@
# cc-tools: tool implementations for the agent runtime (Bash, Brief,
# AskUserQuestion, Skill, Config, ...).
# All versions are inherited from the workspace root via `workspace = true`.
[package]
name = "cc-tools"
version.workspace = true
edition.workspace = true
[dependencies]
# Sibling workspace crates
cc-core = { workspace = true }
cc-api = { workspace = true }
cc-mcp = { workspace = true }
# Async runtime and stream utilities
tokio = { workspace = true }
tokio-stream = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
# Serialization
serde = { workspace = true }
serde_json = { workspace = true }
# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }
# Structured logging
tracing = { workspace = true }
# async fns in trait definitions (Tool trait)
async-trait = { workspace = true }
# Text/file matching and diffing
regex = { workspace = true }
glob = { workspace = true }
walkdir = { workspace = true }
similar = { workspace = true }
tempfile = { workspace = true }
# General utilities
bytes = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
once_cell = { workspace = true }
parking_lot = { workspace = true }
dashmap = { workspace = true }
which = { workspace = true }
dirs = { workspace = true }
reqwest = { workspace = true }

View file

@ -0,0 +1,7 @@
// AgentTool is defined in cc-query to avoid a circular dependency:
// cc-tools → cc-query → cc-tools would be circular.
//
// The AgentTool implementation lives in crates/query/src/agent_tool.rs and is
// re-exported from cc-query as `cc_query::AgentTool`.
//
// This file exists only as a placeholder to keep the directory tidy.

View file

@ -0,0 +1,79 @@
// AskUserQuestion tool: ask the human operator a question and wait for a response.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that surfaces a question to the human operator and waits for an answer.
pub struct AskUserQuestionTool;
/// Deserialized input payload for [`AskUserQuestionTool`].
#[derive(Debug, Deserialize)]
struct AskUserInput {
    /// The question text to display to the user.
    question: String,
    /// Optional multiple-choice options; free-form answer when absent.
    #[serde(default)]
    options: Option<Vec<String>>,
}
#[async_trait]
impl Tool for AskUserQuestionTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_ASK_USER
    }

    fn description(&self) -> &str {
        "Ask the user a question and wait for their response. Use this when you \
need clarification, confirmation, or additional information from the user. \
The question will be displayed and the user can type their answer."
    }

    /// Asking a question never touches the filesystem or runs anything.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "question": {
                    "type": "string",
                    "description": "The question to ask the user"
                },
                "options": {
                    "type": "array",
                    "items": { "type": "string" },
                    "description": "Optional list of choices for multiple-choice questions"
                }
            },
            "required": ["question"]
        })
    }

    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        // Decode the request up front; malformed input is rejected immediately.
        let request = match serde_json::from_value::<AskUserInput>(input) {
            Ok(v) => v,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        debug!(question = %request.question, "Asking user");
        // With no interactive terminal there is nobody to answer the question.
        if ctx.non_interactive {
            return ToolResult::error(
                "Cannot ask user questions in non-interactive mode".to_string(),
            );
        }
        // The prompt/response round-trip happens at the TUI layer, which
        // intercepts this tool result and displays the question. The query
        // loop replaces the placeholder text below with the real answer.
        let metadata = json!({
            "question": request.question,
            "options": request.options,
            "type": "ask_user",
        });
        ToolResult::success(format!("Question: {}", request.question)).with_metadata(metadata)
    }
}

View file

@ -0,0 +1,199 @@
// Bash tool: execute shell commands with timeout and streaming output.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use std::process::Stdio;
use std::time::Duration;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tracing::{debug, warn};
/// Tool that runs shell commands (`bash -c` on Unix, `cmd /C` on Windows).
pub struct BashTool;
/// Deserialized input payload for [`BashTool`].
#[derive(Debug, Deserialize)]
struct BashInput {
    /// The command line to execute.
    command: String,
    /// Optional human-readable description; shown in the permission prompt.
    #[serde(default)]
    description: Option<String>,
    /// Timeout in milliseconds; defaults to 120000 (see `default_timeout`).
    #[serde(default = "default_timeout")]
    timeout: u64,
    /// Requested background execution.
    /// NOTE(review): accepted by the schema but not acted on by `execute` —
    /// commands always run in the foreground. Confirm whether this is wired
    /// up elsewhere or still pending.
    #[serde(default)]
    run_in_background: bool,
}
/// Serde default for `BashInput::timeout`: two minutes, in milliseconds.
fn default_timeout() -> u64 {
    2 * 60 * 1000
}
#[async_trait]
impl Tool for BashTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_BASH
    }

    fn description(&self) -> &str {
        "Executes a given bash command and returns its output. The working directory \
persists between commands, but shell state does not. Avoid using interactive \
commands. Use this tool for running shell commands, scripts, git operations, \
and system tasks."
    }

    /// Arbitrary command execution is the highest-risk permission level.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::Execute
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "The bash command to execute"
                },
                "description": {
                    "type": "string",
                    "description": "Clear, concise description of what this command does"
                },
                "timeout": {
                    "type": "number",
                    "description": "Optional timeout in milliseconds (max 600000, default 120000)"
                },
                "run_in_background": {
                    "type": "boolean",
                    "description": "Set to true to run command in the background"
                }
            },
            "required": ["command"]
        })
    }

    /// Spawn the command under the platform shell, drain stdout/stderr, and
    /// return combined output (stderr prefixed) or an error on non-zero exit,
    /// spawn failure, or timeout.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: BashInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Permission check — prefer the model-supplied description, fall back
        // to the raw command text.
        let desc = params
            .description
            .as_deref()
            .unwrap_or(&params.command);
        if let Err(e) = ctx.check_permission(self.name(), desc, false) {
            return ToolResult::error(e.to_string());
        }
        // Clamp to the documented 10-minute ceiling.
        let timeout_ms = params.timeout.min(600_000);
        let timeout_dur = Duration::from_millis(timeout_ms);
        // NOTE(review): `run_in_background` is still ignored here; commands
        // always run in the foreground.
        let (shell, flag) = if cfg!(windows) {
            ("cmd", "/C")
        } else {
            ("bash", "-c")
        };
        debug!(command = %params.command, "Executing bash command");
        let mut child = match Command::new(shell)
            .arg(flag)
            .arg(&params.command)
            .current_dir(&ctx.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .stdin(Stdio::null())
            .spawn()
        {
            Ok(c) => c,
            Err(e) => return ToolResult::error(format!("Failed to spawn command: {}", e)),
        };
        let stdout = child.stdout.take();
        let stderr = child.stderr.take();
        // Collect output with a timeout.
        //
        // BUG FIX: the two pipes must be drained CONCURRENTLY. Draining stdout
        // to EOF before touching stderr can deadlock: if the child fills the
        // OS stderr pipe buffer while we are still reading stdout, the child
        // blocks on write and stdout never reaches EOF (the hang only resolved
        // at the timeout). `tokio::join!` reads both streams at once.
        let result = tokio::time::timeout(timeout_dur, async {
            let drain_stdout = async {
                let mut collected = Vec::new();
                if let Some(out) = stdout {
                    let mut lines = BufReader::new(out).lines();
                    while let Ok(Some(line)) = lines.next_line().await {
                        collected.push(line);
                    }
                }
                collected
            };
            let drain_stderr = async {
                let mut collected = Vec::new();
                if let Some(err) = stderr {
                    let mut lines = BufReader::new(err).lines();
                    while let Ok(Some(line)) = lines.next_line().await {
                        collected.push(line);
                    }
                }
                collected
            };
            let (stdout_lines, stderr_lines) = tokio::join!(drain_stdout, drain_stderr);
            let status = child.wait().await;
            (stdout_lines, stderr_lines, status)
        })
        .await;
        match result {
            Ok((stdout_lines, stderr_lines, status)) => {
                // -1 stands in for "killed by signal" / unknown exit status.
                let exit_code = status
                    .map(|s| s.code().unwrap_or(-1))
                    .unwrap_or(-1);
                let mut output = String::new();
                if !stdout_lines.is_empty() {
                    output.push_str(&stdout_lines.join("\n"));
                }
                if !stderr_lines.is_empty() {
                    if !output.is_empty() {
                        output.push_str("\n");
                    }
                    output.push_str("STDERR:\n");
                    output.push_str(&stderr_lines.join("\n"));
                }
                if output.is_empty() {
                    output = "(no output)".to_string();
                }
                // Truncate very long output, keeping the head and tail.
                //
                // BUG FIX: byte-index slicing (`&output[..half]`) panics when
                // the cut lands inside a multi-byte UTF-8 sequence. Walk the
                // indices to the nearest char boundary before slicing.
                const MAX_OUTPUT_LEN: usize = 100_000;
                if output.len() > MAX_OUTPUT_LEN {
                    let half = MAX_OUTPUT_LEN / 2;
                    let mut head_end = half;
                    while !output.is_char_boundary(head_end) {
                        head_end -= 1;
                    }
                    let mut tail_start = output.len() - half;
                    while !output.is_char_boundary(tail_start) {
                        tail_start += 1;
                    }
                    output = format!(
                        "{}\n\n... ({} characters truncated) ...\n\n{}",
                        &output[..head_end],
                        output.len() - MAX_OUTPUT_LEN,
                        &output[tail_start..]
                    );
                }
                if exit_code != 0 {
                    ToolResult::error(format!(
                        "Command exited with code {}\n{}",
                        exit_code, output
                    ))
                } else {
                    ToolResult::success(output)
                }
            }
            Err(_) => {
                // Timed out: best-effort kill so the child doesn't linger.
                let _ = child.kill().await;
                ToolResult::error(format!(
                    "Command timed out after {}ms",
                    timeout_ms
                ))
            }
        }
    }
}

View file

@ -0,0 +1,151 @@
// BriefTool: send a formatted message to the user, optionally with file attachments.
//
// This is the model's way of proactively communicating status, completions, or
// findings without being asked. The message is returned as a tool result and
// the TUI renders it prominently.
//
// Status can be:
// "normal" reply to what the user just said
// "proactive" unsolicited update (task done, blocker, status ping)
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::path::Path;
use tracing::debug;
/// Tool that delivers a formatted (Markdown) message to the user, with
/// optional file attachments.
pub struct BriefTool;
/// Deserialized input payload for [`BriefTool`].
#[derive(Debug, Deserialize)]
struct BriefInput {
    /// The Markdown message body.
    message: String,
    /// File paths to attach; each is resolved against the tool context.
    #[serde(default)]
    attachments: Vec<String>,
    /// "normal" or "proactive"; defaults to "normal" (see `default_status`).
    #[serde(default = "default_status")]
    status: String,
}
fn default_status() -> String { "normal".to_string() }
/// Metadata reported back for each successfully resolved attachment.
#[derive(Debug, Serialize)]
struct AttachmentMeta {
    /// Resolved path, as rendered by `Path::display`.
    path: String,
    /// File size in bytes.
    size: u64,
    /// True when the file extension looks like an image format.
    is_image: bool,
}
#[async_trait]
impl Tool for BriefTool {
    fn name(&self) -> &str {
        "Brief"
    }

    fn description(&self) -> &str {
        "Send a formatted message to the user, optionally with file attachments. \
Use status=\"proactive\" when surfacing something the user hasn't asked for \
(task completion, a blocker, an unsolicited update). \
Use status=\"normal\" when replying to something the user just said."
    }

    /// Sending a message requires no filesystem or execution permission.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "message": {
                    "type": "string",
                    "description": "The message to send. Supports Markdown."
                },
                "attachments": {
                    "type": "array",
                    "items": { "type": "string" },
                    "description": "Optional file paths to attach (images, diffs, logs)"
                },
                "status": {
                    "type": "string",
                    "enum": ["normal", "proactive"],
                    "description": "Use 'proactive' for unsolicited updates, 'normal' for direct replies"
                }
            },
            "required": ["message", "status"]
        })
    }

    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let brief = match serde_json::from_value::<BriefInput>(input) {
            Ok(v) => v,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        if brief.message.trim().is_empty() {
            return ToolResult::error("Message cannot be empty.".to_string());
        }
        // Resolve every attachment, keeping successes and failures separate so
        // we can report all problems at once instead of just the first.
        let mut attachments: Vec<AttachmentMeta> = Vec::new();
        let mut failures: Vec<String> = Vec::new();
        for raw in &brief.attachments {
            let full_path = ctx.resolve_path(raw);
            match resolve_attachment(&full_path).await {
                Ok(meta) => attachments.push(meta),
                Err(e) => failures.push(format!("{}: {}", raw, e)),
            }
        }
        if !failures.is_empty() {
            return ToolResult::error(format!(
                "Failed to resolve attachments:\n{}",
                failures.join("\n")
            ));
        }
        debug!(
            status = %brief.status,
            attachments = attachments.len(),
            "Brief message"
        );
        // Payload the TUI uses to render the brief prominently.
        let sent_at = chrono::Utc::now().to_rfc3339();
        let mut payload = json!({
            "message": brief.message,
            "status": brief.status,
            "sentAt": sent_at,
        });
        if !attachments.is_empty() {
            payload["attachments"] = serde_json::to_value(&attachments).unwrap_or_default();
        }
        ToolResult::success(brief.message).with_metadata(payload)
    }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Stat `path` and build its [`AttachmentMeta`].
///
/// Errors (as plain strings) when the path cannot be stat'd or is not a
/// regular file. "Image" detection is purely extension-based.
async fn resolve_attachment(path: &Path) -> Result<AttachmentMeta, String> {
    let meta = tokio::fs::metadata(path).await.map_err(|e| e.to_string())?;
    if !meta.is_file() {
        return Err("not a file".to_string());
    }
    let ext = path.extension().and_then(|e| e.to_str()).map(|e| e.to_lowercase());
    let is_image = matches!(
        ext.as_deref(),
        Some("png" | "jpg" | "jpeg" | "gif" | "webp" | "svg")
    );
    Ok(AttachmentMeta {
        path: path.display().to_string(),
        size: meta.len(),
        is_image,
    })
}

View file

@ -0,0 +1,573 @@
//! Bundled skill definitions for the Skill tool.
//!
//! Each entry in `BUNDLED_SKILLS` mirrors one of the TypeScript
//! `registerXxxSkill()` calls under `src/skills/bundled/`. Only publicly
//! invocable, user-facing skills are included; internal or ANT-only skills
//! (stuck, remember, verify) are omitted from the user-visible list but are
//! still present as documentation stubs so callers can discover them.
//!
//! The `SkillTool` checks bundled skills *before* scanning disk directories,
//! so bundled names take precedence over same-named `.md` files.
/// A single bundled skill definition.
///
/// All fields are `'static` because entries live in the compile-time
/// `BUNDLED_SKILLS` table.
#[derive(Debug, Clone)]
pub struct BundledSkill {
    /// Primary name used to invoke the skill (e.g. `"simplify"`).
    pub name: &'static str,
    /// One-line description shown in `/skill list` output and to the model.
    pub description: &'static str,
    /// Additional names that map to this skill.
    pub aliases: &'static [&'static str],
    /// Optional guidance for the model about when to auto-invoke.
    pub when_to_use: Option<&'static str>,
    /// Placeholder shown next to the skill name in help text.
    pub argument_hint: Option<&'static str>,
    /// The prompt template. `$ARGUMENTS` is replaced at call time.
    /// `$ARGUMENTS_SUFFIX` expands to `": <args>"` when args are non-empty,
    /// or `""` otherwise.
    pub prompt_template: &'static str,
    /// If `Some`, only these tool names are available during the skill run.
    pub allowed_tools: Option<&'static [&'static str]>,
    /// Whether a human user can invoke this skill via `/skill <name>`.
    pub user_invocable: bool,
}
/// All bundled skills.
///
/// The `prompt_template` raw strings below are runtime data sent to the model
/// verbatim — do not reformat them.
///
/// NOTE(review): the module docs above say internal skills (stuck, remember,
/// verify) are "omitted from the user-visible list", yet every entry here sets
/// `user_invocable: true` — confirm which of the two is intended.
pub const BUNDLED_SKILLS: &[BundledSkill] = &[
    // -----------------------------------------------------------------------
    // simplify
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "simplify",
        description: "Review changed code for reuse, quality, and efficiency, then fix any issues found.",
        aliases: &[],
        when_to_use: Some("After writing code, when you want a quality review and cleanup pass."),
        argument_hint: None,
        prompt_template: r#"# Simplify: Code Review and Cleanup
Review all changed files for reuse, quality, and efficiency. Fix any issues found.
## Phase 1: Identify Changes
Run `git diff` (or `git diff HEAD` if there are staged changes) to see what changed.
If there are no git changes, review the most recently modified files that were
mentioned or edited earlier in this conversation.
## Phase 2: Launch Three Review Agents in Parallel
Use the Agent tool to launch all three agents concurrently in a single message.
Pass each agent the full diff so it has complete context.
### Agent 1: Code Reuse Review
For each change:
1. **Search for existing utilities and helpers** that could replace newly written code.
2. **Flag any new function that duplicates existing functionality.**
3. **Flag any inline logic that could use an existing utility** hand-rolled string
manipulation, manual path handling, custom environment checks, etc.
### Agent 2: Code Quality Review
Review the same changes for hacky patterns:
1. **Redundant state** that duplicates existing state.
2. **Parameter sprawl** new parameters instead of restructuring.
3. **Copy-paste with slight variation** that should be unified.
4. **Leaky abstractions** exposing internal details.
5. **Stringly-typed code** where constants or enums already exist.
6. **Unnecessary comments** narrating what code does (not why).
### Agent 3: Efficiency Review
Review the same changes for efficiency:
1. **Unnecessary work** redundant computations, duplicate reads.
2. **Missed concurrency** independent operations run sequentially.
3. **Hot-path bloat** blocking work added to startup or per-request paths.
4. **Recurring no-op updates** unconditional updates in polling loops.
5. **Memory** unbounded data structures, missing cleanup.
## Phase 3: Fix Issues
Wait for all three agents to complete. Aggregate findings and fix each issue.
If a finding is a false positive, note it and move on.
When done, briefly summarize what was fixed (or confirm the code was already clean).
$ARGUMENTS_SUFFIX"#,
        allowed_tools: None,
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // remember
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "remember",
        description: "Review auto-memory entries and propose promotions to CLAUDE.md, CLAUDE.local.md, or shared memory.",
        aliases: &["mem", "save"],
        when_to_use: Some("When the user wants to review, organise, or promote their auto-memory entries."),
        argument_hint: Some("[additional context]"),
        prompt_template: r#"# Memory Review
## Goal
Review the user's memory landscape and produce a clear report of proposed changes,
grouped by action type. Do NOT apply changes present proposals for user approval.
## Steps
### 1. Gather all memory layers
Read CLAUDE.md and CLAUDE.local.md from the project root (if they exist).
Your auto-memory content is already in your system prompt review it there.
### 2. Classify each auto-memory entry
| Destination | What belongs there |
|---|---|
| **CLAUDE.md** | Project conventions all contributors should follow |
| **CLAUDE.local.md** | Personal instructions specific to this user |
| **Stay in auto-memory** | Working notes, temporary context, uncertain patterns |
### 3. Identify cleanup opportunities
- **Duplicates**: auto-memory entries already in CLAUDE.md propose removing
- **Outdated**: CLAUDE.md entries contradicted by newer auto-memory propose updating
- **Conflicts**: contradictions between layers propose resolution
### 4. Present the report
Output a structured report grouped by: Promotions, Cleanup, Ambiguous, No action needed.
## Rules
- Present ALL proposals before making any changes
- Do NOT modify files without explicit user approval
- Ask about ambiguous entries don't guess
$ARGUMENTS_SUFFIX"#,
        allowed_tools: Some(&["Read", "Write", "Edit", "Glob"]),
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // debug
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "debug",
        description: "Enable debug logging for this session and help diagnose issues.",
        aliases: &["diagnose"],
        when_to_use: Some("When there is an error, bug, or unexpected behaviour to investigate."),
        argument_hint: Some("[issue description or error message]"),
        prompt_template: r#"# Debug Skill
Help the user debug an issue they are encountering.
## Issue Description
$ARGUMENTS
## Systematic Debugging Approach
1. **Reproduce** Confirm the exact error / behaviour.
2. **Locate** Find the relevant code (read files, grep for error messages).
3. **Hypothesize** Form 23 hypotheses about the root cause.
4. **Test** Verify each hypothesis systematically.
5. **Fix** Implement the fix for the confirmed root cause.
6. **Verify** Confirm the fix resolves the issue.
## Settings Reference
Settings files are in:
- User: ~/.claude/settings.json
- Project: .claude/settings.json
- Local: .claude/settings.local.json
Read the relevant files before making any changes."#,
        allowed_tools: Some(&["Read", "Grep", "Glob"]),
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // stuck
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "stuck",
        description: "Help get unstuck when you don't know how to proceed.",
        aliases: &["help-me", "unblock"],
        when_to_use: Some("When you are stuck, confused, or don't know how to proceed."),
        argument_hint: Some("[what you're trying to do]"),
        prompt_template: r#"The user is stuck$ARGUMENTS_SUFFIX. Help them get unstuck:
1. Clarify what they are trying to achieve (if unclear).
2. Identify why they might be stuck (missing context, unclear requirements, technical blocker).
3. Suggest 23 concrete next steps in order of likelihood of success.
4. If a technical blocker: propose specific debugging steps or workarounds.
5. Ask clarifying questions if needed.
Be direct and actionable. Focus on unblocking, not on explaining concepts."#,
        allowed_tools: None,
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // batch
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "batch",
        description: "Research and plan a large-scale change, then execute it in parallel across isolated worktree agents that each open a PR.",
        aliases: &[],
        when_to_use: Some("When the user wants to make a sweeping, mechanical change across many files that can be decomposed into independent parallel units."),
        argument_hint: Some("<instruction>"),
        prompt_template: r#"# Batch: Parallel Work Orchestration
You are orchestrating a large, parallelisable change across this codebase.
## User Instruction
$ARGUMENTS
## Phase 1: Research and Plan (Plan Mode)
Enter plan mode, then:
1. **Understand the scope.** Launch subagents to deeply research what this instruction
touches. Find all files, patterns, and call sites that need to change.
2. **Decompose into independent units.** Break the work into 530 self-contained units.
Each unit must be independently implementable in an isolated git worktree and
mergeable on its own without depending on another unit's PR landing first.
3. **Determine the e2e test recipe.** Figure out how a worker can verify its change
actually works end-to-end. If you cannot find a concrete path, ask the user.
4. **Write the plan.** Include: research summary, numbered work units, e2e recipe,
and the exact worker instructions.
## Phase 2: Spawn Workers (After Plan Approval)
Spawn one background agent per work unit using the Agent tool with
`isolation: "worktree"` and `run_in_background: true`. Launch them all in a single
message block so they run in parallel. Each agent prompt must be fully self-contained.
After each agent finishes, parse the `PR: <url>` line from its result and render
a status table. When all agents have reported, print a final summary."#,
        allowed_tools: None,
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // verify
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "verify",
        description: "Verify that code or behaviour is correct.",
        aliases: &["check", "validate"],
        when_to_use: Some("After implementing something, to verify it is correct."),
        argument_hint: Some("[what to verify]"),
        prompt_template: r#"# Verify: $ARGUMENTS
## Verification Steps
1. Read the relevant code / implementation.
2. Check against requirements (if specified).
3. Look for edge cases and error conditions.
4. Run tests if available.
5. Check for common pitfalls: null handling, error propagation, type safety.
6. Report: what was verified, what passed, what failed or is uncertain."#,
        allowed_tools: None,
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // update-config
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "update-config",
        description: "Configure Claude Code settings (hooks, permissions, env vars, behaviours) via settings.json.",
        aliases: &["config-update", "settings"],
        when_to_use: Some("When the user wants to configure automated behaviours, permissions, or settings."),
        argument_hint: Some("<what to configure>"),
        prompt_template: r#"# Update Config Skill
Modify Claude Code configuration by updating settings.json files.
## Settings File Locations
| File | Scope | Use For |
|------|-------|---------|
| `~/.claude/settings.json` | Global | Personal preferences for all projects |
| `.claude/settings.json` | Project | Team-wide hooks, permissions, plugins |
| `.claude/settings.local.json` | Project (local) | Personal overrides for this project |
Settings load in order: user project local (later overrides earlier).
## CRITICAL: Read Before Write
Always read the existing settings file before making changes.
Merge new settings with existing ones never replace the entire file.
## Hook Events
PreToolUse, PostToolUse, PreCompact, PostCompact, Stop, Notification, SessionStart
## User Request
$ARGUMENTS"#,
        allowed_tools: Some(&["Read", "Write", "Edit", "Bash"]),
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // claude-api
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "claude-api",
        description: "Build apps with the Claude API or Anthropic SDK.",
        aliases: &["api", "anthropic-sdk"],
        when_to_use: Some("When the user wants to use the Claude API, Anthropic SDK, or build Claude-powered apps."),
        argument_hint: Some("[what to build]"),
        prompt_template: r#"# Build a Claude API Integration
## User Request
$ARGUMENTS
## Default Models
- Most capable: claude-opus-4-6
- Balanced: claude-sonnet-4-6
- Fast: claude-haiku-4-5-20251001
## SDK Quickstart
**Python**
```python
pip install anthropic
import anthropic
client = anthropic.Anthropic()
```
**TypeScript / Node**
```typescript
npm install @anthropic-ai/sdk
import Anthropic from '@anthropic-ai/sdk';
const client = new Anthropic();
```
## Key API Features
- Streaming (`stream_message`)
- Tool use / function calling
- Extended thinking
- Prompt caching
- Vision (image input)
- Files API
- Batch processing
Use async/await patterns. Follow SDK best practices."#,
        allowed_tools: Some(&["Read", "Grep", "Glob", "WebFetch"]),
        user_invocable: true,
    },
    // -----------------------------------------------------------------------
    // loop
    // -----------------------------------------------------------------------
    BundledSkill {
        name: "loop",
        description: "Run a prompt or slash command on a recurring interval.",
        aliases: &[],
        when_to_use: Some("When the user wants to run something repeatedly on a schedule."),
        argument_hint: Some("[interval] <command>"),
        prompt_template: r#"# /loop — schedule a recurring prompt
Parse the input below into `[interval] <prompt>` and schedule it with CronCreate.
## Parsing (in priority order)
1. **Leading token**: if the first token matches `^\d+[smhd]$` (e.g. `5m`, `2h`), that
is the interval; the rest is the prompt.
2. **Trailing "every" clause**: if the input ends with `every <N><unit>` extract that
as the interval and strip it from the prompt.
3. **Default**: interval is `10m` and the entire input is the prompt.
If the resulting prompt is empty, show usage `/loop [interval] <prompt>` and stop.
## Interval Cron
| Pattern | Cron | Notes |
|---------|------|-------|
| `Nm` (N 59) | `*/N * * * *` | every N minutes |
| `Nh` (N 23) | `0 */N * * *` | every N hours |
| `Nd` | `0 0 */N * *` | every N days at midnight |
| `Ns` | round up to nearest minute | cron min granularity is 1 min |
## Action
1. Call CronCreate with the parsed cron expression and prompt.
2. Confirm what was scheduled, including the cron expression and human-readable cadence.
3. **Immediately execute the parsed prompt now** don't wait for the first cron fire.
## Input
$ARGUMENTS"#,
        allowed_tools: Some(&["CronCreate", "CronList"]),
        user_invocable: true,
    },
];
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/// Find a bundled skill by name or alias (case-insensitive).
///
/// Uses `eq_ignore_ascii_case` instead of allocating a lowercased copy of the
/// query. This is also more robust: the old `name.to_lowercase() == s.name`
/// comparison was only case-insensitive because every bundled name and alias
/// happens to be lowercase; this form stays correct even if a mixed-case
/// entry is ever added.
pub fn find_bundled_skill(name: &str) -> Option<&'static BundledSkill> {
    BUNDLED_SKILLS.iter().find(|s| {
        s.name.eq_ignore_ascii_case(name)
            || s.aliases.iter().any(|a| a.eq_ignore_ascii_case(name))
    })
}
/// Return `(name, description)` pairs for all user-invocable bundled skills.
pub fn user_invocable_skills() -> Vec<(&'static str, &'static str)> {
    let mut listing = Vec::new();
    for skill in BUNDLED_SKILLS {
        if skill.user_invocable {
            listing.push((skill.name, skill.description));
        }
    }
    listing
}
/// Expand a skill's prompt template, substituting `$ARGUMENTS` and
/// `$ARGUMENTS_SUFFIX`.
///
/// - `$ARGUMENTS` → replaced by `args` verbatim (or `""` when empty)
/// - `$ARGUMENTS_SUFFIX` → replaced by `": <args>"` when non-empty, else `""`
pub fn expand_prompt(skill: &BundledSkill, args: &str) -> String {
    let suffix = if args.is_empty() {
        String::new()
    } else {
        format!(": {}", args)
    };
    // `$ARGUMENTS_SUFFIX` must be substituted first: `$ARGUMENTS` is a prefix
    // of it, so doing the plain replacement first would mangle the suffix
    // placeholder.
    let with_suffix = skill.prompt_template.replace("$ARGUMENTS_SUFFIX", &suffix);
    with_suffix.replace("$ARGUMENTS", args)
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn all_skills_have_non_empty_names() {
        for s in BUNDLED_SKILLS {
            assert!(!s.name.is_empty(), "skill has empty name");
        }
    }

    #[test]
    fn all_skills_have_non_empty_descriptions() {
        for s in BUNDLED_SKILLS {
            assert!(
                !s.description.is_empty(),
                "skill '{}' has empty description",
                s.name
            );
        }
    }

    #[test]
    fn all_skills_have_non_empty_prompt_templates() {
        for s in BUNDLED_SKILLS {
            assert!(
                !s.prompt_template.is_empty(),
                "skill '{}' has empty prompt_template",
                s.name
            );
        }
    }

    #[test]
    fn skill_names_are_unique() {
        let mut seen = std::collections::HashSet::new();
        for s in BUNDLED_SKILLS {
            assert!(
                seen.insert(s.name),
                "duplicate skill name: {}",
                s.name
            );
        }
    }

    #[test]
    fn find_by_primary_name() {
        let skill = find_bundled_skill("simplify");
        assert!(skill.is_some());
        assert_eq!(skill.unwrap().name, "simplify");
    }

    #[test]
    fn find_by_alias() {
        let skill = find_bundled_skill("mem");
        assert!(skill.is_some());
        assert_eq!(skill.unwrap().name, "remember");
    }

    #[test]
    fn find_case_insensitive() {
        assert!(find_bundled_skill("SIMPLIFY").is_some());
        assert!(find_bundled_skill("Debug").is_some());
    }

    #[test]
    fn find_missing_returns_none() {
        assert!(find_bundled_skill("nonexistent-skill-xyz").is_none());
    }

    #[test]
    fn expand_prompt_substitutes_arguments() {
        let skill = find_bundled_skill("debug").unwrap();
        let expanded = expand_prompt(skill, "NullPointerException in Foo.java");
        assert!(expanded.contains("NullPointerException in Foo.java"));
        assert!(!expanded.contains("$ARGUMENTS"));
    }

    #[test]
    fn expand_prompt_empty_args_no_residual_placeholder() {
        let skill = find_bundled_skill("simplify").unwrap();
        let expanded = expand_prompt(skill, "");
        assert!(!expanded.contains("$ARGUMENTS"));
        assert!(!expanded.contains("$ARGUMENTS_SUFFIX"));
    }

    #[test]
    fn expand_prompt_suffix_non_empty() {
        let skill = find_bundled_skill("stuck").unwrap();
        let expanded = expand_prompt(skill, "trying to run tests");
        // Should contain ": trying to run tests" from $ARGUMENTS_SUFFIX
        assert!(expanded.contains(": trying to run tests"));
    }

    #[test]
    fn expand_prompt_suffix_empty() {
        let skill = find_bundled_skill("stuck").unwrap();
        let expanded = expand_prompt(skill, "");
        // BUG FIX: the previous assertion `!expanded.contains(": ")` failed
        // even with correct suffix handling, because the stuck template itself
        // contains ": " ("If a technical blocker: propose ..."). Instead,
        // assert that the suffix collapsed to "" exactly where it appeared and
        // that no placeholder text leaked through.
        assert!(expanded.starts_with("The user is stuck. Help them get unstuck:"));
        assert!(!expanded.contains("$ARGUMENTS_SUFFIX"));
    }

    #[test]
    fn user_invocable_skills_non_empty() {
        let skills = user_invocable_skills();
        assert!(!skills.is_empty());
    }

    #[test]
    fn user_invocable_skills_all_marked_true() {
        for (name, _) in user_invocable_skills() {
            let skill = find_bundled_skill(name).unwrap();
            assert!(
                skill.user_invocable,
                "skill '{}' returned by user_invocable_skills() but user_invocable=false",
                name
            );
        }
    }
}

View file

@ -0,0 +1,199 @@
// ConfigTool: get or set Claude Code configuration settings at runtime.
//
// Reads from and persists to ~/.claude/settings.json.
// Supported settings: model, max_tokens, verbose, permission_mode.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
/// Tool that reads or writes Claude Code settings at runtime.
pub struct ConfigTool;
/// Deserialized input payload for [`ConfigTool`].
#[derive(Debug, Deserialize)]
struct ConfigInput {
    /// Setting key; "list" or "help" enumerates the supported settings.
    setting: String,
    /// New value to set; `None` means read the current value.
    value: Option<Value>,
}
/// `(key, human-readable description)` for every supported setting,
/// as printed by the "list"/"help" pseudo-keys.
static SUPPORTED_SETTINGS: &[(&str, &str)] = &[
    ("model", "LLM model to use (e.g. 'claude-opus-4-6')"),
    ("max_tokens", "Maximum output tokens per response"),
    ("verbose", "Enable verbose logging (true/false)"),
    ("permission_mode", "Permission mode: default | accept_edits | bypass_permissions | plan"),
    ("auto_compact", "Auto-compact conversation when context fills (true/false)"),
];
#[async_trait]
impl Tool for ConfigTool {
fn name(&self) -> &str { "Config" }
fn description(&self) -> &str {
"Get or set Claude Code configuration settings. Omit 'value' to read the current value. \
Supported settings: model, max_tokens, verbose, permission_mode, auto_compact. \
Changes persist to ~/.claude/settings.json."
}
fn permission_level(&self) -> PermissionLevel { PermissionLevel::Write }
fn input_schema(&self) -> Value {
json!({
"type": "object",
"properties": {
"setting": {
"type": "string",
"description": "Setting key (e.g. 'model', 'verbose', 'max_tokens', 'permission_mode')"
},
"value": {
"description": "New value to set. Omit to read the current value."
}
},
"required": ["setting"]
})
}
async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
let params: ConfigInput = match serde_json::from_value(input) {
Ok(p) => p,
Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
};
let key = params.setting.trim();
// List all supported settings
if key == "list" || key == "help" {
let lines: Vec<String> = SUPPORTED_SETTINGS
.iter()
.map(|(k, d)| format!(" {}{}", k, d))
.collect();
return ToolResult::success(format!(
"Supported settings:\n{}",
lines.join("\n")
));
}
// Load current settings
let mut settings = match cc_core::config::Settings::load().await {
Ok(s) => s,
Err(e) => return ToolResult::error(format!("Failed to load settings: {}", e)),
};
if let Some(new_value) = params.value {
// SET operation
match key {
"model" => {
let s = match new_value.as_str() {
Some(s) => s.to_string(),
None => return ToolResult::error("'model' must be a string".to_string()),
};
settings.config.model = Some(s.clone());
if let Err(e) = settings.save().await {
return ToolResult::error(format!("Failed to save settings: {}", e));
}
ToolResult::success(format!("model = \"{}\"", s))
}
"max_tokens" => {
let n = match new_value.as_u64() {
Some(n) => n as u32,
None => return ToolResult::error("'max_tokens' must be a positive integer".to_string()),
};
settings.config.max_tokens = Some(n);
if let Err(e) = settings.save().await {
return ToolResult::error(format!("Failed to save settings: {}", e));
}
ToolResult::success(format!("max_tokens = {}", n))
}
"verbose" => {
let b = match new_value.as_bool() {
Some(b) => b,
None => return ToolResult::error("'verbose' must be true or false".to_string()),
};
settings.config.verbose = b;
if let Err(e) = settings.save().await {
return ToolResult::error(format!("Failed to save settings: {}", e));
}
ToolResult::success(format!("verbose = {}", b))
}
"auto_compact" => {
let b = match new_value.as_bool() {
Some(b) => b,
None => return ToolResult::error("'auto_compact' must be true or false".to_string()),
};
settings.config.auto_compact = b;
if let Err(e) = settings.save().await {
return ToolResult::error(format!("Failed to save settings: {}", e));
}
ToolResult::success(format!("auto_compact = {}", b))
}
"permission_mode" => {
use cc_core::config::PermissionMode;
let s = match new_value.as_str() {
Some(s) => s,
None => return ToolResult::error("'permission_mode' must be a string".to_string()),
};
let mode = match s {
"default" => PermissionMode::Default,
"accept_edits" | "acceptEdits" => PermissionMode::AcceptEdits,
"bypass_permissions" | "bypassPermissions" => {
PermissionMode::BypassPermissions
}
"plan" => PermissionMode::Plan,
_ => {
return ToolResult::error(format!(
"Unknown permission_mode '{}'. Use: default | accept_edits | bypass_permissions | plan",
s
))
}
};
settings.config.permission_mode = mode;
if let Err(e) = settings.save().await {
return ToolResult::error(format!("Failed to save settings: {}", e));
}
ToolResult::success(format!("permission_mode = \"{}\"", s))
}
_ => ToolResult::error(format!(
"Unknown setting '{}'. Use setting='list' to see all supported settings.",
key
)),
}
} else {
// GET operation
match key {
"model" => ToolResult::success(format!(
"model = \"{}\"",
settings.config.effective_model()
)),
"max_tokens" => ToolResult::success(format!(
"max_tokens = {}",
settings.config.effective_max_tokens()
)),
"verbose" => ToolResult::success(format!(
"verbose = {}",
settings.config.verbose
)),
"auto_compact" => ToolResult::success(format!(
"auto_compact = {}",
settings.config.auto_compact
)),
"permission_mode" => ToolResult::success(format!(
"permission_mode = \"{}\"",
permission_mode_str(&settings.config.permission_mode)
)),
_ => ToolResult::error(format!(
"Unknown setting '{}'. Use setting='list' to see all supported settings.",
key
)),
}
}
}
}
/// Render a `PermissionMode` as the snake_case string the Config tool reports.
fn permission_mode_str(mode: &cc_core::config::PermissionMode) -> &'static str {
    use cc_core::config::PermissionMode as Pm;
    match mode {
        Pm::Default => "default",
        Pm::AcceptEdits => "accept_edits",
        Pm::BypassPermissions => "bypass_permissions",
        Pm::Plan => "plan",
    }
}

View file

@ -0,0 +1,435 @@
// Cron tools: schedule recurring and one-shot prompts.
//
// CronCreateTool create a new scheduled task (cron expression)
// CronDeleteTool remove an existing scheduled task
// CronListTool list all scheduled tasks
//
// Scheduled tasks are stored in a global in-memory store (session-only).
// Optionally persisted to `.claude/scheduled_tasks.json` (durable mode).
//
// Cron expression format: "M H DoM Mon DoW" (standard 5-field cron in local
// time). For example:
// "*/5 * * * *" = every 5 minutes
// "30 14 * * 1" = every Monday at 14:30
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use chrono::{DateTime, Datelike, Local, Timelike};
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::debug;
use uuid::Uuid;
// ---------------------------------------------------------------------------
// In-memory store
// ---------------------------------------------------------------------------
/// A scheduled prompt stored in the in-memory cron store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CronTask {
    /// Short (8-char) identifier used to address the task in CronDelete/CronList.
    pub id: String,
    /// 5-field cron expression "M H DoM Mon DoW", evaluated in local time.
    pub cron: String,
    /// Prompt to run each time the expression fires.
    pub prompt: String,
    /// false = one-shot: removed from the store after it fires once.
    pub recurring: bool,
    /// true = also persisted to .claude/scheduled_tasks.json.
    pub durable: bool,
    /// Creation time, seconds since the Unix epoch (used for stable list order).
    pub created_at: u64,
}

/// Process-wide task store keyed by task id. Session-only unless individual
/// tasks are marked durable and written out via `persist_tasks_to_disk`.
static CRON_STORE: Lazy<Arc<RwLock<HashMap<String, CronTask>>>> =
    Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
// ---------------------------------------------------------------------------
// Public scheduler API (used by cc-query cron_scheduler)
// ---------------------------------------------------------------------------
/// Check if a cron expression fires at the given minute-resolution datetime.
///
/// Fields are "M H DoM Mon DoW" matched in local time, with DoW 0 = Sunday.
/// Both 0 and 7 are accepted as Sunday in the day-of-week field (the standard
/// cron alias).
pub fn cron_matches(expr: &str, dt: &DateTime<Local>) -> bool {
    let fields: Vec<&str> = expr.split_whitespace().collect();
    if fields.len() != 5 {
        return false;
    }
    let minute = dt.minute();
    let hour = dt.hour();
    let day = dt.day();
    let month = dt.month();
    let dow = dt.weekday().num_days_from_sunday(); // 0=Sun .. 6=Sat

    // Fix: the "7 == Sunday" alias is applied only to the day-of-week field.
    // Previously it lived in cron_range_matches and leaked into every field,
    // so e.g. a minute field of "7" also matched minute 0.
    let dow_matches =
        cron_field_matches(fields[4], dow) || (dow == 0 && cron_field_matches(fields[4], 7));

    cron_field_matches(fields[0], minute)
        && cron_field_matches(fields[1], hour)
        && cron_field_matches(fields[2], day)
        && cron_field_matches(fields[3], month)
        && dow_matches
}

/// Match a single cron field ("*", "*/N", or a comma list of values/ranges)
/// against a concrete value.
fn cron_field_matches(field: &str, value: u32) -> bool {
    if field == "*" {
        return true;
    }
    // "*/N" step form; a zero or unparseable step never matches here.
    if let Some(step_str) = field.strip_prefix("*/") {
        if let Ok(step) = step_str.parse::<u32>() {
            return step > 0 && value % step == 0;
        }
    }
    // Comma-separated list of plain values or "lo-hi" ranges.
    field.split(',').any(|part| cron_range_matches(part, value))
}

/// Match one list item: either a single number or an inclusive "lo-hi" range.
fn cron_range_matches(part: &str, value: u32) -> bool {
    if let Some(dash) = part.find('-') {
        // Unparseable bounds collapse to an empty range (never matches).
        let lo: u32 = part[..dash].parse().unwrap_or(u32::MAX);
        let hi: u32 = part[dash + 1..].parse().unwrap_or(0);
        value >= lo && value <= hi
    } else {
        part.parse::<u32>().map_or(false, |n| n == value)
    }
}
/// Return all tasks whose cron expression fires at `dt`.
///
/// One-shot tasks (recurring == false) are removed from the store once
/// collected, so they fire at most once.
pub async fn pop_due_tasks(dt: &DateTime<Local>) -> Vec<CronTask> {
    let mut store = CRON_STORE.write().await;
    let mut due = Vec::new();
    let mut one_shot_ids = Vec::new();
    for task in store.values() {
        if cron_matches(&task.cron, dt) {
            if !task.recurring {
                one_shot_ids.push(task.id.clone());
            }
            due.push(task.clone());
        }
    }
    for id in &one_shot_ids {
        store.remove(id);
    }
    due
}
// ---------------------------------------------------------------------------
// Simple cron expression parser (5-field)
// ---------------------------------------------------------------------------
/// Validate that a 5-field cron expression is syntactically correct.
///
/// Accepted field forms: "*", "*/N" (N > 0), and comma-separated lists of
/// single values ("N") or inclusive ranges ("N-M"), with per-field numeric
/// ranges M(0-59) H(0-23) DoM(1-31) Mon(1-12) DoW(0-7).
///
/// Fixes over the previous version:
/// - comma lists ("0,30") are now accepted — the matcher already supported
///   them but validation rejected them;
/// - "*/0" is rejected (the matcher treats a zero step as never firing, so
///   accepting it created tasks that could never run);
/// - malformed ranges like "1-2-3" are rejected (they also never fire).
fn validate_cron(expr: &str) -> bool {
    let fields: Vec<&str> = expr.split_whitespace().collect();
    if fields.len() != 5 {
        return false;
    }
    // Numeric bounds per field: M(0-59), H(0-23), DoM(1-31), Mon(1-12), DoW(0-7).
    let ranges = [(0u32, 59), (0, 23), (1, 31), (1, 12), (0, 7)];
    for (i, field) in fields.iter().enumerate() {
        if *field == "*" {
            continue;
        }
        // "*/N" step: N must parse and be non-zero.
        if let Some(step) = field.strip_prefix("*/") {
            match step.parse::<u32>() {
                Ok(n) if n > 0 => continue,
                _ => return false,
            }
        }
        // Comma-separated list of values or ranges: "a", "a-b", "a,b-c,d".
        for item in field.split(',') {
            let parts: Vec<&str> = item.split('-').collect();
            // Only "N" (one part) or "N-M" (two parts) are valid items.
            if parts.len() > 2 {
                return false;
            }
            for part in &parts {
                match part.parse::<u32>() {
                    Ok(n) if n >= ranges[i].0 && n <= ranges[i].1 => {}
                    _ => return false,
                }
            }
        }
    }
    true
}
/// Convert a cron expression to a short human-readable description.
///
/// Only produces a friendly phrase when it can do so faithfully; anything it
/// cannot summarize falls back to `cron(<expr>)`. Fixes over the previous
/// version: "*/5 9 * * *" was described as plain "every 5 minutes" (ignoring
/// the restricted hour), and non-numeric hour/minute fields were rendered as
/// e.g. "daily at */2:30" — both now fall back to the raw expression.
fn cron_to_human(expr: &str) -> String {
    let fields: Vec<&str> = expr.split_whitespace().collect();
    if fields.len() != 5 {
        return expr.to_string();
    }
    let (minute, hour, dom, month, dow) = (fields[0], fields[1], fields[2], fields[3], fields[4]);
    if expr == "* * * * *" {
        return "every minute".to_string();
    }
    let rest_is_wild = hour == "*" && dom == "*" && month == "*" && dow == "*";
    // "every N minutes" is only accurate when no other field is restricted.
    if let Some(n) = minute.strip_prefix("*/") {
        if rest_is_wild {
            return format!("every {} minutes", n);
        }
        return format!("cron({})", expr);
    }
    if rest_is_wild {
        return format!("at minute {} of every hour", minute);
    }
    // "daily at HH:MM" only when hour and minute are plain numbers.
    if dom == "*"
        && month == "*"
        && dow == "*"
        && hour.parse::<u32>().is_ok()
        && minute.parse::<u32>().is_ok()
    {
        return format!("daily at {:0>2}:{:0>2}", hour, minute);
    }
    // Fallback: return the raw expression
    format!("cron({})", expr)
}
// ---------------------------------------------------------------------------
// CronCreate
// ---------------------------------------------------------------------------
/// Tool that registers a new scheduled (cron) task.
pub struct CronCreateTool;

/// Parsed input for CronCreate.
#[derive(Debug, Deserialize)]
struct CronCreateInput {
    /// 5-field cron expression "M H DoM Mon DoW" (local time).
    cron: String,
    /// Prompt to run whenever the expression fires.
    prompt: String,
    /// Defaults to true; false makes the task one-shot.
    #[serde(default = "default_true")]
    recurring: bool,
    /// Defaults to false; true persists the task across sessions.
    #[serde(default)]
    durable: bool,
}

/// Serde default helper for `recurring`.
fn default_true() -> bool { true }
#[async_trait]
impl Tool for CronCreateTool {
    fn name(&self) -> &str {
        "CronCreate"
    }

    fn description(&self) -> &str {
        "Schedule a recurring or one-shot prompt using a standard 5-field cron expression \
         in local time: \"M H DoM Mon DoW\". Examples:\n\
         - \"*/5 * * * *\" = every 5 minutes\n\
         - \"30 14 * * 1\" = every Monday at 14:30\n\
         - \"0 9 15 * *\" = 15th of each month at 09:00\n\
         Use recurring=false for one-shot (fires once then auto-deletes).\n\
         Use durable=true to persist across sessions."
    }

    /// Scheduling itself does not touch user files, so no permission prompt.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "cron": {
                    "type": "string",
                    "description": "5-field cron expression: M H DoM Mon DoW"
                },
                "prompt": {
                    "type": "string",
                    "description": "The prompt to run at each scheduled time"
                },
                "recurring": {
                    "type": "boolean",
                    "description": "true (default) = repeat on every match; false = fire once then delete"
                },
                "durable": {
                    "type": "boolean",
                    "description": "true = persist to .claude/scheduled_tasks.json; false (default) = session only"
                }
            },
            "required": ["cron", "prompt"]
        })
    }

    /// Validates the expression, registers the task in the global store, and
    /// (for durable tasks) persists the store to disk.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: CronCreateInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        if !validate_cron(&params.cron) {
            return ToolResult::error(format!(
                "Invalid cron expression '{}'. Expected 5 fields: M H DoM Mon DoW.",
                params.cron
            ));
        }
        let mut store = CRON_STORE.write().await;
        // Hard cap so a runaway caller can't schedule unbounded work.
        if store.len() >= 50 {
            return ToolResult::error("Too many scheduled jobs (max 50). Cancel one first.".to_string());
        }
        // Short 8-char id (prefix of a UUIDv4) for easy reference.
        let id = Uuid::new_v4().to_string()[..8].to_string();
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        let task = CronTask {
            id: id.clone(),
            cron: params.cron.clone(),
            prompt: params.prompt.clone(),
            recurring: params.recurring,
            durable: params.durable,
            created_at: now,
        };
        // Fix: insert the new task BEFORE persisting. Previously the store
        // was written to disk from a snapshot that did not yet contain this
        // task, so a durable task was silently missing from
        // scheduled_tasks.json until some later write.
        store.insert(id.clone(), task);
        if params.durable {
            // Persistence is best-effort: the in-memory task stays scheduled
            // even if the disk write fails.
            if let Err(e) = persist_tasks_to_disk(&store, ctx).await {
                debug!("Failed to persist cron task to disk: {}", e);
            }
        }
        let human = cron_to_human(&params.cron);
        let where_note = if params.durable {
            "Persisted to .claude/scheduled_tasks.json"
        } else {
            "Session-only (dies when Claude exits)"
        };
        let msg = if params.recurring {
            format!(
                "Scheduled recurring job {} ({}). {}",
                id, human, where_note
            )
        } else {
            format!(
                "Scheduled one-shot task {} ({}). {}. Will fire once then auto-delete.",
                id, human, where_note
            )
        };
        ToolResult::success(msg)
    }
}
// ---------------------------------------------------------------------------
// CronDelete
// ---------------------------------------------------------------------------
/// Tool that cancels a scheduled cron task by id.
pub struct CronDeleteTool;

/// Parsed input for CronDelete.
#[derive(Debug, Deserialize)]
struct CronDeleteInput {
    /// Id of the task to remove (as shown by CronList).
    id: String,
}
#[async_trait]
impl Tool for CronDeleteTool {
fn name(&self) -> &str { "CronDelete" }
fn description(&self) -> &str {
"Cancel a scheduled cron task by its ID. Use CronList to find the ID."
}
fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
fn input_schema(&self) -> Value {
json!({
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "The cron task ID to delete"
}
},
"required": ["id"]
})
}
async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
let params: CronDeleteInput = match serde_json::from_value(input) {
Ok(p) => p,
Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
};
let mut store = CRON_STORE.write().await;
if store.remove(&params.id).is_some() {
ToolResult::success(format!("Deleted cron task '{}'.", params.id))
} else {
ToolResult::error(format!("Cron task '{}' not found.", params.id))
}
}
}
// ---------------------------------------------------------------------------
// CronList
// ---------------------------------------------------------------------------
/// Tool that lists all currently scheduled cron tasks.
pub struct CronListTool;
#[async_trait]
impl Tool for CronListTool {
    fn name(&self) -> &str {
        "CronList"
    }

    fn description(&self) -> &str {
        "List all currently scheduled cron tasks."
    }

    /// Read-only over in-memory state: no permission prompt.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {}
        })
    }

    /// Renders one line per task (id, expression, human description, flags,
    /// truncated prompt), ordered oldest-first by creation time.
    async fn execute(&self, _input: Value, _ctx: &ToolContext) -> ToolResult {
        let store = CRON_STORE.read().await;
        if store.is_empty() {
            return ToolResult::success("No scheduled cron tasks.".to_string());
        }
        let mut tasks: Vec<&CronTask> = store.values().collect();
        tasks.sort_by_key(|t| t.created_at);
        let lines: Vec<String> = tasks
            .iter()
            .map(|t| {
                // Fix: truncate the prompt preview on a character boundary.
                // The previous byte slice (`&t.prompt[..60]`) panicked when
                // byte 60 fell inside a multi-byte UTF-8 character.
                let prompt_preview = if t.prompt.chars().count() > 60 {
                    t.prompt.chars().take(60).collect::<String>()
                } else {
                    t.prompt.clone()
                };
                format!(
                    "{} | {} | {} | recurring={} | durable={} | prompt: {}",
                    t.id,
                    t.cron,
                    cron_to_human(&t.cron),
                    t.recurring,
                    t.durable,
                    prompt_preview
                )
            })
            .collect();
        ToolResult::success(format!(
            "Scheduled tasks ({}):\n\n{}",
            tasks.len(),
            lines.join("\n")
        ))
    }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Persist all durable tasks to `.claude/scheduled_tasks.json` under the
/// session working directory. Errors are reported as display strings.
async fn persist_tasks_to_disk(
    store: &HashMap<String, CronTask>,
    ctx: &ToolContext,
) -> Result<(), String> {
    // Render any displayable error as the String error type used here.
    fn stringify(e: impl std::fmt::Display) -> String {
        e.to_string()
    }
    // Only durable tasks survive on disk; session-only ones are skipped.
    let durable_tasks: Vec<&CronTask> = store.values().filter(|task| task.durable).collect();
    let payload = serde_json::to_string_pretty(&durable_tasks).map_err(stringify)?;
    let claude_dir = ctx.working_dir.join(".claude");
    tokio::fs::create_dir_all(&claude_dir)
        .await
        .map_err(stringify)?;
    tokio::fs::write(claude_dir.join("scheduled_tasks.json"), payload)
        .await
        .map_err(stringify)?;
    Ok(())
}

View file

@ -0,0 +1,64 @@
// EnterPlanMode tool: switch the session into planning (read-only) mode.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that switches the session into read-only planning mode.
pub struct EnterPlanModeTool;

/// Parsed input for EnterPlanMode.
#[derive(Debug, Deserialize)]
struct EnterPlanModeInput {
    /// Optional free-text explanation of why plan mode was entered.
    #[serde(default)]
    reason: Option<String>,
}
#[async_trait]
impl Tool for EnterPlanModeTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_ENTER_PLAN_MODE
    }

    fn description(&self) -> &str {
        "Enter plan mode. In plan mode, the assistant can only read files and \
         think, but cannot execute commands or write files. Use this to step back \
         and plan a complex change before implementing it."
    }

    /// Mode switching itself is harmless: no permission prompt.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "reason": {
                    "type": "string",
                    "description": "Why you want to enter plan mode"
                }
            },
            "required": []
        })
    }

    /// Enters plan mode; malformed input degrades to "no reason given"
    /// rather than failing the call.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let reason = serde_json::from_value::<EnterPlanModeInput>(input)
            .ok()
            .and_then(|p| p.reason);
        debug!(reason = ?reason, "Entering plan mode");
        let msg = match &reason {
            Some(r) => format!("Entered plan mode: {}", r),
            None => "Entered plan mode. Only read-only operations are allowed.".to_string(),
        };
        ToolResult::success(msg).with_metadata(json!({
            "type": "enter_plan_mode",
            "reason": reason,
        }))
    }
}

View file

@ -0,0 +1,63 @@
// ExitPlanMode tool: leave planning mode and return to normal execution.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that leaves planning mode and restores normal execution.
pub struct ExitPlanModeTool;

/// Parsed input for ExitPlanMode.
#[derive(Debug, Deserialize)]
struct ExitPlanModeInput {
    /// Optional free-text summary of the plan that was developed.
    #[serde(default)]
    summary: Option<String>,
}
#[async_trait]
impl Tool for ExitPlanModeTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_EXIT_PLAN_MODE
    }

    fn description(&self) -> &str {
        "Exit plan mode and return to normal execution mode where all tools \
         are available. Optionally provide a summary of the plan."
    }

    /// Mode switching itself is harmless: no permission prompt.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "summary": {
                    "type": "string",
                    "description": "Summary of the plan you developed"
                }
            },
            "required": []
        })
    }

    /// Leaves plan mode; malformed input degrades to "no summary" rather
    /// than failing the call.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let summary = serde_json::from_value::<ExitPlanModeInput>(input)
            .ok()
            .and_then(|p| p.summary);
        debug!(summary = ?summary, "Exiting plan mode");
        let msg = match &summary {
            Some(s) => format!("Exited plan mode. Plan summary: {}", s),
            None => "Exited plan mode. All tools are now available.".to_string(),
        };
        ToolResult::success(msg).with_metadata(json!({
            "type": "exit_plan_mode",
            "summary": summary,
        }))
    }
}

View file

@ -0,0 +1,152 @@
// FileEdit tool: exact string replacement with old/new strings (like sed but
// deterministic). Mirrors the TypeScript Edit tool behaviour.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that performs exact old-string/new-string replacements in a file.
pub struct FileEditTool;

/// Parsed input for the Edit tool.
#[derive(Debug, Deserialize)]
struct FileEditInput {
    /// Path to the file; resolved against the session working directory.
    file_path: String,
    /// Exact text to find (must be unique in the file unless replace_all).
    old_string: String,
    /// Replacement text (must differ from old_string).
    new_string: String,
    /// Replace every occurrence instead of requiring uniqueness.
    #[serde(default)]
    replace_all: bool,
}
#[async_trait]
impl Tool for FileEditTool {
fn name(&self) -> &str {
cc_core::constants::TOOL_NAME_FILE_EDIT
}
fn description(&self) -> &str {
"Performs exact string replacements in files. The edit will FAIL if \
`old_string` is not unique in the file (unless `replace_all` is true). \
You MUST read the file first before editing. Preserve the exact \
indentation as it appears in the file."
}
fn permission_level(&self) -> PermissionLevel {
PermissionLevel::Write
}
fn input_schema(&self) -> Value {
json!({
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "The absolute path to the file to modify"
},
"old_string": {
"type": "string",
"description": "The text to replace (must be unique in the file unless replace_all is true)"
},
"new_string": {
"type": "string",
"description": "The text to replace it with (must be different from old_string)"
},
"replace_all": {
"type": "boolean",
"description": "Replace all occurrences of old_string (default false)"
}
},
"required": ["file_path", "old_string", "new_string"]
})
}
async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
let params: FileEditInput = match serde_json::from_value(input) {
Ok(p) => p,
Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
};
// Validate old != new
if params.old_string == params.new_string {
return ToolResult::error(
"old_string and new_string must be different".to_string(),
);
}
let path = ctx.resolve_path(&params.file_path);
debug!(path = %path.display(), "Editing file");
// Permission check
if let Err(e) = ctx.check_permission(
self.name(),
&format!("Edit {}", path.display()),
false,
) {
return ToolResult::error(e.to_string());
}
// Read current content
let content = match tokio::fs::read_to_string(&path).await {
Ok(c) => c,
Err(e) => {
return ToolResult::error(format!(
"Failed to read file {}: {}",
path.display(),
e
))
}
};
// Count occurrences
let count = content.matches(&params.old_string).count();
if count == 0 {
return ToolResult::error(format!(
"old_string not found in {}. Make sure the string matches exactly, \
including whitespace and indentation.",
path.display()
));
}
if count > 1 && !params.replace_all {
return ToolResult::error(format!(
"old_string appears {} times in {}. Either provide a larger string \
with more surrounding context to make it unique, or set replace_all \
to true to replace every occurrence.",
count,
path.display()
));
}
// Perform replacement
let new_content = if params.replace_all {
content.replace(&params.old_string, &params.new_string)
} else {
// Replace only the first occurrence
content.replacen(&params.old_string, &params.new_string, 1)
};
// Write back
if let Err(e) = tokio::fs::write(&path, &new_content).await {
return ToolResult::error(format!(
"Failed to write file {}: {}",
path.display(),
e
));
}
// Build a diff snippet for the response
let replacements = if params.replace_all { count } else { 1 };
let msg = format!(
"Successfully edited {} ({} replacement{}).",
path.display(),
replacements,
if replacements != 1 { "s" } else { "" }
);
ToolResult::success(msg).with_metadata(json!({
"file_path": path.display().to_string(),
"replacements": replacements,
}))
}
}

View file

@ -0,0 +1,161 @@
// FileRead tool: read files with optional line range, image support, PDF page ranges.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that reads files from disk (with image/PDF detection by extension).
pub struct FileReadTool;

/// Parsed input for Read.
#[derive(Debug, Deserialize)]
struct FileReadInput {
    /// Path to the file; resolved against the session working directory.
    file_path: String,
    /// 1-based line number to start reading from (None = start of file).
    #[serde(default)]
    offset: Option<usize>,
    /// Maximum number of lines to return (None = 2000).
    #[serde(default)]
    limit: Option<usize>,
}
#[async_trait]
impl Tool for FileReadTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_FILE_READ
    }

    fn description(&self) -> &str {
        "Reads a file from the local filesystem. You can access any file directly. \
         By default reads up to 2000 lines from the beginning. Results are returned \
         with line numbers starting at 1. This tool can read images (PNG, JPG) and \
         PDF files."
    }

    // Pure read — no permission prompt required.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "file_path": {
                    "type": "string",
                    "description": "The absolute path to the file to read"
                },
                "offset": {
                    "type": "number",
                    "description": "The line number to start reading from (1-based). Only provide if the file is too large to read at once."
                },
                "limit": {
                    "type": "number",
                    "description": "The number of lines to read. Only provide if the file is too large to read at once."
                }
            },
            "required": ["file_path"]
        })
    }

    /// Reads a window of the file (default: first 2000 lines) and returns it
    /// with right-aligned 1-based line numbers, `cat -n` style. Image and PDF
    /// extensions are short-circuited with a placeholder message rather than
    /// read as text.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: FileReadInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let path = ctx.resolve_path(&params.file_path);
        debug!(path = %path.display(), "Reading file");
        // Check if file exists
        if !path.exists() {
            return ToolResult::error(format!("File not found: {}", path.display()));
        }
        // Check if it's a directory
        if path.is_dir() {
            return ToolResult::error(format!(
                "{} is a directory, not a file. Use Bash with `ls` to list directory contents.",
                path.display()
            ));
        }
        // Detect binary / image files by (lowercased) extension.
        let ext = path
            .extension()
            .and_then(|e| e.to_str())
            .unwrap_or("")
            .to_lowercase();
        let image_exts = ["png", "jpg", "jpeg", "gif", "bmp", "webp", "svg", "ico"];
        if image_exts.contains(&ext.as_str()) {
            // NOTE(review): this function only returns a placeholder — no
            // image capture happens here; presumably that is handled by the
            // caller/pipeline. Confirm.
            return ToolResult::success(format!(
                "[Image file: {}. The image content has been captured for visual analysis.]",
                path.display()
            ));
        }
        if ext == "pdf" {
            // NOTE(review): FileReadInput has no `pages` parameter yet; the
            // message anticipates one. Confirm intended behavior.
            return ToolResult::success(format!(
                "[PDF file: {}. Use the `pages` parameter to read specific page ranges.]",
                path.display()
            ));
        }
        // Read text file
        let content = match tokio::fs::read_to_string(&path).await {
            Ok(c) => c,
            Err(e) => {
                // read_to_string reports invalid UTF-8 as InvalidData —
                // surface that as "binary file" instead of a generic failure.
                if e.kind() == std::io::ErrorKind::InvalidData {
                    return ToolResult::error(format!(
                        "File appears to be binary and cannot be displayed as text: {}",
                        path.display()
                    ));
                }
                return ToolResult::error(format!("Failed to read file: {}", e));
            }
        };
        if content.is_empty() {
            return ToolResult::success(format!(
                "[File {} exists but is empty]",
                path.display()
            ));
        }
        let lines: Vec<&str> = content.lines().collect();
        let total_lines = lines.len();
        let offset = params.offset.unwrap_or(0);
        let limit = params.limit.unwrap_or(2000);
        // Convert 1-based offset to 0-based index (offset 0 behaves like 1).
        let start = if offset > 0 { offset - 1 } else { 0 };
        let end = (start + limit).min(total_lines);
        if start >= total_lines {
            return ToolResult::error(format!(
                "Offset {} exceeds total line count {} in {}",
                offset,
                total_lines,
                path.display()
            ));
        }
        let mut output = String::new();
        // Width of the largest line number shown, for right alignment.
        let width = format!("{}", end).len();
        for (i, line) in lines[start..end].iter().enumerate() {
            let line_num = start + i + 1;
            output.push_str(&format!("{:>width$}\t{}\n", line_num, line, width = width));
        }
        if end < total_lines {
            output.push_str(&format!(
                "\n... ({} more lines, {} total. Use offset/limit to read more.)\n",
                total_lines - end,
                total_lines
            ));
        }
        ToolResult::success(output)
    }
}

View file

@ -0,0 +1,110 @@
// FileWrite tool: write/create files.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that creates or overwrites a file with the given content.
pub struct FileWriteTool;

/// Parsed input for Write.
#[derive(Debug, Deserialize)]
struct FileWriteInput {
    /// Path to write; parent directories are created as needed.
    file_path: String,
    /// Full new file content (any existing content is replaced).
    content: String,
}
#[async_trait]
impl Tool for FileWriteTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_FILE_WRITE
    }

    fn description(&self) -> &str {
        "Writes a file to the local filesystem. This tool will overwrite the existing \
         file if there is one. Prefer the Edit tool for modifying existing files. \
         Only use this tool to create new files or for complete rewrites."
    }

    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::Write
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "file_path": {
                    "type": "string",
                    "description": "The absolute path to the file to write"
                },
                "content": {
                    "type": "string",
                    "description": "The content to write to the file"
                }
            },
            "required": ["file_path", "content"]
        })
    }

    /// Creates parent directories if needed, then writes `content` to the
    /// resolved path (overwriting any existing file). Reports whether the
    /// file was created or overwritten plus its line/byte counts.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: FileWriteInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let path = ctx.resolve_path(&params.file_path);
        debug!(path = %path.display(), "Writing file");
        // Permission check happens before any filesystem mutation.
        if let Err(e) = ctx.check_permission(
            self.name(),
            &format!("Write {}", path.display()),
            false,
        ) {
            return ToolResult::error(e.to_string());
        }
        // Ensure parent directories exist
        if let Some(parent) = path.parent() {
            if !parent.exists() {
                if let Err(e) = tokio::fs::create_dir_all(parent).await {
                    return ToolResult::error(format!(
                        "Failed to create directory {}: {}",
                        parent.display(),
                        e
                    ));
                }
            }
        }
        // Captured before the write so we can report Created vs Wrote.
        let is_new = !path.exists();
        // Write the file
        if let Err(e) = tokio::fs::write(&path, &params.content).await {
            return ToolResult::error(format!(
                "Failed to write file {}: {}",
                path.display(),
                e
            ));
        }
        let line_count = params.content.lines().count();
        let byte_count = params.content.len();
        let action = if is_new { "Created" } else { "Wrote" };
        ToolResult::success(format!(
            "{} {} ({} lines, {} bytes)",
            action,
            path.display(),
            line_count,
            byte_count
        ))
        .with_metadata(json!({
            "file_path": path.display().to_string(),
            "is_new": is_new,
            "lines": line_count,
            "bytes": byte_count,
        }))
    }
}

View file

@ -0,0 +1,127 @@
// Glob tool: fast file pattern matching.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use std::path::PathBuf;
use tracing::debug;
/// Tool that matches file paths against a glob pattern.
pub struct GlobTool;

/// Parsed input for Glob.
#[derive(Debug, Deserialize)]
struct GlobInput {
    /// Glob pattern, e.g. "**/*.rs".
    pattern: String,
    /// Base directory to search from; defaults to the working directory.
    #[serde(default)]
    path: Option<String>,
}
#[async_trait]
impl Tool for GlobTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_GLOB
    }

    fn description(&self) -> &str {
        "Fast file pattern matching tool that works with any codebase size. \
         Supports glob patterns like \"**/*.rs\" or \"src/**/*.ts\". Returns \
         matching file paths sorted by modification time. Use this tool when \
         you need to find files by name patterns."
    }

    // Pure read — no permission prompt required.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "pattern": {
                    "type": "string",
                    "description": "The glob pattern to match files against"
                },
                "path": {
                    "type": "string",
                    "description": "The directory to search in. Defaults to working directory."
                }
            },
            "required": ["pattern"]
        })
    }

    /// Expands the pattern relative to `path` (or the working directory),
    /// sorts matches newest-first by mtime, and returns at most 250 paths.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: GlobInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let base_dir = params
            .path
            .as_ref()
            .map(|p| ctx.resolve_path(p))
            .unwrap_or_else(|| ctx.working_dir.clone());
        debug!(pattern = %params.pattern, dir = %base_dir.display(), "Running glob");
        if !base_dir.exists() || !base_dir.is_dir() {
            return ToolResult::error(format!(
                "Directory not found: {}",
                base_dir.display()
            ));
        }
        // Build the full glob pattern
        let full_pattern = base_dir.join(&params.pattern);
        let pattern_str = full_pattern.to_string_lossy().to_string();
        // On Windows, normalize backslashes to forward slashes for the glob crate
        let pattern_str = pattern_str.replace('\\', "/");
        let entries: Vec<PathBuf> = match glob::glob(&pattern_str) {
            Ok(paths) => paths.filter_map(|p| p.ok()).collect(),
            Err(e) => {
                return ToolResult::error(format!("Invalid glob pattern: {}", e));
            }
        };
        if entries.is_empty() {
            return ToolResult::success(format!(
                "No files matched pattern \"{}\" in {}",
                params.pattern,
                base_dir.display()
            ));
        }
        // Sort by modification time (most recent first).
        // NOTE(review): entries whose metadata cannot be read are silently
        // dropped here, and directories that match the pattern are included
        // in the results — confirm both are intended.
        let mut entries_with_time: Vec<(PathBuf, std::time::SystemTime)> = entries
            .into_iter()
            .filter_map(|p| {
                let mtime = std::fs::metadata(&p).ok()?.modified().ok()?;
                Some((p, mtime))
            })
            .collect();
        entries_with_time.sort_by(|a, b| b.1.cmp(&a.1));
        let total = entries_with_time.len();
        // Cap output so huge trees don't flood the transcript.
        let max_results = 250;
        let truncated = total > max_results;
        let mut output = String::new();
        for (path, _) in entries_with_time.iter().take(max_results) {
            output.push_str(&path.display().to_string());
            output.push('\n');
        }
        if truncated {
            output.push_str(&format!(
                "\n... and {} more files (showing first {})\n",
                total - max_results,
                max_results,
            ));
        }
        ToolResult::success(output)
    }
}

View file

@ -0,0 +1,364 @@
// Grep tool: content search with ripgrep-style options.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use regex::RegexBuilder;
use serde::Deserialize;
use serde_json::{json, Value};
use std::path::PathBuf;
use tracing::debug;
use walkdir::WalkDir;
/// Regex-based content search tool (ripgrep-flavoured options).
pub struct GrepTool;

/// Parsed input for Grep.
///
/// Note the serde renames: the JSON field "type" maps to `file_type`, and the
/// ripgrep-style flags "-i" / "-n" map to `case_insensitive` /
/// `show_line_numbers`.
#[derive(Debug, Deserialize)]
struct GrepInput {
    /// Regular expression to search for.
    pattern: String,
    /// Path to search in; defaults to the working directory.
    #[serde(default)]
    path: Option<String>,
    /// File-type shorthand mapped to extensions via `extensions_for_type`.
    #[serde(default, rename = "type")]
    file_type: Option<String>,
    /// Glob filter applied to candidate files.
    #[serde(default)]
    glob: Option<String>,
    /// "content" | "files_with_matches" (default) | "count".
    #[serde(default = "default_output_mode")]
    output_mode: String,
    /// Presumably lines of context around each match in content mode —
    /// TODO confirm in `execute` (not visible in this chunk).
    #[serde(default)]
    context: Option<usize>,
    /// Case-insensitive matching ("-i").
    #[serde(default, rename = "-i")]
    case_insensitive: bool,
    /// Show line numbers ("-n"); None presumably means a mode-dependent
    /// default — TODO confirm in `execute`.
    #[serde(default, rename = "-n")]
    show_line_numbers: Option<bool>,
    /// Cap on the number of output entries.
    #[serde(default)]
    head_limit: Option<usize>,
    /// Allow patterns to span line boundaries.
    #[serde(default)]
    multiline: bool,
}
/// Serde default for `output_mode`: list matching file paths only.
fn default_output_mode() -> String {
    String::from("files_with_matches")
}
/// Map file type shorthand to extensions (similar to ripgrep --type).
///
/// Unrecognized type names produce an empty list, which the caller treats
/// as "no type filter matched anything".
fn extensions_for_type(t: &str) -> Vec<&'static str> {
    let exts: &[&'static str] = match t {
        "rust" | "rs" => &["rs"],
        "js" => &["js", "jsx", "mjs", "cjs"],
        "ts" => &["ts", "tsx", "mts", "cts"],
        "py" | "python" => &["py", "pyi"],
        "go" => &["go"],
        "java" => &["java"],
        "c" => &["c", "h"],
        "cpp" => &["cpp", "hpp", "cc", "hh", "cxx"],
        "rb" | "ruby" => &["rb"],
        "php" => &["php"],
        "swift" => &["swift"],
        "kt" | "kotlin" => &["kt", "kts"],
        "css" => &["css", "scss", "sass", "less"],
        "html" => &["html", "htm"],
        "json" => &["json"],
        "yaml" | "yml" => &["yaml", "yml"],
        "toml" => &["toml"],
        "xml" => &["xml"],
        "md" | "markdown" => &["md", "markdown"],
        "sh" | "shell" | "bash" => &["sh", "bash", "zsh"],
        _ => &[],
    };
    exts.to_vec()
}
#[async_trait]
impl Tool for GrepTool {
    // Canonical name lives in cc_core::constants so all crates agree on it.
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_GREP
    }
    fn description(&self) -> &str {
        "A powerful search tool built on regex. Supports full regex syntax. \
        Filter files with the `glob` parameter or `type` parameter. Output \
        modes: \"content\" shows matching lines, \"files_with_matches\" shows \
        only file paths (default), \"count\" shows match counts."
    }
    // Pure search: nothing is written, so read-only permission suffices.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }
    // JSON Schema for the tool input; only `pattern` is required.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "pattern": {
                    "type": "string",
                    "description": "The regular expression pattern to search for"
                },
                "path": {
                    "type": "string",
                    "description": "File or directory to search in. Defaults to working directory."
                },
                "type": {
                    "type": "string",
                    "description": "File type to search (e.g. js, py, rust, go)"
                },
                "glob": {
                    "type": "string",
                    "description": "Glob pattern to filter files (e.g. \"*.js\")"
                },
                "output_mode": {
                    "type": "string",
                    "enum": ["content", "files_with_matches", "count"],
                    "description": "Output mode (default: files_with_matches)"
                },
                "context": {
                    "type": "number",
                    "description": "Number of context lines before and after each match"
                },
                "-i": {
                    "type": "boolean",
                    "description": "Case insensitive search"
                },
                "-n": {
                    "type": "boolean",
                    "description": "Show line numbers (for content mode)"
                },
                "head_limit": {
                    "type": "number",
                    "description": "Limit output to first N entries (default 250)"
                },
                "multiline": {
                    "type": "boolean",
                    "description": "Enable multiline mode where . matches newlines"
                }
            },
            "required": ["pattern"]
        })
    }
    /// Parse the input, compile the regex, then search either a single file
    /// or a whole directory tree, formatting results per `output_mode`.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: GrepInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Default to the session working directory when no path is given.
        let search_path = params
            .path
            .as_ref()
            .map(|p| ctx.resolve_path(p))
            .unwrap_or_else(|| ctx.working_dir.clone());
        debug!(pattern = %params.pattern, path = %search_path.display(), "Running grep");
        // Compile regex
        // `multiline` enables both dot-matches-newline and multi-line anchors.
        // NOTE(review): matching below is performed line-by-line, so a pattern
        // can never actually span lines regardless of this flag — confirm intended.
        let regex = match RegexBuilder::new(&params.pattern)
            .case_insensitive(params.case_insensitive)
            .dot_matches_new_line(params.multiline)
            .multi_line(params.multiline)
            .build()
        {
            Ok(r) => r,
            Err(e) => return ToolResult::error(format!("Invalid regex: {}", e)),
        };
        let head_limit = params.head_limit.unwrap_or(250);
        let context_lines = params.context.unwrap_or(0);
        let show_line_numbers = params.show_line_numbers.unwrap_or(true);
        // Collect candidate file extensions
        let type_exts: Vec<&str> = params
            .file_type
            .as_deref()
            .map(extensions_for_type)
            .unwrap_or_default();
        // Build glob matcher for filtering
        let glob_pattern = params.glob.as_deref();
        // If the search path is a single file, just search it.
        if search_path.is_file() {
            return self.search_file(
                &search_path,
                &regex,
                &params.output_mode,
                context_lines,
                show_line_numbers,
            );
        }
        // Walk directory tree
        let mut results: Vec<String> = Vec::new();
        let mut match_count = 0usize;
        for entry in WalkDir::new(&search_path)
            .follow_links(true)
            .into_iter()
            .filter_entry(|e| {
                // Skip hidden directories
                // (also prunes common build/vendor dirs to keep the walk fast)
                let name = e.file_name().to_string_lossy();
                !name.starts_with('.')
                    && name != "node_modules"
                    && name != "target"
                    && name != "__pycache__"
                    && name != ".git"
            })
        {
            let entry = match entry {
                Ok(e) => e,
                Err(_) => continue,
            };
            if !entry.file_type().is_file() {
                continue;
            }
            let path = entry.path();
            // Type filter
            if !type_exts.is_empty() {
                let ext = path
                    .extension()
                    .and_then(|e| e.to_str())
                    .unwrap_or("");
                if !type_exts.contains(&ext) {
                    continue;
                }
            }
            // Glob filter
            // NOTE(review): matched against the file *name* only, so patterns
            // containing path separators never match anything; an invalid
            // glob pattern is silently ignored (all files pass) — confirm intended.
            if let Some(pattern) = glob_pattern {
                let name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
                if let Ok(m) = glob::Pattern::new(pattern) {
                    if !m.matches(name) {
                        continue;
                    }
                }
            }
            // Read file (skip binary)
            // read_to_string errors on non-UTF-8 data, which quietly skips
            // binary files along with unreadable ones.
            let content = match std::fs::read_to_string(path) {
                Ok(c) => c,
                Err(_) => continue,
            };
            let lines: Vec<&str> = content.lines().collect();
            let mut file_matches: Vec<(usize, &str)> = Vec::new();
            for (i, line) in lines.iter().enumerate() {
                if regex.is_match(line) {
                    file_matches.push((i, line));
                }
            }
            if file_matches.is_empty() {
                continue;
            }
            match params.output_mode.as_str() {
                "files_with_matches" => {
                    results.push(path.display().to_string());
                    match_count += 1;
                }
                "count" => {
                    results.push(format!("{}:{}", path.display(), file_matches.len()));
                    match_count += 1;
                }
                "content" => {
                    // Every match emits its own context block; blocks from
                    // nearby matches are not merged, so context lines can
                    // appear more than once.
                    for (line_idx, _) in &file_matches {
                        let start = line_idx.saturating_sub(context_lines);
                        let end = (*line_idx + context_lines + 1).min(lines.len());
                        for ci in start..end {
                            let prefix = if show_line_numbers {
                                format!("{}:{}:", path.display(), ci + 1)
                            } else {
                                format!("{}:", path.display())
                            };
                            results.push(format!("{}{}", prefix, lines[ci]));
                        }
                        if context_lines > 0 {
                            results.push("--".to_string());
                        }
                        match_count += 1;
                    }
                }
                // Unknown output modes fall back to listing matching files.
                _ => {
                    results.push(path.display().to_string());
                    match_count += 1;
                }
            }
            // head_limit counts files in files/count modes and individual
            // matches in content mode; it is checked only after a whole file
            // is processed, so output can slightly exceed the limit.
            if match_count >= head_limit {
                break;
            }
        }
        if results.is_empty() {
            return ToolResult::success(format!(
                "No matches found for pattern \"{}\" in {}",
                params.pattern,
                search_path.display()
            ));
        }
        let output = results.join("\n");
        ToolResult::success(output)
    }
}
impl GrepTool {
fn search_file(
&self,
path: &PathBuf,
regex: &regex::Regex,
output_mode: &str,
context_lines: usize,
show_line_numbers: bool,
) -> ToolResult {
let content = match std::fs::read_to_string(path) {
Ok(c) => c,
Err(e) => return ToolResult::error(format!("Failed to read {}: {}", path.display(), e)),
};
let lines: Vec<&str> = content.lines().collect();
let mut matching_lines: Vec<usize> = Vec::new();
for (i, line) in lines.iter().enumerate() {
if regex.is_match(line) {
matching_lines.push(i);
}
}
if matching_lines.is_empty() {
return ToolResult::success(format!(
"No matches found in {}",
path.display()
));
}
match output_mode {
"files_with_matches" => ToolResult::success(path.display().to_string()),
"count" => ToolResult::success(format!(
"{}:{}",
path.display(),
matching_lines.len()
)),
_ => {
let mut results = Vec::new();
for line_idx in &matching_lines {
let start = line_idx.saturating_sub(context_lines);
let end = (*line_idx + context_lines + 1).min(lines.len());
for ci in start..end {
if show_line_numbers {
results.push(format!("{}:{}", ci + 1, lines[ci]));
} else {
results.push(lines[ci].to_string());
}
}
if context_lines > 0 {
results.push("--".to_string());
}
}
ToolResult::success(results.join("\n"))
}
}
}
}

View file

@ -0,0 +1,451 @@
// cc-tools: All tool implementations for the Claude Code Rust port.
//
// Each tool maps to a capability the LLM can invoke: running shell commands,
// reading/writing/editing files, searching codebases, fetching web pages, etc.
use async_trait::async_trait;
use cc_core::config::PermissionMode;
use cc_core::cost::CostTracker;
use cc_core::permissions::{PermissionDecision, PermissionHandler, PermissionRequest};
use cc_core::types::ToolDefinition;
use serde_json::Value;
use std::path::PathBuf;
use std::sync::Arc;
// Sub-modules each contains a full tool implementation.
pub mod ask_user;
pub mod bash;
pub mod brief;
pub mod config_tool;
pub mod cron;
pub mod enter_plan_mode;
pub mod exit_plan_mode;
pub mod file_edit;
pub mod file_read;
pub mod file_write;
pub mod glob_tool;
pub mod grep_tool;
pub mod mcp_resources;
pub mod todo_write;
pub mod notebook_edit;
pub mod powershell;
pub mod send_message;
pub mod bundled_skills;
pub mod skill_tool;
pub mod sleep;
pub mod tasks;
pub mod tool_search;
pub mod web_fetch;
pub mod web_search;
pub mod worktree;
// Re-exports for convenience.
pub use ask_user::AskUserQuestionTool;
pub use bash::BashTool;
pub use brief::BriefTool;
pub use config_tool::ConfigTool;
pub use cron::{CronCreateTool, CronDeleteTool, CronListTool};
pub use enter_plan_mode::EnterPlanModeTool;
pub use exit_plan_mode::ExitPlanModeTool;
pub use file_edit::FileEditTool;
pub use file_read::FileReadTool;
pub use file_write::FileWriteTool;
pub use glob_tool::GlobTool;
pub use grep_tool::GrepTool;
pub use mcp_resources::{ListMcpResourcesTool, ReadMcpResourceTool};
pub use todo_write::TodoWriteTool;
pub use notebook_edit::NotebookEditTool;
pub use powershell::PowerShellTool;
pub use send_message::{SendMessageTool, drain_inbox, peek_inbox};
pub use skill_tool::SkillTool;
pub use sleep::SleepTool;
pub use tasks::{TaskCreateTool, TaskGetTool, TaskListTool, TaskOutputTool, TaskStopTool, TaskUpdateTool};
pub use tool_search::ToolSearchTool;
pub use web_fetch::WebFetchTool;
pub use web_search::WebSearchTool;
pub use worktree::{EnterWorktreeTool, ExitWorktreeTool};
// ---------------------------------------------------------------------------
// Core trait & types
// ---------------------------------------------------------------------------
/// The result of executing a tool.
///
/// Built via `ToolResult::success` / `ToolResult::error`; `content` is what
/// gets sent back to the model as the tool result.
#[derive(Debug, Clone)]
pub struct ToolResult {
    /// Content to send back to the model as the tool result.
    pub content: String,
    /// Whether this invocation was an error.
    pub is_error: bool,
    /// Optional structured metadata (for the TUI to render diffs, etc.).
    pub metadata: Option<Value>,
}
impl ToolResult {
    /// Internal constructor shared by `success` and `error`.
    fn build(content: String, is_error: bool) -> Self {
        Self {
            content,
            is_error,
            metadata: None,
        }
    }

    /// Construct a successful (non-error) result.
    pub fn success(content: impl Into<String>) -> Self {
        Self::build(content.into(), false)
    }

    /// Construct an error result.
    pub fn error(content: impl Into<String>) -> Self {
        Self::build(content.into(), true)
    }

    /// Attach structured metadata, builder-style.
    pub fn with_metadata(mut self, meta: Value) -> Self {
        self.metadata = Some(meta);
        self
    }
}
/// Permission level required by a tool.
///
/// Each tool declares its level via `Tool::permission_level`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PermissionLevel {
    /// No permission needed (read-only, purely informational).
    None,
    /// Read-only access to the filesystem or network.
    ReadOnly,
    /// Write access to the filesystem.
    Write,
    /// Arbitrary command execution.
    Execute,
    /// Potentially dangerous (e.g., bypass sandbox).
    Dangerous,
}
/// Shared context passed to every tool invocation.
#[derive(Clone)]
pub struct ToolContext {
    /// Base directory against which relative paths are resolved.
    pub working_dir: PathBuf,
    /// Permission mode for the current session.
    pub permission_mode: PermissionMode,
    /// Handler consulted by `check_permission` before privileged operations.
    pub permission_handler: Arc<dyn PermissionHandler>,
    /// Session cost tracker (see `cc_core::cost`).
    pub cost_tracker: Arc<CostTracker>,
    /// Identifier of the current session.
    pub session_id: String,
    /// If true, suppress interactive prompts (batch / CI mode).
    pub non_interactive: bool,
    /// Optional MCP manager for ListMcpResources / ReadMcpResource tools.
    pub mcp_manager: Option<Arc<cc_mcp::McpManager>>,
    /// Full runtime configuration, including configured event hooks
    /// (PreToolUse, PostToolUse, etc.).
    pub config: cc_core::config::Config,
}
impl ToolContext {
    /// Resolve a potentially relative path against the working directory.
    /// Absolute paths are returned unchanged.
    pub fn resolve_path(&self, path: &str) -> PathBuf {
        let candidate = PathBuf::from(path);
        if candidate.is_absolute() {
            return candidate;
        }
        self.working_dir.join(candidate)
    }

    /// Ask the configured permission handler whether this tool invocation
    /// may proceed; Allow / AllowPermanently succeed, everything else is a
    /// `PermissionDenied` error.
    pub fn check_permission(
        &self,
        tool_name: &str,
        description: &str,
        is_read_only: bool,
    ) -> Result<(), cc_core::error::ClaudeError> {
        let request = PermissionRequest {
            tool_name: tool_name.to_string(),
            description: description.to_string(),
            details: None,
            is_read_only,
        };
        match self.permission_handler.request_permission(&request) {
            PermissionDecision::Allow | PermissionDecision::AllowPermanently => Ok(()),
            _ => Err(cc_core::error::ClaudeError::PermissionDenied(format!(
                "Permission denied for tool '{}'",
                tool_name
            ))),
        }
    }
}
/// The trait every tool must implement.
#[async_trait]
pub trait Tool: Send + Sync {
    /// Human-readable name (matches the constant in cc_core::constants).
    fn name(&self) -> &str;
    /// One-line description shown to the LLM.
    fn description(&self) -> &str;
    /// The permission level the tool requires.
    fn permission_level(&self) -> PermissionLevel;
    /// JSON Schema describing the tool's input parameters.
    fn input_schema(&self) -> Value;
    /// Execute the tool with the given JSON input.
    ///
    /// Failures are reported via `ToolResult::error` rather than a `Result`,
    /// so callers always get renderable content back.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult;
    /// Produce a `ToolDefinition` suitable for sending to the API.
    ///
    /// The default implementation bundles name, description, and schema;
    /// implementors normally do not need to override it.
    fn to_definition(&self) -> ToolDefinition {
        ToolDefinition {
            name: self.name().to_string(),
            description: self.description().to_string(),
            input_schema: self.input_schema(),
        }
    }
}
/// Return all built-in tools (excluding AgentTool, which lives in cc-query).
///
/// The registration order is kept stable (it is the order callers see when
/// they enumerate the returned Vec).
pub fn all_tools() -> Vec<Box<dyn Tool>> {
    let mut tools: Vec<Box<dyn Tool>> = Vec::new();
    // Shell & filesystem
    tools.push(Box::new(BashTool));
    tools.push(Box::new(FileReadTool));
    tools.push(Box::new(FileEditTool));
    tools.push(Box::new(FileWriteTool));
    tools.push(Box::new(GlobTool));
    tools.push(Box::new(GrepTool));
    // Web
    tools.push(Box::new(WebFetchTool));
    tools.push(Box::new(WebSearchTool));
    // Notebooks & background tasks
    tools.push(Box::new(NotebookEditTool));
    tools.push(Box::new(TaskCreateTool));
    tools.push(Box::new(TaskGetTool));
    tools.push(Box::new(TaskUpdateTool));
    tools.push(Box::new(TaskListTool));
    tools.push(Box::new(TaskStopTool));
    tools.push(Box::new(TaskOutputTool));
    // Planning & user interaction
    tools.push(Box::new(TodoWriteTool));
    tools.push(Box::new(AskUserQuestionTool));
    tools.push(Box::new(EnterPlanModeTool));
    tools.push(Box::new(ExitPlanModeTool));
    // Platform & scheduling
    tools.push(Box::new(PowerShellTool));
    tools.push(Box::new(SleepTool));
    tools.push(Box::new(CronCreateTool));
    tools.push(Box::new(CronDeleteTool));
    tools.push(Box::new(CronListTool));
    // Worktrees & MCP
    tools.push(Box::new(EnterWorktreeTool));
    tools.push(Box::new(ExitWorktreeTool));
    tools.push(Box::new(ListMcpResourcesTool));
    tools.push(Box::new(ReadMcpResourceTool));
    // Discovery & miscellaneous
    tools.push(Box::new(ToolSearchTool));
    tools.push(Box::new(BriefTool));
    tools.push(Box::new(ConfigTool));
    tools.push(Box::new(SendMessageTool));
    tools.push(Box::new(SkillTool));
    tools
}
/// Find a tool by name (case-sensitive).
///
/// Note: this instantiates the full registry on every call, which is cheap
/// enough for the infrequent lookups it serves.
pub fn find_tool(name: &str) -> Option<Box<dyn Tool>> {
    for tool in all_tools() {
        if tool.name() == name {
            return Some(tool);
        }
    }
    None
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Unit tests: registry invariants, ToolResult constructors, path resolution,
// and per-tool permission levels.
#[cfg(test)]
mod tests {
    use super::*;
    // ---- Tool registry tests ------------------------------------------------
    #[test]
    fn test_all_tools_non_empty() {
        let tools = all_tools();
        assert!(!tools.is_empty(), "all_tools() must return at least one tool");
    }
    #[test]
    fn test_all_tools_have_unique_names() {
        // Duplicate names would make find_tool / API registration ambiguous.
        let tools = all_tools();
        let mut names = std::collections::HashSet::new();
        for tool in &tools {
            assert!(
                names.insert(tool.name().to_string()),
                "Duplicate tool name: {}",
                tool.name()
            );
        }
    }
    #[test]
    fn test_all_tools_have_non_empty_descriptions() {
        for tool in all_tools() {
            assert!(
                !tool.description().is_empty(),
                "Tool '{}' has empty description",
                tool.name()
            );
        }
    }
    #[test]
    fn test_all_tools_have_valid_input_schema() {
        // Each schema must at least look like a JSON Schema object.
        for tool in all_tools() {
            let schema = tool.input_schema();
            assert!(
                schema.is_object(),
                "Tool '{}' input_schema must be a JSON object",
                tool.name()
            );
            assert!(
                schema.get("type").is_some() || schema.get("properties").is_some(),
                "Tool '{}' schema missing type or properties",
                tool.name()
            );
        }
    }
    #[test]
    fn test_find_tool_found() {
        let tool = find_tool("Bash");
        assert!(tool.is_some(), "Should find the Bash tool");
        assert_eq!(tool.unwrap().name(), "Bash");
    }
    #[test]
    fn test_find_tool_not_found() {
        assert!(find_tool("NonExistentTool12345").is_none());
    }
    #[test]
    fn test_find_tool_case_sensitive() {
        // Tool names are case-sensitive — "bash" should not match "Bash"
        assert!(find_tool("bash").is_none());
        assert!(find_tool("Bash").is_some());
    }
    #[test]
    fn test_core_tools_present() {
        // Registry names of the tools the agent cannot function without.
        let expected = [
            "Bash", "Read", "Edit", "Write", "Glob", "Grep",
            "WebFetch", "WebSearch",
            "TodoWrite", "Skill",
        ];
        for name in &expected {
            assert!(
                find_tool(name).is_some(),
                "Expected tool '{}' not found in all_tools()",
                name
            );
        }
    }
    // ---- ToolResult tests ---------------------------------------------------
    #[test]
    fn test_tool_result_success() {
        let r = ToolResult::success("done");
        assert!(!r.is_error);
        assert_eq!(r.content, "done");
        assert!(r.metadata.is_none());
    }
    #[test]
    fn test_tool_result_error() {
        let r = ToolResult::error("something went wrong");
        assert!(r.is_error);
        assert_eq!(r.content, "something went wrong");
    }
    #[test]
    fn test_tool_result_with_metadata() {
        let r = ToolResult::success("ok")
            .with_metadata(serde_json::json!({"file": "foo.rs", "lines": 10}));
        assert!(r.metadata.is_some());
        let meta = r.metadata.unwrap();
        assert_eq!(meta["file"], "foo.rs");
    }
    // ---- ToolContext::resolve_path tests ------------------------------------
    #[test]
    fn test_resolve_path_absolute() {
        use cc_core::config::Config;
        use cc_core::permissions::AutoPermissionHandler;
        // Minimal non-interactive context; the permission handler is never
        // consulted by resolve_path.
        let handler = Arc::new(AutoPermissionHandler {
            mode: cc_core::config::PermissionMode::Default,
        });
        let ctx = ToolContext {
            working_dir: PathBuf::from("/workspace"),
            permission_mode: cc_core::config::PermissionMode::Default,
            permission_handler: handler,
            cost_tracker: cc_core::cost::CostTracker::new(),
            session_id: "test".to_string(),
            non_interactive: true,
            mcp_manager: None,
            config: Config::default(),
        };
        // Absolute paths pass through unchanged
        let resolved = ctx.resolve_path("/absolute/path/file.rs");
        assert_eq!(resolved, PathBuf::from("/absolute/path/file.rs"));
    }
    #[test]
    fn test_resolve_path_relative() {
        use cc_core::config::Config;
        use cc_core::permissions::AutoPermissionHandler;
        let handler = Arc::new(AutoPermissionHandler {
            mode: cc_core::config::PermissionMode::Default,
        });
        let ctx = ToolContext {
            working_dir: PathBuf::from("/workspace"),
            permission_mode: cc_core::config::PermissionMode::Default,
            permission_handler: handler,
            cost_tracker: cc_core::cost::CostTracker::new(),
            session_id: "test".to_string(),
            non_interactive: true,
            mcp_manager: None,
            config: Config::default(),
        };
        // Relative paths get joined with working_dir
        let resolved = ctx.resolve_path("src/main.rs");
        assert_eq!(resolved, PathBuf::from("/workspace/src/main.rs"));
    }
    // ---- PermissionLevel tests ---------------------------------------------
    #[test]
    fn test_permission_level_order() {
        // Just verify the variants exist and are distinct
        assert_ne!(PermissionLevel::None, PermissionLevel::ReadOnly);
        assert_ne!(PermissionLevel::Write, PermissionLevel::Execute);
        assert_ne!(PermissionLevel::Execute, PermissionLevel::Dangerous);
    }
    #[test]
    fn test_bash_tool_permission_level() {
        assert_eq!(BashTool.permission_level(), PermissionLevel::Execute);
    }
    #[test]
    fn test_file_read_permission_level() {
        assert_eq!(FileReadTool.permission_level(), PermissionLevel::ReadOnly);
    }
    #[test]
    fn test_file_edit_permission_level() {
        assert_eq!(FileEditTool.permission_level(), PermissionLevel::Write);
    }
    #[test]
    fn test_file_write_permission_level() {
        assert_eq!(FileWriteTool.permission_level(), PermissionLevel::Write);
    }
    // ---- Tool to_definition tests ------------------------------------------
    #[test]
    fn test_tool_to_definition() {
        // The default to_definition implementation must mirror the trait getters.
        let def = BashTool.to_definition();
        assert_eq!(def.name, "Bash");
        assert!(!def.description.is_empty());
        assert!(def.input_schema.is_object());
    }
}

View file

@ -0,0 +1,148 @@
// MCP resource tools: list and read resources from connected MCP servers.
//
// ListMcpResourcesTool enumerate all resources available from MCP servers
// ReadMcpResourceTool read a specific resource by server name + URI
//
// These require an MCP manager to be configured in ToolContext.mcp_manager.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
// ---------------------------------------------------------------------------
// ListMcpResourcesTool
// ---------------------------------------------------------------------------
/// Tool that enumerates resources exposed by connected MCP servers.
pub struct ListMcpResourcesTool;
/// Deserialized arguments for [`ListMcpResourcesTool`].
#[derive(Debug, Deserialize)]
struct ListMcpResourcesInput {
    /// Optional server name; when set, only that server's resources are listed.
    #[serde(default)]
    server: Option<String>,
}
#[async_trait]
impl Tool for ListMcpResourcesTool {
    fn name(&self) -> &str {
        "ListMcpResources"
    }

    fn description(&self) -> &str {
        "List all resources available from connected MCP servers. \
         Optionally filter by server name. \
         Resources represent data that MCP servers expose (files, database records, etc.)."
    }

    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    /// JSON Schema: a single optional `server` filter.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "server": {
                    "type": "string",
                    "description": "Optional server name to filter resources by"
                }
            }
        })
    }

    /// Deserialize the arguments, require a configured MCP manager, then ask
    /// it for resources and return them pretty-printed as JSON.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let args: ListMcpResourcesInput = match serde_json::from_value(input) {
            Ok(a) => a,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let manager = match ctx.mcp_manager.as_ref() {
            Some(m) => m,
            None => {
                return ToolResult::error(
                    "No MCP servers connected. Configure MCP servers in settings.".to_string(),
                )
            }
        };
        let resources = manager.list_all_resources(args.server.as_deref()).await;
        if resources.is_empty() {
            return ToolResult::success(
                "No resources found. MCP servers may still provide tools even if they have no resources."
                    .to_string(),
            );
        }
        debug!(count = resources.len(), "Listed MCP resources");
        ToolResult::success(serde_json::to_string_pretty(&resources).unwrap_or_default())
    }
}
// ---------------------------------------------------------------------------
// ReadMcpResourceTool
// ---------------------------------------------------------------------------
/// Tool that reads a single resource from an MCP server by URI.
pub struct ReadMcpResourceTool;
/// Deserialized arguments for [`ReadMcpResourceTool`].
#[derive(Debug, Deserialize)]
struct ReadMcpResourceInput {
    /// Name of the MCP server to query.
    server: String,
    /// URI of the resource to read (discover via ListMcpResources).
    uri: String,
}
#[async_trait]
impl Tool for ReadMcpResourceTool {
    fn name(&self) -> &str {
        "ReadMcpResource"
    }

    fn description(&self) -> &str {
        "Read a specific resource from an MCP server by URI. \
         Use ListMcpResources to discover available resource URIs."
    }

    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    /// JSON Schema: both `server` and `uri` are required strings.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "server": {
                    "type": "string",
                    "description": "The MCP server name"
                },
                "uri": {
                    "type": "string",
                    "description": "The resource URI to read"
                }
            },
            "required": ["server", "uri"]
        })
    }

    /// Deserialize the arguments, require a configured MCP manager, then
    /// delegate the read to it and pretty-print whatever comes back.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let args: ReadMcpResourceInput = match serde_json::from_value(input) {
            Ok(a) => a,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let manager = match ctx.mcp_manager.as_ref() {
            Some(m) => m,
            None => {
                return ToolResult::error(
                    "No MCP servers connected. Configure MCP servers in settings.".to_string(),
                )
            }
        };
        debug!(server = %args.server, uri = %args.uri, "Reading MCP resource");
        match manager.read_resource(&args.server, &args.uri).await {
            Ok(contents) => {
                ToolResult::success(serde_json::to_string_pretty(&contents).unwrap_or_default())
            }
            Err(e) => ToolResult::error(format!(
                "Failed to read resource '{}' from server '{}': {}",
                args.uri, args.server, e
            )),
        }
    }
}

View file

@ -0,0 +1,298 @@
// NotebookEditTool: edit Jupyter notebook cells (.ipynb files).
//
// Supports three edit modes:
// - replace: modify an existing cell's source
// - insert: add a new cell after a given cell (or at the start)
// - delete: remove a cell
//
// Behaviour mirrors the TypeScript NotebookEditTool.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool for editing Jupyter notebook (.ipynb) cells.
pub struct NotebookEditTool;
/// Deserialized arguments for [`NotebookEditTool`].
#[derive(Debug, Deserialize)]
struct NotebookEditInput {
    /// Path to the .ipynb file (absolute, or relative to the working dir).
    notebook_path: String,
    /// Target cell: a cell id or "cell-N" index. Required for replace/delete;
    /// optional insertion anchor for insert.
    #[serde(default)]
    cell_id: Option<String>,
    /// New cell source. Required for replace and insert modes.
    #[serde(default)]
    new_source: Option<String>,
    /// "code" or "markdown"; used when inserting (default "code").
    #[serde(default = "default_cell_type")]
    cell_type: String,
    /// "replace", "insert", or "delete" (default "replace").
    #[serde(default = "default_edit_mode")]
    edit_mode: String,
}
/// Serde default for `cell_type`: new cells are code cells.
fn default_cell_type() -> String {
    String::from("code")
}

/// Serde default for `edit_mode`: replace an existing cell.
fn default_edit_mode() -> String {
    String::from("replace")
}
#[async_trait]
impl Tool for NotebookEditTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_NOTEBOOK_EDIT
    }
    fn description(&self) -> &str {
        "Edit cells in a Jupyter notebook (.ipynb file). Supports three edit modes:\n\
        - replace: modify an existing cell's source (requires cell_id)\n\
        - insert: add a new cell after a given cell (or at the start if no cell_id)\n\
        - delete: remove a cell (requires cell_id)\n\
        You MUST read the notebook file before editing."
    }
    // Editing a notebook writes to disk.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::Write
    }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "notebook_path": {
                    "type": "string",
                    "description": "Absolute path to the .ipynb notebook file"
                },
                "cell_id": {
                    "type": "string",
                    "description": "Cell ID (UUID or 'cell-N' index). Required for replace/delete."
                },
                "new_source": {
                    "type": "string",
                    "description": "New cell content. Required for replace/insert."
                },
                "cell_type": {
                    "type": "string",
                    "enum": ["code", "markdown"],
                    "description": "Cell type for insert operations (default: code)"
                },
                "edit_mode": {
                    "type": "string",
                    "enum": ["replace", "insert", "delete"],
                    "description": "Edit mode: replace, insert, or delete (default: replace)"
                }
            },
            "required": ["notebook_path"]
        })
    }
    /// Validate input and dispatch the edit, then persist the notebook.
    ///
    /// Order of operations: parse input → check the .ipynb extension →
    /// permission check → read + parse the notebook JSON → apply the edit in
    /// memory → write the file back only when the edit succeeded.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: NotebookEditInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let path = ctx.resolve_path(&params.notebook_path);
        // Validate extension
        if path.extension().and_then(|e| e.to_str()) != Some("ipynb") {
            return ToolResult::error("File must have .ipynb extension".to_string());
        }
        // Permission check
        if let Err(e) = ctx.check_permission(
            self.name(),
            &format!("Edit notebook {}", path.display()),
            false,
        ) {
            return ToolResult::error(e.to_string());
        }
        // Read notebook
        let content = match tokio::fs::read_to_string(&path).await {
            Ok(c) => c,
            Err(e) => return ToolResult::error(format!("Failed to read notebook: {}", e)),
        };
        let mut notebook: Value = match serde_json::from_str(&content) {
            Ok(v) => v,
            Err(e) => return ToolResult::error(format!("Invalid notebook JSON: {}", e)),
        };
        debug!(path = %path.display(), mode = %params.edit_mode, "Editing notebook");
        // Each mode validates its own required parameters before mutating.
        let result = match params.edit_mode.as_str() {
            "replace" => {
                let cell_id = match &params.cell_id {
                    Some(id) => id.clone(),
                    None => return ToolResult::error("cell_id is required for replace mode".to_string()),
                };
                let new_source = match &params.new_source {
                    Some(s) => s.clone(),
                    None => return ToolResult::error("new_source is required for replace mode".to_string()),
                };
                replace_cell(&mut notebook, &cell_id, &new_source)
            }
            "insert" => {
                let new_source = match &params.new_source {
                    Some(s) => s.clone(),
                    None => return ToolResult::error("new_source is required for insert mode".to_string()),
                };
                insert_cell(&mut notebook, params.cell_id.as_deref(), &new_source, &params.cell_type)
            }
            "delete" => {
                let cell_id = match &params.cell_id {
                    Some(id) => id.clone(),
                    None => return ToolResult::error("cell_id is required for delete mode".to_string()),
                };
                delete_cell(&mut notebook, &cell_id)
            }
            other => return ToolResult::error(format!("Unknown edit_mode: {}", other)),
        };
        match result {
            Ok(msg) => {
                // Write back
                // NOTE(review): re-serializing pretty-prints the entire
                // notebook, which may reformat untouched cells — confirm
                // that is acceptable.
                let updated = match serde_json::to_string_pretty(&notebook) {
                    Ok(s) => s,
                    Err(e) => return ToolResult::error(format!("Failed to serialize notebook: {}", e)),
                };
                if let Err(e) = tokio::fs::write(&path, &updated).await {
                    return ToolResult::error(format!("Failed to write notebook: {}", e));
                }
                ToolResult::success(msg)
            }
            Err(e) => ToolResult::error(e),
        }
    }
}
// ---------------------------------------------------------------------------
// Notebook manipulation helpers
// ---------------------------------------------------------------------------
/// Resolve a cell index from "cell-N" notation or return `None` for UUID lookup.
///
/// Yields `Some(N)` only when the id is exactly "cell-" followed by a
/// parseable unsigned integer; anything else signals a UUID-style lookup.
fn parse_cell_index(cell_id: &str) -> Option<usize> {
    let suffix = cell_id.strip_prefix("cell-")?;
    suffix.parse::<usize>().ok()
}
/// Find the position of a cell in the `cells` array by id or "cell-N".
fn find_cell_index(cells: &[Value], cell_id: &str) -> Result<usize, String> {
    // "cell-N" addresses a cell directly by position.
    if let Some(idx) = parse_cell_index(cell_id) {
        return if idx < cells.len() {
            Ok(idx)
        } else {
            Err(format!("Cell index {} is out of range (notebook has {} cells)", idx, cells.len()))
        };
    }
    // Otherwise look for a cell whose "id" field matches exactly.
    cells
        .iter()
        .position(|cell| cell.get("id").and_then(|v| v.as_str()) == Some(cell_id))
        .ok_or_else(|| format!("Cell '{}' not found", cell_id))
}
/// Generate a simple random cell ID (8 hex chars, like nbformat ≥ 4.5).
///
/// Not cryptographically random: derived from the clock's sub-second
/// nanoseconds, XORed with a fixed constant.
fn generate_cell_id() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.subsec_nanos());
    format!("{:08x}", nanos ^ 0xdeadbeef_u32)
}
/// Build a new cell JSON object.
///
/// `"".split_inclusive('\n')` yields no items, so empty source naturally
/// becomes an empty `source` array — no special case required.
fn make_cell(cell_type: &str, source: &str, cell_id: &str) -> Value {
    let source_lines: Vec<Value> = source
        .split_inclusive('\n')
        .map(|line| Value::String(line.to_owned()))
        .collect();
    if cell_type == "markdown" {
        json!({
            "cell_type": "markdown",
            "id": cell_id,
            "metadata": {},
            "source": source_lines
        })
    } else {
        // Anything other than "markdown" is treated as a code cell.
        json!({
            "cell_type": "code",
            "id": cell_id,
            "metadata": {},
            "source": source_lines,
            "outputs": [],
            "execution_count": null
        })
    }
}
/// Overwrite the source of an existing cell; code cells also get their
/// outputs and execution count cleared.
fn replace_cell(notebook: &mut Value, cell_id: &str, new_source: &str) -> Result<String, String> {
    let cells = match notebook.get_mut("cells").and_then(|c| c.as_array_mut()) {
        Some(c) => c,
        None => return Err("Notebook has no 'cells' array".to_string()),
    };
    let idx = find_cell_index(cells, cell_id)?;
    let cell = &mut cells[idx];
    cell["source"] = Value::Array(
        new_source
            .split_inclusive('\n')
            .map(|line| Value::String(line.to_owned()))
            .collect(),
    );
    // Stale outputs would be misleading once the source has changed.
    if cell.get("cell_type").and_then(|t| t.as_str()) == Some("code") {
        cell["outputs"] = Value::Array(Vec::new());
        cell["execution_count"] = Value::Null;
    }
    Ok(format!("Replaced cell '{}' (index {})", cell_id, idx))
}
/// Insert a freshly-generated cell after `after_cell_id`, or at the top of
/// the notebook when no anchor cell is given.
fn insert_cell(
    notebook: &mut Value,
    after_cell_id: Option<&str>,
    new_source: &str,
    cell_type: &str,
) -> Result<String, String> {
    let cells = match notebook.get_mut("cells").and_then(|c| c.as_array_mut()) {
        Some(c) => c,
        None => return Err("Notebook has no 'cells' array".to_string()),
    };
    let insert_at = match after_cell_id {
        Some(id) => find_cell_index(cells, id)? + 1,
        None => 0,
    };
    let new_id = generate_cell_id();
    cells.insert(insert_at, make_cell(cell_type, new_source, &new_id));
    Ok(format!("Inserted {} cell '{}' at position {}", cell_type, new_id, insert_at))
}
/// Remove the cell identified by `cell_id` from the notebook.
fn delete_cell(notebook: &mut Value, cell_id: &str) -> Result<String, String> {
    let cells = match notebook.get_mut("cells").and_then(|c| c.as_array_mut()) {
        Some(c) => c,
        None => return Err("Notebook has no 'cells' array".to_string()),
    };
    let idx = find_cell_index(cells, cell_id)?;
    cells.remove(idx);
    Ok(format!("Deleted cell '{}' (was at index {})", cell_id, idx))
}

View file

@ -0,0 +1,136 @@
// PowerShell tool: execute PowerShell commands (Windows-native).
//
// On Windows, PowerShell provides richer scripting than cmd.exe.
// On non-Windows platforms, attempts to use `pwsh` (PowerShell Core).
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use std::process::Stdio;
use std::time::Duration;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tracing::debug;
/// Tool that runs PowerShell (Windows `powershell`; `pwsh` elsewhere).
pub struct PowerShellTool;
/// Deserialized arguments for [`PowerShellTool`].
#[derive(Debug, Deserialize)]
struct PowerShellInput {
    /// The PowerShell command line to run.
    command: String,
    /// Optional human-readable description; shown in the permission request
    /// in place of the raw command when provided.
    #[serde(default)]
    description: Option<String>,
    /// Timeout in milliseconds (default 120_000; capped at the use site).
    #[serde(default = "default_timeout")]
    timeout: u64,
}
/// Serde default for `PowerShellInput::timeout`: two minutes, in milliseconds.
fn default_timeout() -> u64 {
    120_000
}
#[async_trait]
impl Tool for PowerShellTool {
    fn name(&self) -> &str { "PowerShell" }
    fn description(&self) -> &str {
        "Execute a PowerShell command. Use for Windows-native operations, .NET APIs, \
        registry access, and Windows-specific system administration."
    }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::Execute }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "command": { "type": "string", "description": "The PowerShell command to execute" },
                "description": { "type": "string", "description": "Description of what this command does" },
                "timeout": { "type": "number", "description": "Timeout in ms (default 120000)" }
            },
            "required": ["command"]
        })
    }
    /// Run the command, collecting stdout/stderr line-by-line until the
    /// process exits or the (clamped) timeout elapses.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: PowerShellInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let desc = params.description.as_deref().unwrap_or(&params.command);
        if let Err(e) = ctx.check_permission(self.name(), desc, false) {
            return ToolResult::error(e.to_string());
        }
        // Determine the PowerShell executable: Windows ships `powershell`;
        // other platforms need PowerShell Core (`pwsh`) on PATH.
        let (exe, args) = if cfg!(windows) {
            ("powershell", vec!["-NoProfile", "-NonInteractive", "-Command"])
        } else {
            // PowerShell Core on non-Windows
            ("pwsh", vec!["-NoProfile", "-NonInteractive", "-Command"])
        };
        debug!(command = %params.command, "Executing PowerShell command");
        // Clamp the caller-supplied timeout to 10 minutes.
        let timeout_ms = params.timeout.min(600_000);
        let timeout_dur = Duration::from_millis(timeout_ms);
        let mut child = match Command::new(exe)
            .args(&args)
            .arg(&params.command)
            .current_dir(&ctx.working_dir)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .stdin(Stdio::null())
            .spawn()
        {
            Ok(c) => c,
            Err(e) => return ToolResult::error(format!("Failed to spawn PowerShell: {}", e)),
        };
        let stdout = child.stdout.take();
        let stderr = child.stderr.take();
        let result = tokio::time::timeout(timeout_dur, async {
            // Drain stdout and stderr CONCURRENTLY. The previous sequential
            // version read stdout to EOF before touching stderr; a child that
            // fills the stderr pipe buffer then stalls and stdout never hits
            // EOF, deadlocking until the timeout fires.
            let read_stdout = async {
                let mut collected = Vec::new();
                if let Some(out) = stdout {
                    let mut lines = BufReader::new(out).lines();
                    while let Ok(Some(line)) = lines.next_line().await {
                        collected.push(line);
                    }
                }
                collected
            };
            let read_stderr = async {
                let mut collected = Vec::new();
                if let Some(err) = stderr {
                    let mut lines = BufReader::new(err).lines();
                    while let Ok(Some(line)) = lines.next_line().await {
                        collected.push(line);
                    }
                }
                collected
            };
            let (stdout_lines, stderr_lines) = tokio::join!(read_stdout, read_stderr);
            let status = child.wait().await;
            (stdout_lines, stderr_lines, status)
        }).await;
        match result {
            Ok((stdout_lines, stderr_lines, status)) => {
                // -1 stands in for "no exit code" (killed by signal / wait error).
                let exit_code = status.map(|s| s.code().unwrap_or(-1)).unwrap_or(-1);
                let mut output = stdout_lines.join("\n");
                if !stderr_lines.is_empty() {
                    if !output.is_empty() { output.push('\n'); }
                    output.push_str("STDERR:\n");
                    output.push_str(&stderr_lines.join("\n"));
                }
                if output.is_empty() { output = "(no output)".to_string(); }
                if exit_code != 0 {
                    ToolResult::error(format!("PowerShell exited with code {}\n{}", exit_code, output))
                } else {
                    ToolResult::success(output)
                }
            }
            Err(_) => {
                // Timed out: kill the child so it does not linger.
                let _ = child.kill().await;
                ToolResult::error(format!("PowerShell command timed out after {}ms", timeout_ms))
            }
        }
    }
}

View file

@ -0,0 +1,149 @@
// SendMessageTool: send a message to another agent or broadcast to all.
//
// In the TypeScript version this uses a complex mailbox/swarm system with
// process-level sockets. The Rust port uses a simpler in-process DashMap
// inbox that works for sub-agents spawned via AgentTool.
//
// Messages are stored keyed by recipient name. Other agents can check
// their inbox by calling drain_inbox() or peek_inbox().
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
// ---------------------------------------------------------------------------
// In-process inbox
// ---------------------------------------------------------------------------
/// A single message in the inbox.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentMessage {
    // Sending session's id (taken from ToolContext::session_id).
    pub from: String,
    // Recipient name, or "*" when sent as a broadcast.
    pub to: String,
    // Message body text.
    pub content: String,
    // Unix timestamp in whole seconds when the message was queued.
    pub timestamp: u64,
}
/// Global inbox: recipient_id → queued messages. Process-wide singleton;
/// written by SendMessageTool and consumed via `drain_inbox`/`peek_inbox`.
static INBOX: Lazy<DashMap<String, Vec<AgentMessage>>> = Lazy::new(DashMap::new);
/// Remove and return all messages queued for `recipient`; an empty Vec when
/// the recipient has no inbox entry.
pub fn drain_inbox(recipient: &str) -> Vec<AgentMessage> {
    match INBOX.remove(recipient) {
        Some((_key, messages)) => messages,
        None => Vec::new(),
    }
}
/// Read (without removing) all messages queued for `recipient`; an empty Vec
/// when there is no inbox entry for them.
pub fn peek_inbox(recipient: &str) -> Vec<AgentMessage> {
    INBOX
        .get(recipient)
        .map(|entry| entry.value().clone())
        .unwrap_or_else(Vec::new)
}
// ---------------------------------------------------------------------------
// Tool
// ---------------------------------------------------------------------------
/// Tool for directed or broadcast messaging between in-process agents.
pub struct SendMessageTool;
/// Deserialized arguments for SendMessage.
#[derive(Debug, Deserialize)]
struct SendMessageInput {
    /// Recipient name, or "*" for broadcast.
    to: String,
    /// Message body.
    message: String,
    /// Short preview text shown in the UI; falls back to a truncated
    /// prefix of `message` when absent.
    #[serde(default)]
    summary: Option<String>,
}
#[async_trait]
impl Tool for SendMessageTool {
    fn name(&self) -> &str { "SendMessage" }
    fn description(&self) -> &str {
        "Send a message to another agent by name, or broadcast to all active agents with to=\"*\". \
        Recipients accumulate messages in their inbox and can retrieve them. \
        Use this for coordination between concurrent sub-agents."
    }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "to": {
                    "type": "string",
                    "description": "Recipient agent name or session ID. Use \"*\" to broadcast to all."
                },
                "message": {
                    "type": "string",
                    "description": "Message content"
                },
                "summary": {
                    "type": "string",
                    "description": "5-10 word preview for the UI (optional)"
                }
            },
            "required": ["to", "message"]
        })
    }
    /// Queue the message in the recipient's inbox (or every existing inbox
    /// for a broadcast). Delivery is strictly in-process.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: SendMessageInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        if params.message.is_empty() {
            return ToolResult::error("Message cannot be empty.".to_string());
        }
        // Seconds since the Unix epoch; 0 if the clock predates the epoch.
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        let msg = AgentMessage {
            from: ctx.session_id.clone(),
            to: params.to.clone(),
            content: params.message.clone(),
            timestamp: now,
        };
        // Preview shown in the result: the explicit summary, or the first
        // ~60 bytes of the message. Truncation backs up to a char boundary;
        // the previous fixed byte-offset slice panicked when byte 60 fell
        // inside a multi-byte UTF-8 sequence.
        let preview: &str = match params.summary.as_deref() {
            Some(s) => s,
            None => {
                let s = params.message.as_str();
                let mut end = s.len().min(60);
                while !s.is_char_boundary(end) {
                    end -= 1;
                }
                &s[..end]
            }
        };
        if params.to == "*" {
            // Broadcast: deliver to every existing inbox key. Only agents
            // that already have an inbox entry receive it (possibly
            // including the sender's own inbox).
            let recipients: Vec<String> = INBOX.iter().map(|e| e.key().clone()).collect();
            if recipients.is_empty() {
                return ToolResult::success(
                    "Broadcast queued (no active recipient inboxes yet).".to_string(),
                );
            }
            for key in &recipients {
                INBOX.entry(key.clone()).or_default().push(msg.clone());
            }
            return ToolResult::success(format!(
                "Broadcast to {} agent(s): {}",
                recipients.len(),
                preview
            ));
        }
        // Directed message: create the inbox on first delivery.
        INBOX.entry(params.to.clone()).or_default().push(msg);
        ToolResult::success(format!("Message sent to '{}': {}", params.to, preview))
    }
}

View file

@ -0,0 +1,227 @@
// SkillTool: execute user-defined skill (prompt template) files programmatically.
//
// Skills are Markdown files stored in:
// <project>/.claude/commands/<name>.md
// ~/.claude/commands/<name>.md
//
// Bundled skills (defined in bundled_skills.rs) are checked first before the
// disk directories, so they take precedence over same-named .md files.
//
// The model invokes this tool to expand a skill's prompt inline.
// Supports $ARGUMENTS placeholder substitution.
// Use skill="list" to discover available skills.
use crate::bundled_skills::{expand_prompt, find_bundled_skill, user_invocable_skills};
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use std::path::PathBuf;
use tracing::debug;
/// Tool that expands user-defined skill (prompt template) files.
pub struct SkillTool;
/// Deserialized arguments for the Skill tool.
#[derive(Debug, Deserialize)]
struct SkillInput {
    // Skill name (".md" suffix optional), or "list" to enumerate skills.
    skill: String,
    // Optional text substituted for $ARGUMENTS in the template.
    #[serde(default)]
    args: Option<String>,
}
#[async_trait]
impl Tool for SkillTool {
    fn name(&self) -> &str { "Skill" }
    fn description(&self) -> &str {
        "Execute a skill (custom prompt template) by name. \
        Skills are .md files in .claude/commands/ or ~/.claude/commands/. \
        Use skill=\"list\" to discover available skills. \
        The expanded skill prompt is returned for you to act on."
    }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::ReadOnly }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "skill": {
                    "type": "string",
                    "description": "Skill name (without .md extension), or \"list\" to enumerate skills"
                },
                "args": {
                    "type": "string",
                    "description": "Arguments passed to the skill — replaces $ARGUMENTS in the template"
                }
            },
            "required": ["skill"]
        })
    }
    /// Expand the named skill: bundled registry first, then the disk search
    /// path. `$ARGUMENTS` is replaced by `args` (or the empty string).
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: SkillInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let dirs = skill_search_dirs(ctx);
        if params.skill == "list" {
            return list_skills(&dirs).await;
        }
        // Strip at most one ".md" suffix. trim_end_matches(".md") stripped
        // repeated suffixes, so "notes.md.md" wrongly collapsed to "notes".
        let skill_name = params.skill.strip_suffix(".md").unwrap_or(&params.skill);
        debug!(skill = skill_name, "Loading skill");
        // Check bundled skills first — they take precedence over disk files.
        if let Some(bundled) = find_bundled_skill(skill_name) {
            let args = params.args.as_deref().unwrap_or("");
            let prompt = expand_prompt(bundled, args);
            let prompt = prompt.trim().to_string();
            if prompt.is_empty() {
                return ToolResult::error(format!(
                    "Bundled skill '{}' expanded to empty content.",
                    skill_name
                ));
            }
            return ToolResult::success(prompt);
        }
        let raw = match find_and_read_skill(skill_name, &dirs).await {
            Some(c) => c,
            None => {
                return ToolResult::error(format!(
                    "Skill '{}' not found. Use skill=\"list\" to see available skills.",
                    skill_name
                ));
            }
        };
        // Strip YAML frontmatter if present (--- ... ---)
        let content = strip_frontmatter(&raw);
        // Substitute $ARGUMENTS; missing args expand to the empty string.
        let prompt = content.replace("$ARGUMENTS", params.args.as_deref().unwrap_or(""));
        let prompt = prompt.trim().to_string();
        if prompt.is_empty() {
            return ToolResult::error(format!("Skill '{}' expanded to empty content.", skill_name));
        }
        ToolResult::success(prompt)
    }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Directories searched for skill `.md` files, in priority order:
/// project-level `.claude/commands/` first, then the user's home copy.
fn skill_search_dirs(ctx: &ToolContext) -> Vec<PathBuf> {
    let mut search = Vec::with_capacity(2);
    search.push(ctx.working_dir.join(".claude").join("commands"));
    if let Some(home) = dirs::home_dir() {
        search.push(home.join(".claude").join("commands"));
    }
    search
}
/// Render the list of available skills: bundled skills first, then
/// de-duplicated on-disk skills from the search directories (sorted by name).
async fn list_skills(dirs: &[PathBuf]) -> ToolResult {
    // Start with the bundled skills.
    let mut lines: Vec<String> = Vec::new();
    let bundled = user_invocable_skills();
    for (name, desc) in &bundled {
        // Separator restored: name and description were being concatenated
        // with no delimiter between the two placeholders.
        lines.push(format!(" {} — {} [bundled]", name, desc));
    }
    let bundled_names: Vec<&str> = bundled.iter().map(|(n, _)| *n).collect();
    // Then add disk skills, skipping any that shadow a bundled name.
    let mut disk_skills: Vec<(String, PathBuf)> = Vec::new();
    for dir in dirs {
        match tokio::fs::read_dir(dir).await {
            Ok(mut entries) => {
                while let Ok(Some(entry)) = entries.next_entry().await {
                    let path = entry.path();
                    if path.extension().map_or(false, |e| e == "md") {
                        if let Some(stem) = path.file_stem().and_then(|s| s.to_str()) {
                            let name = stem.to_string();
                            // Deduplicate — project-level shadows user-level;
                            // bundled skills shadow everything.
                            if !disk_skills.iter().any(|(n, _)| n == &name)
                                && !bundled_names.contains(&name.as_str())
                            {
                                disk_skills.push((name, path));
                            }
                        }
                    }
                }
            }
            Err(_) => {} // directory doesn't exist, skip
        }
    }
    disk_skills.sort_by(|a, b| a.0.cmp(&b.0));
    for (name, path) in &disk_skills {
        let desc = read_skill_description(path).await;
        lines.push(format!(" {} — {}", name, desc));
    }
    let total = bundled.len() + disk_skills.len();
    if total == 0 {
        return ToolResult::success(
            "No skills found. Create .md files in .claude/commands/ to define skills.\n\
            Example: .claude/commands/review.md"
                .to_string(),
        );
    }
    ToolResult::success(format!(
        "Available skills ({}):\n{}",
        total,
        lines.join("\n")
    ))
}
/// Return the contents of `<dir>/<name>.md` from the first search directory
/// where the file exists and is readable; `None` when no directory has it.
async fn find_and_read_skill(name: &str, dirs: &[PathBuf]) -> Option<String> {
    let file_name = format!("{}.md", name);
    for dir in dirs {
        match tokio::fs::read_to_string(dir.join(&file_name)).await {
            Ok(content) => return Some(content),
            Err(_) => continue,
        }
    }
    None
}
/// Extract a one-line description from a skill file: the first non-empty,
/// non-heading line of the body, truncated to at most 80 bytes.
async fn read_skill_description(path: &std::path::Path) -> String {
    let Ok(content) = tokio::fs::read_to_string(path).await else {
        return "(no description)".to_string();
    };
    let body = strip_frontmatter(&content);
    // First non-empty line, with any Markdown heading markers stripped.
    for line in body.lines() {
        let t = line.trim().trim_start_matches('#').trim();
        if !t.is_empty() {
            // Truncate to <= 80 bytes, backing up to a char boundary: the
            // previous `&t[..80]` panicked when byte 80 split a multi-byte
            // UTF-8 character.
            let mut end = t.len().min(80);
            while !t.is_char_boundary(end) {
                end -= 1;
            }
            return t[..end].to_string();
        }
    }
    "(no description)".to_string()
}
/// Remove YAML frontmatter delimited by `---` at the start of the file.
/// Returns the content unchanged when there is no opening `---` or no
/// closing `\n---` delimiter.
fn strip_frontmatter(content: &str) -> String {
    match content.strip_prefix("---") {
        Some(after_open) => match after_open.find("\n---") {
            // Skip past the closing delimiter, then any leading newlines.
            Some(close_pos) => after_open[close_pos + 4..]
                .trim_start_matches('\n')
                .to_string(),
            None => content.to_string(),
        },
        None => content.to_string(),
    }
}

View file

@ -0,0 +1,63 @@
// SleepTool: pause execution for a specified duration.
//
// Useful when the model needs to wait between operations (e.g., polling,
// rate limiting, or waiting for external processes). Unlike `Bash(sleep ...)`,
// this does not hold a shell process and can run concurrently with other tools.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use std::time::Duration;
use tracing::debug;
/// Tool that pauses for a caller-specified duration without holding a shell.
pub struct SleepTool;
/// Deserialized arguments for Sleep.
#[derive(Debug, Deserialize)]
struct SleepInput {
    /// Duration in milliseconds (capped at 300_000 = 5 minutes in execute()).
    #[serde(alias = "ms", alias = "duration_ms")]
    ms: u64,
}
#[async_trait]
impl Tool for SleepTool {
    fn name(&self) -> &str { "Sleep" }
    fn description(&self) -> &str {
        // Dash restored — it had been lost, leaving a run-on sentence
        // ("Bash(sleep ...) it doesn't hold ...").
        "Wait for a specified duration in milliseconds. \
        Use instead of Bash(sleep ...) — it doesn't hold a shell process \
        and can run concurrently with other tools. \
        The user can interrupt the sleep at any time."
    }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "ms": {
                    "type": "number",
                    "description": "Duration to sleep in milliseconds (max 300000 = 5 minutes)"
                }
            },
            "required": ["ms"]
        })
    }
    /// Sleep asynchronously for the requested (clamped) duration.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: SleepInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Cap at 5 minutes so a bad input can't stall the agent indefinitely.
        let duration_ms = params.ms.min(300_000);
        debug!(ms = duration_ms, "Sleeping");
        tokio::time::sleep(Duration::from_millis(duration_ms)).await;
        ToolResult::success(format!("Slept for {}ms.", duration_ms))
    }
}

View file

@ -0,0 +1,503 @@
// Task management tools: TaskCreate, TaskGet, TaskUpdate, TaskList, TaskStop, TaskOutput.
//
// Implements a simple in-process task store backed by a global Arc<Mutex<HashMap>>.
// Tasks have id, subject, description, status, owner, blocks/blocked-by dependencies,
// and optional output.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::sync::Arc;
use tracing::debug;
use uuid::Uuid;
// ---------------------------------------------------------------------------
// Task store (global singleton)
// ---------------------------------------------------------------------------
/// Lifecycle states for a task in the in-process store. Serialized as
/// snake_case strings (matching the Display impl below).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum TaskStatus {
    Pending,
    InProgress,
    Completed,
    Deleted,
    Running, // for background shell tasks
    Failed,
}
impl std::fmt::Display for TaskStatus {
    // Render the same snake_case token serde uses on the wire.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            TaskStatus::Pending => write!(f, "pending"),
            TaskStatus::InProgress => write!(f, "in_progress"),
            TaskStatus::Completed => write!(f, "completed"),
            TaskStatus::Deleted => write!(f, "deleted"),
            TaskStatus::Running => write!(f, "running"),
            TaskStatus::Failed => write!(f, "failed"),
        }
    }
}
/// A tracked work item stored in the global task store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Task {
    // UUID string, assigned at creation.
    pub id: String,
    // Brief title.
    pub subject: String,
    // Detailed description of the work.
    pub description: String,
    pub status: TaskStatus,
    // Optional owner (agent name); None until assigned via TaskUpdate.
    pub owner: Option<String>,
    /// IDs of tasks this task blocks (i.e., those tasks depend on this one completing).
    pub blocks: Vec<String>,
    /// IDs of tasks that must complete before this task can start.
    pub blocked_by: Vec<String>,
    // Arbitrary caller-supplied metadata.
    pub metadata: Option<Value>,
    // Output text recorded via TaskUpdate, if any.
    pub output: Option<String>,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub updated_at: chrono::DateTime<chrono::Utc>,
}
impl Task {
    /// Construct a new Pending task with a fresh UUID and identical
    /// created_at/updated_at timestamps.
    fn new(subject: impl Into<String>, description: impl Into<String>) -> Self {
        let now = chrono::Utc::now();
        Self {
            id: Uuid::new_v4().to_string(),
            subject: subject.into(),
            description: description.into(),
            status: TaskStatus::Pending,
            owner: None,
            blocks: vec![],
            blocked_by: vec![],
            metadata: None,
            output: None,
            created_at: now,
            updated_at: now,
        }
    }
    /// Compact JSON view used by TaskList.
    fn to_summary_value(&self) -> Value {
        // NOTE(review): an earlier comment claimed completed dependencies are
        // excluded here, but this is a plain clone — blocked_by is reported
        // verbatim. Confirm whether filtering completed deps was intended.
        let blocked_by = self.blocked_by.clone();
        json!({
            "id": self.id,
            "subject": self.subject,
            "status": self.status.to_string(),
            "owner": self.owner,
            "blocked_by": blocked_by,
        })
    }
    /// Full JSON view used by TaskGet/TaskOutput; timestamps as RFC 3339.
    fn to_full_value(&self) -> Value {
        json!({
            "id": self.id,
            "subject": self.subject,
            "description": self.description,
            "status": self.status.to_string(),
            "owner": self.owner,
            "blocks": self.blocks,
            "blocked_by": self.blocked_by,
            "metadata": self.metadata,
            "output": self.output,
            "created_at": self.created_at.to_rfc3339(),
            "updated_at": self.updated_at.to_rfc3339(),
        })
    }
}
/// Global task store shared across all tool invocations in this process,
/// keyed by task id (UUID string). In-memory only; not persisted.
static TASK_STORE: Lazy<Arc<DashMap<String, Task>>> =
    Lazy::new(|| Arc::new(DashMap::new()));
// ---------------------------------------------------------------------------
// TaskCreate
// ---------------------------------------------------------------------------
/// Tool that creates new tasks in the global task store.
pub struct TaskCreateTool;
/// Deserialized arguments for TaskCreate.
#[derive(Debug, Deserialize)]
struct TaskCreateInput {
    // Brief title for the task.
    subject: String,
    // Detailed description of what needs to be done.
    description: String,
    // Optional arbitrary metadata stored verbatim on the task.
    #[serde(default)]
    metadata: Option<Value>,
}
#[async_trait]
impl Tool for TaskCreateTool {
    fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_CREATE }
    fn description(&self) -> &str { "Create a new task to track work items. Returns the task ID." }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "subject": { "type": "string", "description": "Brief title for the task" },
                "description": { "type": "string", "description": "Detailed description of what needs to be done" },
                "metadata": { "type": "object", "description": "Optional arbitrary metadata" }
            },
            "required": ["subject", "description"]
        })
    }
    /// Create a Pending task in the global store and report its generated id.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: TaskCreateInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let task = {
            let mut t = Task::new(&params.subject, &params.description);
            t.metadata = params.metadata;
            t
        };
        let task_id = task.id.clone();
        debug!(task_id = %task_id, subject = %params.subject, "Creating task");
        TASK_STORE.insert(task_id.clone(), task);
        let body = json!({
            "task_id": task_id,
            "subject": params.subject,
        });
        ToolResult::success(serde_json::to_string_pretty(&body).unwrap_or_default())
    }
}
// ---------------------------------------------------------------------------
// TaskGet
// ---------------------------------------------------------------------------
/// Tool that retrieves the full details of a single task.
pub struct TaskGetTool;
/// Deserialized arguments for TaskGet; accepts camelCase "taskId" too.
#[derive(Debug, Deserialize)]
struct TaskGetInput {
    #[serde(alias = "taskId")]
    task_id: String,
}
#[async_trait]
impl Tool for TaskGetTool {
    fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_GET }
    fn description(&self) -> &str { "Get full details of a task by ID." }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "task_id": { "type": "string", "description": "Task ID to retrieve" }
            },
            "required": ["task_id"]
        })
    }
    /// Serialize the full task, or JSON `null` when the id is unknown —
    /// an unknown id is deliberately NOT reported as an error here.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: TaskGetInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let body = match TASK_STORE.get(&params.task_id) {
            Some(task) => task.to_full_value(),
            None => json!(null),
        };
        ToolResult::success(serde_json::to_string_pretty(&body).unwrap_or_default())
    }
}
// ---------------------------------------------------------------------------
// TaskUpdate
// ---------------------------------------------------------------------------
/// Tool that mutates a task's fields (status, subject, dependencies, ...).
pub struct TaskUpdateTool;
/// Deserialized arguments for TaskUpdate. All fields except `task_id` are
/// optional; only the supplied fields are applied.
#[derive(Debug, Deserialize)]
struct TaskUpdateInput {
    #[serde(alias = "taskId")]
    task_id: String,
    #[serde(default)]
    subject: Option<String>,
    #[serde(default)]
    description: Option<String>,
    // Parsed into TaskStatus in execute(); unknown values are rejected there.
    #[serde(default)]
    status: Option<String>,
    #[serde(default)]
    owner: Option<String>,
    // Task IDs appended (deduplicated) to `blocks`.
    #[serde(default, rename = "addBlocks")]
    add_blocks: Option<Vec<String>>,
    // Task IDs appended (deduplicated) to `blocked_by`.
    #[serde(default, rename = "addBlockedBy")]
    add_blocked_by: Option<Vec<String>>,
    // Replaces the task's metadata wholesale when present.
    #[serde(default)]
    metadata: Option<Value>,
    #[serde(default)]
    output: Option<String>,
}
#[async_trait]
impl Tool for TaskUpdateTool {
    fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_UPDATE }
    fn description(&self) -> &str { "Update a task's properties (status, subject, description, etc.)." }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        // NOTE(review): the status enum below omits "running", which
        // execute() nevertheless accepts — confirm whether intentional.
        json!({
            "type": "object",
            "properties": {
                "task_id": { "type": "string", "description": "Task ID to update" },
                "subject": { "type": "string" },
                "description": { "type": "string" },
                "status": {
                    "type": "string",
                    "enum": ["pending", "in_progress", "completed", "deleted", "failed"]
                },
                "owner": { "type": "string" },
                "addBlocks": { "type": "array", "items": { "type": "string" } },
                "addBlockedBy": { "type": "array", "items": { "type": "string" } },
                "metadata": { "type": "object" },
                "output": { "type": "string" }
            },
            "required": ["task_id"]
        })
    }
    // Applies each supplied field to the task, bumps updated_at, and — when
    // the new status is "deleted" — removes the task from the store entirely.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: TaskUpdateInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Holds a DashMap write guard on this task until drop(task) below.
        let mut task = match TASK_STORE.get_mut(&params.task_id) {
            Some(t) => t,
            None => return ToolResult::error(format!("Task '{}' not found", params.task_id)),
        };
        let mut updated_fields: Vec<&str> = vec![];
        if let Some(subject) = &params.subject {
            task.subject = subject.clone();
            updated_fields.push("subject");
        }
        if let Some(desc) = &params.description {
            task.description = desc.clone();
            updated_fields.push("description");
        }
        if let Some(status_str) = &params.status {
            task.status = match status_str.as_str() {
                "pending" => TaskStatus::Pending,
                "in_progress" | "in-progress" => TaskStatus::InProgress,
                "completed" => TaskStatus::Completed,
                "deleted" => TaskStatus::Deleted,
                "running" => TaskStatus::Running,
                "failed" => TaskStatus::Failed,
                other => return ToolResult::error(format!("Unknown status: {}", other)),
            };
            updated_fields.push("status");
        }
        if let Some(owner) = &params.owner {
            task.owner = Some(owner.clone());
            updated_fields.push("owner");
        }
        // addBlocks/addBlockedBy append without duplicating existing ids.
        if let Some(blocks) = &params.add_blocks {
            for b in blocks {
                if !task.blocks.contains(b) {
                    task.blocks.push(b.clone());
                }
            }
            updated_fields.push("blocks");
        }
        if let Some(blocked_by) = &params.add_blocked_by {
            for b in blocked_by {
                if !task.blocked_by.contains(b) {
                    task.blocked_by.push(b.clone());
                }
            }
            updated_fields.push("blocked_by");
        }
        if let Some(meta) = &params.metadata {
            task.metadata = Some(meta.clone());
            updated_fields.push("metadata");
        }
        if let Some(out) = &params.output {
            task.output = Some(out.clone());
            updated_fields.push("output");
        }
        task.updated_at = chrono::Utc::now();
        // Handle deletion. The guard MUST be dropped before remove(): calling
        // TASK_STORE.remove while still holding get_mut's guard on the same
        // key would deadlock on the shard lock.
        let task_id = task.id.clone();
        let task_status = task.status.clone();
        drop(task); // release the lock
        if task_status == TaskStatus::Deleted {
            TASK_STORE.remove(&task_id);
        }
        ToolResult::success(serde_json::to_string_pretty(&json!({
            "success": true,
            "task_id": task_id,
            "updated_fields": updated_fields,
        })).unwrap_or_default())
    }
}
// ---------------------------------------------------------------------------
// TaskList
// ---------------------------------------------------------------------------
/// Tool that lists task summaries from the global store.
pub struct TaskListTool;
#[async_trait]
impl Tool for TaskListTool {
    fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_LIST }
    fn description(&self) -> &str { "List all active tasks (excluding deleted/completed)." }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "include_completed": {
                    "type": "boolean",
                    "description": "Include completed tasks (default false)"
                }
            }
        })
    }
    /// List task summaries. Deleted tasks are always hidden; completed
    /// tasks appear only when `include_completed` is true.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let include_completed = matches!(
            input.get("include_completed").and_then(|v| v.as_bool()),
            Some(true)
        );
        let mut tasks: Vec<Value> = Vec::new();
        for entry in TASK_STORE.iter() {
            let keep = match &entry.value().status {
                TaskStatus::Deleted => false,
                TaskStatus::Completed => include_completed,
                _ => true,
            };
            if keep {
                tasks.push(entry.value().to_summary_value());
            }
        }
        ToolResult::success(serde_json::to_string_pretty(&tasks).unwrap_or_default())
    }
}
// ---------------------------------------------------------------------------
// TaskStop
// ---------------------------------------------------------------------------
/// Tool that stops a running background task.
pub struct TaskStopTool;
/// Deserialized arguments for TaskStop.
#[derive(Debug, Deserialize)]
struct TaskStopInput {
    // "shell_id" alias: presumably for background-shell callers — TODO confirm.
    #[serde(alias = "shell_id")]
    task_id: String,
}
#[async_trait]
impl Tool for TaskStopTool {
fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_STOP }
fn description(&self) -> &str { "Stop a running background task." }
fn permission_level(&self) -> PermissionLevel { PermissionLevel::Execute }
fn input_schema(&self) -> Value {
json!({
"type": "object",
"properties": {
"task_id": { "type": "string", "description": "ID of the task to stop" }
},
"required": ["task_id"]
})
}
async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
let params: TaskStopInput = match serde_json::from_value(input) {
Ok(p) => p,
Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
};
match TASK_STORE.get_mut(&params.task_id) {
Some(mut task) => {
if task.status != TaskStatus::Running && task.status != TaskStatus::InProgress {
return ToolResult::error(format!(
"Task '{}' is not running (status: {})",
params.task_id, task.status
));
}
task.status = TaskStatus::Completed;
task.updated_at = chrono::Utc::now();
ToolResult::success(serde_json::to_string_pretty(&json!({
"message": "Task stopped",
"task_id": params.task_id,
})).unwrap_or_default())
}
None => ToolResult::error(format!("Task '{}' not found", params.task_id)),
}
}
}
// ---------------------------------------------------------------------------
// TaskOutput
// ---------------------------------------------------------------------------
/// Tool that retrieves a task's recorded output.
pub struct TaskOutputTool;
/// Deserialized arguments for TaskOutput.
#[derive(Debug, Deserialize)]
struct TaskOutputInput {
    task_id: String,
    // Whether to "wait" for completion (defaults to true via default_block).
    #[serde(default = "default_block")]
    block: bool,
}
/// Serde default for `TaskOutputInput::block`: wait for completion.
fn default_block() -> bool {
    true
}
#[async_trait]
impl Tool for TaskOutputTool {
    fn name(&self) -> &str { cc_core::constants::TOOL_NAME_TASK_OUTPUT }
    fn description(&self) -> &str { "Get the output of a task." }
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "task_id": { "type": "string", "description": "Task ID to get output for" },
                "block": { "type": "boolean", "description": "Wait for task to complete (default true)" }
            },
            "required": ["task_id"]
        })
    }
    // Returns the task's full JSON plus a retrieval_status field.
    // NOTE(review): despite the schema text, block=true does NOT actually
    // wait — a still-running task is returned immediately with status
    // "success"; only block=false marks it "not_ready". Confirm whether a
    // polling wait was intended here.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: TaskOutputInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        match TASK_STORE.get(&params.task_id) {
            Some(task) => {
                let retrieval_status = match &task.status {
                    TaskStatus::Completed | TaskStatus::Failed => "success",
                    TaskStatus::Running | TaskStatus::InProgress => {
                        if params.block { "success" } else { "not_ready" }
                    }
                    _ => "success",
                };
                ToolResult::success(serde_json::to_string_pretty(&json!({
                    "retrieval_status": retrieval_status,
                    "task": task.to_full_value(),
                })).unwrap_or_default())
            }
            None => ToolResult::error(format!("Task '{}' not found", params.task_id)),
        }
    }
}

View file

@ -0,0 +1,127 @@
// TodoWrite tool: task / todo list management.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that accepts a full todo list and renders a progress summary.
pub struct TodoWriteTool;
/// Deserialized arguments for TodoWrite: the COMPLETE todo list (replaces
/// any previous list).
#[derive(Debug, Deserialize)]
struct TodoWriteInput {
    todos: Vec<TodoItem>,
}
/// One todo entry as submitted by the model.
#[derive(Debug, Clone, Deserialize)]
struct TodoItem {
    id: String,
    content: String,
    status: TodoStatus,
    // Accepted for schema compatibility; not used in the rendered output —
    // TODO confirm whether priority should affect ordering/display.
    #[serde(default)]
    priority: Option<String>,
}
/// Todo item lifecycle; deserialized from snake_case strings.
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "snake_case")]
enum TodoStatus {
    Pending,
    InProgress,
    Completed,
}
impl std::fmt::Display for TodoStatus {
    // Render the snake_case token matching the serde wire format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            TodoStatus::Pending => "pending",
            TodoStatus::InProgress => "in_progress",
            TodoStatus::Completed => "completed",
        };
        f.write_str(label)
    }
}
#[async_trait]
impl Tool for TodoWriteTool {
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_TODO_WRITE
    }
    fn description(&self) -> &str {
        "Write and manage a todo/task list. Provide the complete list of todos \
        each time (this replaces the entire list). Use this to track progress \
        on multi-step tasks."
    }
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::None
    }
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "todos": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "id": { "type": "string" },
                            "content": { "type": "string" },
                            "status": {
                                "type": "string",
                                "enum": ["pending", "in_progress", "completed"]
                            },
                            "priority": { "type": "string" }
                        },
                        "required": ["id", "content", "status"]
                    },
                    "description": "The complete list of todo items"
                }
            },
            "required": ["todos"]
        })
    }
    /// Summarize the submitted list: per-status counts plus one checklist
    /// line per item, with the same counts echoed in the metadata.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: TodoWriteInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        debug!(count = params.todos.len(), "Writing todo list");
        // Tally status counts in a single pass.
        let (mut completed, mut in_progress) = (0usize, 0usize);
        for item in &params.todos {
            match item.status {
                TodoStatus::Completed => completed += 1,
                TodoStatus::InProgress => in_progress += 1,
                TodoStatus::Pending => {}
            }
        }
        let total = params.todos.len();
        let pending = total - completed - in_progress;
        let mut output = format!(
            "Todo list updated ({} total: {} pending, {} in progress, {} completed)\n\n",
            total, pending, in_progress, completed
        );
        for item in &params.todos {
            let icon = match item.status {
                TodoStatus::Pending => "[ ]",
                TodoStatus::InProgress => "[~]",
                TodoStatus::Completed => "[x]",
            };
            output.push_str(&format!("{} {} ({})\n", icon, item.content, item.id));
        }
        ToolResult::success(output).with_metadata(json!({
            "total": total,
            "completed": completed,
            "in_progress": in_progress,
            "pending": pending,
        }))
    }
}

View file

@ -0,0 +1,201 @@
// ToolSearchTool: search for tools by name or keyword.
//
// This is used by the model to discover "deferred" tools that are not yet
// loaded into context. In the Rust port there is no deferred-tool mechanism
// (all tools are always available), but this tool still provides a useful
// search interface for the model to discover available capabilities.
//
// Supports two query modes:
// - "select:ToolName" → direct lookup by exact name
// - "keyword search" → fuzzy name + description match with scoring
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
pub struct ToolSearchTool;
/// Deserialized input for the ToolSearch tool.
#[derive(Debug, Deserialize)]
struct ToolSearchInput {
    /// Either "select:ToolName[,Other]" for direct lookup, or free-form keywords.
    query: String,
    /// Upper bound on returned matches; defaults to 5 via `default_max`.
    #[serde(default = "default_max")]
    max_results: usize,
}
/// Serde default for `ToolSearchInput::max_results`.
fn default_max() -> usize {
    5
}
/// A minimal catalog entry describing one tool.
#[derive(Debug, Clone)]
struct ToolEntry {
    /// Exact tool name as exposed to the model.
    name: &'static str,
    /// One-line summary returned in search results.
    description: &'static str,
    /// Extra match terms used by the keyword scorer.
    keywords: &'static [&'static str],
}
/// Static catalog of all built-in tools with keywords for scoring.
///
/// NOTE(review): this list is maintained by hand and is not derived from the
/// actual tool registry — confirm it stays in sync when tools are added,
/// removed, or renamed.
static TOOL_CATALOG: &[ToolEntry] = &[
    ToolEntry { name: "Bash", description: "Execute shell commands", keywords: &["shell", "run", "command", "exec", "terminal"] },
    ToolEntry { name: "Read", description: "Read file contents", keywords: &["file", "read", "cat", "content"] },
    ToolEntry { name: "Write", description: "Write or create files", keywords: &["file", "write", "create", "save"] },
    ToolEntry { name: "Edit", description: "Edit existing files with string replacement", keywords: &["file", "edit", "modify", "replace", "patch"] },
    ToolEntry { name: "Glob", description: "Find files by pattern", keywords: &["find", "pattern", "search", "files", "glob"] },
    ToolEntry { name: "Grep", description: "Search file contents with regex", keywords: &["search", "regex", "grep", "find", "content"] },
    ToolEntry { name: "WebFetch", description: "Fetch web page content", keywords: &["web", "fetch", "http", "url", "browser"] },
    ToolEntry { name: "WebSearch", description: "Search the web", keywords: &["web", "search", "internet", "query"] },
    ToolEntry { name: "NotebookEdit", description: "Edit Jupyter notebook cells", keywords: &["notebook", "jupyter", "ipynb", "cell"] },
    ToolEntry { name: "TodoWrite", description: "Manage todo list", keywords: &["todo", "task", "list", "write"] },
    ToolEntry { name: "AskUserQuestion", description: "Ask the user a question", keywords: &["ask", "question", "user", "input", "clarify"] },
    ToolEntry { name: "EnterPlanMode", description: "Enter planning mode", keywords: &["plan", "mode", "planning"] },
    ToolEntry { name: "ExitPlanMode", description: "Exit planning mode", keywords: &["plan", "exit", "mode"] },
    ToolEntry { name: "Sleep", description: "Wait for a duration", keywords: &["sleep", "wait", "delay", "pause"] },
    ToolEntry { name: "PowerShell", description: "Execute PowerShell commands", keywords: &["powershell", "windows", "ps", "command"] },
    ToolEntry { name: "CronCreate", description: "Schedule a recurring cron task", keywords: &["cron", "schedule", "recurring", "timer"] },
    ToolEntry { name: "CronDelete", description: "Cancel a scheduled cron task", keywords: &["cron", "delete", "cancel", "remove"] },
    ToolEntry { name: "CronList", description: "List all cron tasks", keywords: &["cron", "list", "scheduled", "tasks"] },
    ToolEntry { name: "EnterWorktree", description: "Create and enter a git worktree", keywords: &["worktree", "git", "branch", "isolate"] },
    ToolEntry { name: "ExitWorktree", description: "Exit the current git worktree", keywords: &["worktree", "git", "exit", "restore"] },
    ToolEntry { name: "TaskCreate", description: "Create a background task", keywords: &["task", "create", "background", "async"] },
    ToolEntry { name: "TaskGet", description: "Get task details", keywords: &["task", "get", "status", "details"] },
    ToolEntry { name: "TaskUpdate", description: "Update a task's status", keywords: &["task", "update", "status", "progress"] },
    ToolEntry { name: "TaskList", description: "List all tasks", keywords: &["task", "list", "all", "tasks"] },
    ToolEntry { name: "TaskStop", description: "Stop a running task", keywords: &["task", "stop", "kill", "cancel"] },
    ToolEntry { name: "TaskOutput", description: "Get task output/logs", keywords: &["task", "output", "logs", "result"] },
    ToolEntry { name: "ListMcpResources", description: "List MCP server resources", keywords: &["mcp", "resource", "list", "server"] },
    ToolEntry { name: "ReadMcpResource", description: "Read an MCP resource", keywords: &["mcp", "resource", "read", "server"] },
    ToolEntry { name: "Agent", description: "Launch a sub-agent for complex tasks", keywords: &["agent", "subagent", "task", "parallel", "delegate"] },
    ToolEntry { name: "Brief", description: "Send a formatted message to the user", keywords: &["brief", "message", "notify", "proactive", "status", "update"] },
    ToolEntry { name: "Config", description: "Get or set Claude Code configuration", keywords: &["config", "settings", "model", "verbose", "permission", "configure"] },
    ToolEntry { name: "SendMessage", description: "Send a message to another agent", keywords: &["send", "message", "agent", "broadcast", "communicate", "inbox"] },
    ToolEntry { name: "Skill", description: "Execute a skill prompt template", keywords: &["skill", "command", "template", "prompt", "slash", "custom"] },
];
#[async_trait]
impl Tool for ToolSearchTool {
    /// Tool identifier used by the model to invoke this tool.
    fn name(&self) -> &str { "ToolSearch" }

    /// Human-readable description shown to the model.
    fn description(&self) -> &str {
        "Search for available tools by name or keyword. Use 'select:ToolName' for direct \
         lookup or provide keywords for fuzzy search. Returns matching tool names and their \
         descriptions. Max 5 results by default."
    }

    /// Searching the catalog touches nothing outside the process.
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::None }

    /// JSON Schema for the tool input; only `query` is required.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Query: use 'select:ToolName' for direct selection, or keywords to search"
                },
                "max_results": {
                    "type": "number",
                    "description": "Maximum results to return (default: 5, max: 20)"
                }
            },
            "required": ["query"]
        })
    }

    /// Look up tools by exact name ("select:" prefix) or score a keyword
    /// search against the static catalog.
    ///
    /// Fix over the previous version: `max_results` is clamped to [1, 20]
    /// instead of only capped at 20 — a caller-supplied 0 used to truncate
    /// every match away and falsely report "No tools found". This matches the
    /// clamping convention used by the WebSearch tool.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: ToolSearchInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        let query = params.query.trim();
        let max = params.max_results.clamp(1, 20);
        // select: prefix — direct lookup by exact (case-insensitive) name.
        if let Some(names_str) = query.strip_prefix("select:").map(str::trim) {
            let requested: Vec<&str> = names_str.split(',').map(str::trim).collect();
            let mut found = Vec::new();
            let mut missing = Vec::new();
            for name in requested {
                if let Some(entry) = TOOL_CATALOG.iter().find(|e| {
                    e.name.eq_ignore_ascii_case(name)
                }) {
                    found.push(format!("{}: {}", entry.name, entry.description));
                } else {
                    missing.push(name.to_string());
                }
            }
            if found.is_empty() {
                return ToolResult::success(format!(
                    "No matching tools found for: {}",
                    missing.join(", ")
                ));
            }
            let mut out = found.join("\n");
            if !missing.is_empty() {
                out.push_str(&format!("\n\nNot found: {}", missing.join(", ")));
            }
            return ToolResult::success(out);
        }
        // Keyword search: every term is scored against the name, description,
        // and keyword list; exact hits outrank substring hits.
        let q_lower = query.to_lowercase();
        let terms: Vec<&str> = q_lower.split_whitespace().collect();
        let mut scored: Vec<(usize, &ToolEntry)> = TOOL_CATALOG
            .iter()
            .filter_map(|entry| {
                let mut score = 0usize;
                let name_lower = entry.name.to_lowercase();
                let desc_lower = entry.description.to_lowercase();
                for term in &terms {
                    // Exact name match beats substring match.
                    if name_lower == *term {
                        score += 20;
                    } else if name_lower.contains(term) {
                        score += 10;
                    }
                    // Description match.
                    if desc_lower.contains(term) {
                        score += 5;
                    }
                    // Keyword match.
                    for &kw in entry.keywords {
                        if kw == *term {
                            score += 8;
                        } else if kw.contains(term) {
                            score += 3;
                        }
                    }
                }
                if score > 0 { Some((score, entry)) } else { None }
            })
            .collect();
        scored.sort_by(|a, b| b.0.cmp(&a.0));
        scored.truncate(max);
        if scored.is_empty() {
            return ToolResult::success(format!(
                "No tools found matching '{}'. Try broader keywords or use 'select:ToolName'.",
                query
            ));
        }
        let lines: Vec<String> = scored
            .iter()
            .map(|(_, e)| format!("{}: {}", e.name, e.description))
            .collect();
        ToolResult::success(format!(
            "Tools matching '{}':\n\n{}\n\nTotal tools available: {}",
            query,
            lines.join("\n"),
            TOOL_CATALOG.len()
        ))
    }
}

View file

@ -0,0 +1,236 @@
// WebFetch tool: HTTP GET with basic HTML-to-text conversion.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that fetches a URL over HTTP and returns its body as plain text.
pub struct WebFetchTool;
/// Deserialized input for [`WebFetchTool`].
#[derive(Debug, Deserialize)]
struct WebFetchInput {
    /// The URL to fetch.
    url: String,
    /// Optional processing hint; accepted for API compatibility but not read
    /// by the fetch implementation in this file.
    #[serde(default)]
    prompt: Option<String>,
}
/// Naively strip HTML tags and decode common entities.
///
/// Removes tags, drops `<script>`/`<style>` contents, inserts newlines for
/// common block-level tags, decodes a handful of named entities, and collapses
/// runs of blank lines. This is a heuristic converter, not an HTML parser.
///
/// Fix over the previous version: the lowercase shadow used for tag detection
/// is now built with per-char ASCII lowercasing. `str::to_lowercase` can
/// change the number of chars for some Unicode code points (e.g. 'İ'),
/// which desynchronized `lower_chars` from `chars` and skewed all tag and
/// script/style detection after such a character. Tag names are ASCII, so
/// ASCII lowercasing is sufficient and guarantees index alignment.
fn strip_html(html: &str) -> String {
    let mut result = String::with_capacity(html.len());
    let mut in_tag = false;
    let mut in_script = false;
    let mut in_style = false;
    let chars: Vec<char> = html.chars().collect();
    // One-to-one with `chars`, unlike a full Unicode lowercase pass.
    let lower_chars: Vec<char> = chars.iter().map(|c| c.to_ascii_lowercase()).collect();
    let len = chars.len();
    let mut i = 0;
    while i < len {
        if !in_tag && chars[i] == '<' {
            in_tag = true;
            // Check for script/style open/close.
            let rest: String = lower_chars[i..].iter().take(20).collect();
            if rest.starts_with("<script") {
                in_script = true;
            } else if rest.starts_with("</script") {
                in_script = false;
            } else if rest.starts_with("<style") {
                in_style = true;
            } else if rest.starts_with("</style") {
                in_style = false;
            }
            // Block-level tags become line breaks in the text output.
            let block_tags = [
                "<br", "<p ", "<p>", "</p>", "<div", "</div>", "<h1", "<h2", "<h3",
                "<h4", "<h5", "<h6", "</h1", "</h2", "</h3", "</h4", "</h5", "</h6",
                "<li", "</li", "<tr", "</tr", "<hr",
            ];
            for tag in &block_tags {
                if rest.starts_with(tag) {
                    result.push('\n');
                    break;
                }
            }
            i += 1;
            continue;
        }
        if in_tag {
            if chars[i] == '>' {
                in_tag = false;
            }
            i += 1;
            continue;
        }
        if in_script || in_style {
            i += 1;
            continue;
        }
        // Decode the most common named/numeric entities; anything else passes
        // through with its leading '&' intact.
        if chars[i] == '&' {
            let rest: String = chars[i..].iter().take(10).collect();
            if rest.starts_with("&amp;") {
                result.push('&');
                i += 5;
            } else if rest.starts_with("&lt;") {
                result.push('<');
                i += 4;
            } else if rest.starts_with("&gt;") {
                result.push('>');
                i += 4;
            } else if rest.starts_with("&quot;") {
                result.push('"');
                i += 6;
            } else if rest.starts_with("&#39;") || rest.starts_with("&apos;") {
                result.push('\'');
                i += if rest.starts_with("&#39;") { 5 } else { 6 };
            } else if rest.starts_with("&nbsp;") {
                result.push(' ');
                i += 6;
            } else {
                result.push('&');
                i += 1;
            }
            continue;
        }
        result.push(chars[i]);
        i += 1;
    }
    // Collapse runs of blank lines (at most two kept) and trim each line.
    let mut collapsed = String::new();
    let mut blank_count = 0;
    for line in result.lines() {
        let trimmed = line.trim();
        if trimmed.is_empty() {
            blank_count += 1;
            if blank_count <= 2 {
                collapsed.push('\n');
            }
        } else {
            blank_count = 0;
            collapsed.push_str(trimmed);
            collapsed.push('\n');
        }
    }
    collapsed.trim().to_string()
}
#[async_trait]
impl Tool for WebFetchTool {
    /// Tool identifier used by the model to invoke this tool.
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_WEB_FETCH
    }

    /// Human-readable description shown to the model.
    fn description(&self) -> &str {
        "Fetches a web page URL and returns its content as text. HTML is \
         automatically converted to plain text. Use this for reading documentation, \
         APIs, and other web resources."
    }

    /// Fetching is read-only with respect to the local workspace.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    /// JSON Schema for the tool input; only `url` is required.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL to fetch"
                },
                "prompt": {
                    "type": "string",
                    "description": "Optional prompt for how to process the content"
                }
            },
            "required": ["url"]
        })
    }

    /// Fetch the URL (30 s timeout, up to 10 redirects), convert HTML bodies
    /// to plain text, and truncate oversized output.
    ///
    /// Fix over the previous version: truncation now backs up to a UTF-8 char
    /// boundary before slicing — `&text[..MAX_LEN]` panics whenever byte
    /// 100_000 falls inside a multi-byte character.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: WebFetchInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Permission check.
        if let Err(e) = ctx.check_permission(
            self.name(),
            &format!("Fetch {}", params.url),
            true, // read-only
        ) {
            return ToolResult::error(e.to_string());
        }
        debug!(url = %params.url, "Fetching web page");
        let client = reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(30))
            .redirect(reqwest::redirect::Policy::limited(10))
            .build();
        let client = match client {
            Ok(c) => c,
            Err(e) => return ToolResult::error(format!("Failed to create HTTP client: {}", e)),
        };
        let resp = match client.get(&params.url)
            .header("User-Agent", "Claude-Code/1.0")
            .send()
            .await
        {
            Ok(r) => r,
            Err(e) => return ToolResult::error(format!("Failed to fetch {}: {}", params.url, e)),
        };
        let status = resp.status();
        if !status.is_success() {
            return ToolResult::error(format!(
                "HTTP {} when fetching {}",
                status, params.url
            ));
        }
        let content_type = resp
            .headers()
            .get("content-type")
            .and_then(|v| v.to_str().ok())
            .unwrap_or("")
            .to_string();
        let body = match resp.text().await {
            Ok(b) => b,
            Err(e) => return ToolResult::error(format!("Failed to read response body: {}", e)),
        };
        // Convert HTML to text if applicable; other content types pass through.
        let text = if content_type.contains("html") {
            strip_html(&body)
        } else {
            body
        };
        // Truncate very long content, stepping back to a char boundary so the
        // byte-slice below cannot split a multi-byte UTF-8 character.
        const MAX_LEN: usize = 100_000;
        let text = if text.len() > MAX_LEN {
            let mut cut = MAX_LEN;
            while !text.is_char_boundary(cut) {
                cut -= 1;
            }
            format!(
                "{}\n\n... (truncated, {} total characters)",
                &text[..cut],
                text.len()
            )
        } else {
            text
        };
        ToolResult::success(text)
    }
}

View file

@ -0,0 +1,227 @@
// WebSearch tool: search the web using Brave Search API or fallback to DuckDuckGo.
//
// Mirrors the TypeScript WebSearch tool behaviour:
// - Accepts a query string
// - Returns a list of results with title, url, and snippet
// - Falls back to DuckDuckGo if no search API key is configured
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use serde::Deserialize;
use serde_json::{json, Value};
use tracing::debug;
/// Tool that searches the web via Brave Search or a DuckDuckGo fallback.
pub struct WebSearchTool;
/// Deserialized input for [`WebSearchTool`].
#[derive(Debug, Deserialize)]
struct WebSearchInput {
    /// The search query string.
    query: String,
    /// Desired result count; defaults to 5 via `default_num_results`.
    #[serde(default = "default_num_results")]
    num_results: usize,
}
/// Serde default for `WebSearchInput::num_results`.
fn default_num_results() -> usize { 5 }
#[async_trait]
impl Tool for WebSearchTool {
    /// Tool identifier used by the model to invoke this tool.
    fn name(&self) -> &str {
        cc_core::constants::TOOL_NAME_WEB_SEARCH
    }

    /// Human-readable description shown to the model.
    fn description(&self) -> &str {
        "Search the web for information. Returns a list of relevant web pages with \
         titles, URLs, and snippets. Use this when you need current information \
         not available in your training data, or when searching for documentation, \
         examples, or news."
    }

    /// Searching reads remote data only; the local workspace is untouched.
    fn permission_level(&self) -> PermissionLevel {
        PermissionLevel::ReadOnly
    }

    /// JSON Schema for the tool input; only `query` is required.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query"
                },
                "num_results": {
                    "type": "number",
                    "description": "Number of results to return (default: 5, max: 10)"
                }
            },
            "required": ["query"]
        })
    }

    /// Dispatch the search to Brave (when an API key is configured) or the
    /// keyless DuckDuckGo instant-answer fallback.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: WebSearchInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Idiomatic clamp to [1, 10] (replaces the old `.min(10).max(1)` chain).
        let num_results = params.num_results.clamp(1, 10);
        debug!(query = %params.query, num_results, "Web search");
        // Try Brave Search API first, then fall back to DuckDuckGo.
        if let Some(api_key) = std::env::var("BRAVE_SEARCH_API_KEY").ok().filter(|k| !k.is_empty()) {
            search_brave(&params.query, num_results, &api_key).await
        } else {
            search_duckduckgo(&params.query, num_results).await
        }
    }
}
/// Search using the Brave Search API.
///
/// Issues a GET against the Brave web-search endpoint with the subscription
/// token header, then formats up to `num_results` hits. Transport errors,
/// non-2xx statuses, and JSON decode failures become `ToolResult::error`.
async fn search_brave(query: &str, num_results: usize, api_key: &str) -> ToolResult {
    let client = reqwest::Client::new();
    // Query text is percent-encoded by the local helper below.
    let url = format!(
        "https://api.search.brave.com/res/v1/web/search?q={}&count={}",
        urlencoding_simple(query),
        num_results
    );
    let resp = match client
        .get(&url)
        .header("Accept", "application/json")
        .header("Accept-Encoding", "gzip")
        .header("X-Subscription-Token", api_key)
        .send()
        .await
    {
        Ok(r) => r,
        Err(e) => return ToolResult::error(format!("Search request failed: {}", e)),
    };
    if !resp.status().is_success() {
        let status = resp.status().as_u16();
        return ToolResult::error(format!("Brave Search API returned status {}", status));
    }
    let data: Value = match resp.json().await {
        Ok(v) => v,
        Err(e) => return ToolResult::error(format!("Failed to parse response: {}", e)),
    };
    let results = format_brave_results(&data, num_results);
    ToolResult::success(results)
}
/// Render the `web.results` array of a Brave Search response as a numbered
/// list of title / URL / snippet triples; "No results found." when empty.
fn format_brave_results(data: &Value, max: usize) -> String {
    let items = data
        .get("web")
        .and_then(|w| w.get("results"))
        .and_then(|r| r.as_array());
    let mut output = String::new();
    if let Some(items) = items {
        for (i, item) in items.iter().take(max).enumerate() {
            // Pull an optional string field off the current result object.
            let field = |key: &str| item.get(key).and_then(|v| v.as_str());
            let title = field("title").unwrap_or("(No title)");
            let url = field("url").unwrap_or("");
            let snippet = field("description").unwrap_or("");
            output.push_str(&format!(
                "{}. **{}**\n URL: {}\n {}\n\n",
                i + 1,
                title,
                url,
                snippet
            ));
        }
    }
    if output.is_empty() {
        "No results found.".to_string()
    } else {
        output
    }
}
/// Fallback: DuckDuckGo Instant Answer API.
/// Note: this doesn't return full search results, only instant answers.
///
/// Same error policy as `search_brave`: transport errors, non-2xx statuses,
/// and JSON decode failures become `ToolResult::error`.
async fn search_duckduckgo(query: &str, num_results: usize) -> ToolResult {
    let client = reqwest::Client::new();
    // no_html strips markup from answers; skip_disambig avoids topic pickers.
    let url = format!(
        "https://api.duckduckgo.com/?q={}&format=json&no_html=1&skip_disambig=1",
        urlencoding_simple(query)
    );
    let resp = match client
        .get(&url)
        .header("User-Agent", "Claude Code/1.0")
        .send()
        .await
    {
        Ok(r) => r,
        Err(e) => return ToolResult::error(format!("Search request failed: {}", e)),
    };
    if !resp.status().is_success() {
        let status = resp.status().as_u16();
        return ToolResult::error(format!("DuckDuckGo API returned status {}", status));
    }
    let data: Value = match resp.json().await {
        Ok(v) => v,
        Err(e) => return ToolResult::error(format!("Failed to parse response: {}", e)),
    };
    let output = format_ddg_results(&data, num_results);
    ToolResult::success(output)
}
/// Render a DuckDuckGo instant-answer payload: the Abstract (if any) followed
/// by related topics, capped at `max` entries total.
fn format_ddg_results(data: &Value, max: usize) -> String {
    // Pull an optional top-level string field, defaulting to "".
    let str_field = |key: &str| data.get(key).and_then(|v| v.as_str()).unwrap_or("");
    let mut output = String::new();
    let mut count = 0;
    // Abstract (main answer), when present and non-empty.
    let abstract_text = str_field("Abstract");
    if !abstract_text.is_empty() {
        output.push_str(&format!(
            "**{}**\n{}\nURL: {}\n\n",
            str_field("AbstractSource"),
            abstract_text,
            str_field("AbstractURL")
        ));
        count += 1;
    }
    // Related topics fill the remaining slots.
    if let Some(topics) = data.get("RelatedTopics").and_then(|t| t.as_array()) {
        for topic in topics.iter().take(max.saturating_sub(count)) {
            let text = topic.get("Text").and_then(|t| t.as_str()).unwrap_or("");
            if !text.is_empty() {
                let url = topic.get("FirstURL").and_then(|u| u.as_str()).unwrap_or("");
                output.push_str(&format!("- {}\n {}\n\n", text, url));
            }
        }
    }
    if output.is_empty() {
        // NOTE(review): "QuerySearchQuery" does not look like a documented DDG
        // response field, so this usually falls back to "your query" — confirm
        // against the Instant Answer API.
        format!(
            "No instant answer found for '{}'. Try using the Brave Search API \
             by setting the BRAVE_SEARCH_API_KEY environment variable for full web search.",
            data.get("QuerySearchQuery")
                .and_then(|q| q.as_str())
                .unwrap_or("your query")
        )
    } else {
        output
    }
}
/// Minimal percent-encoding for URL query parameters.
///
/// Unreserved characters (RFC 3986 set) pass through, spaces become '+', and
/// every other character is emitted as percent-encoded UTF-8 bytes.
fn urlencoding_simple(s: &str) -> String {
    use std::fmt::Write as _;
    let mut encoded = String::with_capacity(s.len());
    for ch in s.chars() {
        if ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.' | '~') {
            encoded.push(ch);
        } else if ch == ' ' {
            encoded.push('+');
        } else {
            // Encode each UTF-8 byte of the char; a 4-byte buffer always fits.
            let mut buf = [0u8; 4];
            for byte in ch.encode_utf8(&mut buf).as_bytes() {
                let _ = write!(encoded, "%{:02X}", byte);
            }
        }
    }
    encoded
}

View file

@ -0,0 +1,351 @@
// Worktree tools: create and exit git worktrees for isolated work sessions.
//
// EnterWorktreeTool create a new git worktree with an optional branch name,
// switching the session's working directory to it.
// ExitWorktreeTool exit the current worktree, optionally removing it, and
// restore the original working directory.
//
// These tools mirror the TypeScript EnterWorktreeTool / ExitWorktreeTool.
use crate::{PermissionLevel, Tool, ToolContext, ToolResult};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use serde::Deserialize;
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::debug;
// ---------------------------------------------------------------------------
// Session-level state: only one active worktree per session.
// ---------------------------------------------------------------------------
/// State recorded for the single active worktree session.
#[derive(Debug, Clone)]
pub struct WorktreeSession {
    /// Working directory the session had before entering the worktree.
    pub original_cwd: PathBuf,
    /// Filesystem path of the created worktree.
    pub worktree_path: PathBuf,
    /// Branch created for the worktree, when known.
    pub branch: Option<String>,
    /// HEAD commit at creation time; used on exit to count new commits.
    pub original_head: Option<String>,
}
/// Process-wide slot holding the active session (at most one at a time).
static WORKTREE_SESSION: Lazy<Arc<RwLock<Option<WorktreeSession>>>> =
    Lazy::new(|| Arc::new(RwLock::new(None)));
// ---------------------------------------------------------------------------
// EnterWorktreeTool
// ---------------------------------------------------------------------------
/// Tool that creates a git worktree and switches the session into it.
pub struct EnterWorktreeTool;
/// Deserialized input for [`EnterWorktreeTool`].
#[derive(Debug, Deserialize)]
struct EnterWorktreeInput {
    /// Optional branch name. If omitted, a timestamped branch is created.
    #[serde(default)]
    branch: Option<String>,
    /// Sub-path under the repo root where the worktree will be created.
    /// Defaults to `.worktrees/<branch>`.
    #[serde(default)]
    path: Option<String>,
}
#[async_trait]
impl Tool for EnterWorktreeTool {
    /// Tool identifier used by the model to invoke this tool.
    fn name(&self) -> &str { "EnterWorktree" }
    /// Human-readable description shown to the model.
    fn description(&self) -> &str {
        "Create a new git worktree and switch the session's working directory to it. \
         This gives you an isolated environment to experiment or work on a feature \
         without affecting the main working tree. \
         Use ExitWorktree to return to the original directory."
    }
    /// Creating branches and directories mutates the repository: Write level.
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::Write }
    /// JSON Schema for the tool input; both fields are optional.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "branch": {
                    "type": "string",
                    "description": "Branch name to create. Defaults to a timestamped name like worktree-1234567890."
                },
                "path": {
                    "type": "string",
                    "description": "Optional path for the worktree directory. Defaults to .worktrees/<branch>."
                }
            }
        })
    }
    /// Create the worktree via `git worktree add -b`, record the session, and
    /// report the new working directory.
    async fn execute(&self, input: Value, ctx: &ToolContext) -> ToolResult {
        let params: EnterWorktreeInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Only one worktree session may be active at a time.
        // NOTE(review): this read-lock check and the write at the end are
        // separate lock acquisitions, so two concurrent EnterWorktree calls
        // could both pass this gate — confirm whether the caller serializes
        // tool execution.
        {
            let session = WORKTREE_SESSION.read().await;
            if session.is_some() {
                return ToolResult::error(
                    "Already in a worktree session. Call ExitWorktree first.".to_string(),
                );
            }
        }
        if let Err(e) = ctx.check_permission(
            self.name(),
            "Create a git worktree",
            false,
        ) {
            return ToolResult::error(e.to_string());
        }
        // Determine branch name (timestamped fallback when none was given).
        let ts = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        let branch = params
            .branch
            .clone()
            .unwrap_or_else(|| format!("worktree-{}", ts));
        // Determine worktree path relative to the current working directory.
        let worktree_path = if let Some(p) = params.path {
            ctx.working_dir.join(p)
        } else {
            ctx.working_dir.join(".worktrees").join(&branch)
        };
        // Record current HEAD so ExitWorktree can count new commits later.
        let head = run_git(&ctx.working_dir, &["rev-parse", "HEAD"]).await;
        let original_head = head.ok().map(|h| h.trim().to_string());
        // Create the worktree on a fresh branch.
        let worktree_str = worktree_path.to_string_lossy().to_string();
        let result = run_git(
            &ctx.working_dir,
            &["worktree", "add", "-b", &branch, &worktree_str],
        )
        .await;
        match result {
            Err(e) => ToolResult::error(format!("Failed to create worktree: {}", e)),
            Ok(_) => {
                debug!(
                    branch = %branch,
                    path = %worktree_path.display(),
                    "Created worktree"
                );
                // Save session state so ExitWorktree can restore/clean up.
                *WORKTREE_SESSION.write().await = Some(WorktreeSession {
                    original_cwd: ctx.working_dir.clone(),
                    worktree_path: worktree_path.clone(),
                    branch: Some(branch.clone()),
                    original_head,
                });
                ToolResult::success(format!(
                    "Created worktree at {} on branch '{}'.\n\
                     The working directory is now {}.\n\
                     Use ExitWorktree to return to {}.",
                    worktree_path.display(),
                    branch,
                    worktree_path.display(),
                    ctx.working_dir.display(),
                ))
                .with_metadata(json!({
                    "worktree_path": worktree_path.to_string_lossy(),
                    "branch": branch,
                    "original_cwd": ctx.working_dir.to_string_lossy(),
                }))
            }
        }
    }
}
// ---------------------------------------------------------------------------
// ExitWorktreeTool
// ---------------------------------------------------------------------------
/// Tool that leaves the worktree session created by [`EnterWorktreeTool`].
pub struct ExitWorktreeTool;
/// Deserialized input for [`ExitWorktreeTool`].
#[derive(Debug, Deserialize)]
struct ExitWorktreeInput {
    /// "keep" = leave the worktree on disk; "remove" = delete it.
    #[serde(default = "default_action")]
    action: String,
    /// Required if action=="remove" and there are uncommitted changes.
    #[serde(default)]
    discard_changes: bool,
}
/// Serde default for `ExitWorktreeInput::action`.
fn default_action() -> String {
    String::from("keep")
}
#[async_trait]
impl Tool for ExitWorktreeTool {
    /// Tool identifier used by the model to invoke this tool.
    fn name(&self) -> &str { "ExitWorktree" }

    /// Human-readable description shown to the model.
    fn description(&self) -> &str {
        "Exit the current worktree session created by EnterWorktree and restore the \
         original working directory. Use action='keep' to preserve the worktree on \
         disk, or action='remove' to delete it. Only operates on worktrees created \
         by EnterWorktree in this session."
    }

    /// Removing worktrees and branches mutates the repository: Write level.
    fn permission_level(&self) -> PermissionLevel { PermissionLevel::Write }

    /// JSON Schema for the tool input; `action` is required.
    fn input_schema(&self) -> Value {
        json!({
            "type": "object",
            "properties": {
                "action": {
                    "type": "string",
                    "enum": ["keep", "remove"],
                    "description": "\"keep\" leaves the worktree on disk; \"remove\" deletes it and its branch."
                },
                "discard_changes": {
                    "type": "boolean",
                    "description": "Set true when action=remove and the worktree has uncommitted/unmerged work to discard."
                }
            },
            "required": ["action"]
        })
    }

    /// Tear down (or detach from) the active worktree session.
    ///
    /// Fix over the previous version: the action string is validated *before*
    /// the session slot is cleared. Previously an unknown action first wiped
    /// `WORKTREE_SESSION` and then returned an error, orphaning the live
    /// worktree session so it could never be exited properly.
    async fn execute(&self, input: Value, _ctx: &ToolContext) -> ToolResult {
        let params: ExitWorktreeInput = match serde_json::from_value(input) {
            Ok(p) => p,
            Err(e) => return ToolResult::error(format!("Invalid input: {}", e)),
        };
        // Reject unknown actions up front so we never drop session state for
        // a request we cannot carry out.
        if params.action != "keep" && params.action != "remove" {
            return ToolResult::error(format!(
                "Unknown action '{}'. Use 'keep' or 'remove'.",
                params.action
            ));
        }
        let session_guard = WORKTREE_SESSION.read().await;
        let session = match &*session_guard {
            Some(s) => s.clone(),
            None => {
                return ToolResult::error(
                    "No-op: there is no active EnterWorktree session to exit. \
                     This tool only operates on worktrees created by EnterWorktree \
                     in the current session."
                        .to_string(),
                );
            }
        };
        drop(session_guard);
        let worktree_str = session.worktree_path.to_string_lossy().to_string();
        // If removing, refuse to discard unsaved work unless explicitly told to.
        if params.action == "remove" && !params.discard_changes {
            let status = run_git(&session.worktree_path, &["status", "--porcelain"]).await;
            let changed_files = status
                .as_deref()
                .unwrap_or("")
                .lines()
                .filter(|l| !l.trim().is_empty())
                .count();
            // Commits made on the worktree branch since it was created.
            let commit_count = if let Some(ref head) = session.original_head {
                let rev = run_git(
                    &session.worktree_path,
                    &["rev-list", "--count", &format!("{}..HEAD", head)],
                )
                .await
                .unwrap_or_default();
                rev.trim().parse::<usize>().unwrap_or(0)
            } else {
                0
            };
            if changed_files > 0 || commit_count > 0 {
                let mut parts = Vec::new();
                if changed_files > 0 {
                    parts.push(format!("{} uncommitted file(s)", changed_files));
                }
                if commit_count > 0 {
                    parts.push(format!("{} commit(s) on the worktree branch", commit_count));
                }
                return ToolResult::error(format!(
                    "Worktree has {}. Removing will discard this work permanently. \
                     Confirm with the user, then re-invoke with discard_changes=true \
                     or use action=\"keep\" to preserve the worktree.",
                    parts.join(" and ")
                ));
            }
        }
        // Action is known to be valid; it is now safe to clear the slot.
        *WORKTREE_SESSION.write().await = None;
        match params.action.as_str() {
            "keep" => {
                // Lock the worktree so a later `git worktree prune` will not
                // reap it while it sits unused on disk. (The old comment here
                // claimed this pruned the worktree — `lock` does the opposite.)
                let _ = run_git(
                    &session.original_cwd,
                    &["worktree", "lock", "--reason", "kept by ExitWorktree", &worktree_str],
                )
                .await;
                ToolResult::success(format!(
                    "Exited worktree. Work preserved at {} on branch {}. \
                     Session is now back in {}.",
                    session.worktree_path.display(),
                    session.branch.as_deref().unwrap_or("(unknown)"),
                    session.original_cwd.display(),
                ))
            }
            _ => {
                // action == "remove" (validated above). Remove the worktree;
                // failures are best-effort since the session is already over.
                let _ = run_git(
                    &session.original_cwd,
                    &["worktree", "remove", "--force", &worktree_str],
                )
                .await;
                // Delete the branch EnterWorktree created for this session.
                if let Some(ref branch) = session.branch {
                    let _ = run_git(
                        &session.original_cwd,
                        &["branch", "-D", branch],
                    )
                    .await;
                }
                ToolResult::success(format!(
                    "Exited and removed worktree at {}. \
                     Session is now back in {}.",
                    session.worktree_path.display(),
                    session.original_cwd.display(),
                ))
            }
        }
    }
}
// ---------------------------------------------------------------------------
// Helper
// ---------------------------------------------------------------------------
/// Run `git` with `args` in `cwd`.
///
/// Returns stdout on success and stderr on failure, both lossily decoded as
/// UTF-8; spawn failures are stringified into the error as well.
async fn run_git(cwd: &std::path::Path, args: &[&str]) -> Result<String, String> {
    let spawned = tokio::process::Command::new("git")
        .args(args)
        .current_dir(cwd)
        .output()
        .await;
    match spawned {
        Err(e) => Err(e.to_string()),
        Ok(out) if out.status.success() => {
            Ok(String::from_utf8_lossy(&out.stdout).into_owned())
        }
        Ok(out) => Err(String::from_utf8_lossy(&out.stderr).into_owned()),
    }
}