初始化提交
Some checks failed
CI / Check / macos-latest (push) Has been cancelled
CI / Check / ubuntu-latest (push) Has been cancelled
CI / Check / windows-latest (push) Has been cancelled
CI / Test / macos-latest (push) Has been cancelled
CI / Test / ubuntu-latest (push) Has been cancelled
CI / Test / windows-latest (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Format (push) Has been cancelled
CI / Security Audit (push) Has been cancelled
CI / Secrets Scan (push) Has been cancelled
CI / Install Script Smoke Test (push) Has been cancelled

This commit is contained in:
iven
2026-03-01 16:24:24 +08:00
commit 92e5def702
492 changed files with 211343 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
[package]
name = "openfang-extensions"
version.workspace = true
edition.workspace = true
license.workspace = true
description = "Extension & integration system for OpenFang — one-click MCP server setup, credential vault, OAuth2 PKCE"
[dependencies]
openfang-types = { path = "../openfang-types" }
serde = { workspace = true }
serde_json = { workspace = true }
toml = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
dashmap = { workspace = true }
tokio = { workspace = true }
reqwest = { workspace = true }
axum = { workspace = true }
zeroize = { workspace = true }
rand = { workspace = true }
sha2 = { workspace = true }
dirs = { workspace = true }
url = { workspace = true }
base64 = { workspace = true }
# Encryption
aes-gcm = { workspace = true }
argon2 = { workspace = true }
[dev-dependencies]
tokio-test = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,42 @@
id = "aws"
name = "AWS"
description = "Manage Amazon Web Services resources including S3, EC2, Lambda, and more through the MCP server"
category = "cloud"
icon = "☁️"
tags = ["cloud", "amazon", "infrastructure", "s3", "ec2", "lambda", "devops"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-aws"]
[[required_env]]
name = "AWS_ACCESS_KEY_ID"
label = "AWS Access Key ID"
help = "The access key ID from your AWS IAM credentials"
is_secret = true
get_url = "https://console.aws.amazon.com/iam/home#/security_credentials"
[[required_env]]
name = "AWS_SECRET_ACCESS_KEY"
label = "AWS Secret Access Key"
help = "The secret access key paired with your access key ID"
is_secret = true
get_url = "https://console.aws.amazon.com/iam/home#/security_credentials"
[[required_env]]
name = "AWS_REGION"
label = "AWS Region"
help = "The default AWS region to use (e.g., us-east-1, eu-west-1)"
is_secret = false
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to the AWS IAM Console (https://console.aws.amazon.com/iam/) and create or select an IAM user with programmatic access.
2. Generate an access key pair and note down the Access Key ID and Secret Access Key.
3. Paste both credentials and your preferred AWS region (default: us-east-1) into the fields above.
"""

View File

@@ -0,0 +1,49 @@
id = "azure-mcp"
name = "Microsoft Azure"
description = "Manage Azure resources including VMs, Storage, and App Services through the MCP server"
category = "cloud"
icon = "🔷"
tags = ["cloud", "microsoft", "infrastructure", "azure", "devops", "enterprise"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-azure"]
[[required_env]]
name = "AZURE_SUBSCRIPTION_ID"
label = "Azure Subscription ID"
help = "Your Azure subscription ID (found in the Azure Portal under Subscriptions)"
is_secret = false
get_url = "https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade"
[[required_env]]
name = "AZURE_TENANT_ID"
label = "Azure Tenant ID"
help = "Your Azure Active Directory tenant ID"
is_secret = false
get_url = "https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview"
[[required_env]]
name = "AZURE_CLIENT_ID"
label = "Azure Client ID"
help = "The application (client) ID of your Azure AD app registration"
is_secret = false
get_url = "https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade"
[[required_env]]
name = "AZURE_CLIENT_SECRET"
label = "Azure Client Secret"
help = "A client secret generated for your Azure AD app registration"
is_secret = true
get_url = "https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. In the Azure Portal, register an application under Azure Active Directory > App registrations and note the Client ID and Tenant ID.
2. Create a client secret under Certificates & Secrets for the registered application.
3. Assign the appropriate RBAC roles to the application on your subscription, then paste all four values into the fields above.
"""

View File

@@ -0,0 +1,35 @@
id = "bitbucket"
name = "Bitbucket"
description = "Access Bitbucket repositories, pull requests, and pipelines through the MCP server"
category = "devtools"
icon = "🪣"
tags = ["git", "vcs", "code", "pull-requests", "ci", "atlassian"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-bitbucket"]
[[required_env]]
name = "BITBUCKET_USERNAME"
label = "Bitbucket Username"
help = "Your Bitbucket Cloud username (not email)"
is_secret = false
get_url = "https://bitbucket.org/account/settings/"
[[required_env]]
name = "BITBUCKET_APP_PASSWORD"
label = "Bitbucket App Password"
help = "An app password with repository and pull request permissions"
is_secret = true
get_url = "https://bitbucket.org/account/settings/app-passwords/"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to Bitbucket > Personal Settings > App passwords (https://bitbucket.org/account/settings/app-passwords/).
2. Create an app password with 'Repositories: Read/Write' and 'Pull requests: Read/Write' permissions.
3. Enter your Bitbucket username and paste the app password into the fields above.
"""

View File

@@ -0,0 +1,28 @@
id = "brave-search"
name = "Brave Search"
description = "Perform web searches using the Brave Search API through the MCP server"
category = "ai"
icon = "🦁"
tags = ["search", "web", "brave", "api", "information-retrieval"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-brave-search"]
[[required_env]]
name = "BRAVE_API_KEY"
label = "Brave Search API Key"
help = "An API key from the Brave Search API dashboard"
is_secret = true
get_url = "https://brave.com/search/api/"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://brave.com/search/api/ and sign up for a Brave Search API plan (free tier available).
2. Generate an API key from your Brave Search API dashboard.
3. Paste the API key into the BRAVE_API_KEY field above.
"""

View File

@@ -0,0 +1,28 @@
id = "discord-mcp"
name = "Discord"
description = "Access Discord servers, channels, and messages through the MCP server"
category = "communication"
icon = "🎮"
tags = ["chat", "messaging", "community", "gaming", "voice"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-discord"]
[[required_env]]
name = "DISCORD_BOT_TOKEN"
label = "Discord Bot Token"
help = "A bot token from the Discord Developer Portal"
is_secret = true
get_url = "https://discord.com/developers/applications"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to the Discord Developer Portal (https://discord.com/developers/applications) and create a new application.
2. Navigate to the 'Bot' section, click 'Add Bot', and copy the bot token.
3. Invite the bot to your server using the OAuth2 URL generator with the required permissions, then paste the token into the DISCORD_BOT_TOKEN field above.
"""

View File

@@ -0,0 +1,28 @@
id = "dropbox"
name = "Dropbox"
description = "Access and manage Dropbox files and folders through the MCP server"
category = "productivity"
icon = "📦"
tags = ["files", "storage", "cloud-storage", "sync", "sharing"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-dropbox"]
[[required_env]]
name = "DROPBOX_ACCESS_TOKEN"
label = "Dropbox Access Token"
help = "A short-lived or long-lived access token from the Dropbox App Console"
is_secret = true
get_url = "https://www.dropbox.com/developers/apps"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to the Dropbox App Console (https://www.dropbox.com/developers/apps) and create a new app or select an existing one.
2. Under the 'OAuth 2' section, generate an access token with the required permissions.
3. Paste the access token into the DROPBOX_ACCESS_TOKEN field above.
"""

View File

@@ -0,0 +1,35 @@
id = "elasticsearch"
name = "Elasticsearch"
description = "Search and manage Elasticsearch indices and documents through the MCP server"
category = "data"
icon = "🔍"
tags = ["search", "database", "indexing", "analytics", "full-text"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-elasticsearch"]
[[required_env]]
name = "ELASTICSEARCH_URL"
label = "Elasticsearch URL"
help = "The base URL of your Elasticsearch cluster (e.g., https://my-cluster.es.us-east-1.aws.found.io:9243)"
is_secret = false
get_url = ""
[[required_env]]
name = "ELASTICSEARCH_API_KEY"
label = "Elasticsearch API Key"
help = "An API key with read/write permissions for the target indices"
is_secret = true
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Obtain your Elasticsearch cluster URL from your Elastic Cloud dashboard or self-hosted instance configuration.
2. Create an API key in Kibana (Stack Management > API Keys) with appropriate index permissions.
3. Paste the cluster URL and API key into the fields above.
"""

View File

@@ -0,0 +1,28 @@
id = "exa-search"
name = "Exa Search"
description = "Perform AI-powered neural searches and retrieve web content through the Exa MCP server"
category = "ai"
icon = "🔎"
tags = ["search", "web", "ai", "neural", "semantic", "information-retrieval"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-exa"]
[[required_env]]
name = "EXA_API_KEY"
label = "Exa API Key"
help = "An API key from the Exa dashboard"
is_secret = true
get_url = "https://dashboard.exa.ai/api-keys"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://dashboard.exa.ai/ and create an account or sign in.
2. Navigate to API Keys (https://dashboard.exa.ai/api-keys) and generate a new key.
3. Paste the API key into the EXA_API_KEY field above.
"""

View File

@@ -0,0 +1,28 @@
id = "gcp-mcp"
name = "Google Cloud Platform"
description = "Manage GCP resources including Compute Engine, Cloud Storage, and BigQuery through the MCP server"
category = "cloud"
icon = "🌐"
tags = ["cloud", "google", "infrastructure", "gce", "gcs", "bigquery", "devops"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-gcp"]
[[required_env]]
name = "GOOGLE_APPLICATION_CREDENTIALS"
label = "Service Account Key Path"
help = "Absolute path to a GCP service account JSON key file"
is_secret = false
get_url = "https://console.cloud.google.com/iam-admin/serviceaccounts"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to the GCP Console > IAM & Admin > Service Accounts (https://console.cloud.google.com/iam-admin/serviceaccounts) and create a new service account with the necessary roles.
2. Generate a JSON key file for the service account and save it to a secure location on your filesystem.
3. Enter the absolute path to the JSON key file in the GOOGLE_APPLICATION_CREDENTIALS field above.
"""

View File

@@ -0,0 +1,34 @@
id = "github"
name = "GitHub"
description = "Access GitHub repositories, issues, pull requests, and organizations through the official MCP server"
category = "devtools"
icon = "🐙"
tags = ["git", "vcs", "code", "issues", "pull-requests", "ci"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-github"]
[[required_env]]
name = "GITHUB_PERSONAL_ACCESS_TOKEN"
label = "GitHub Personal Access Token"
help = "A fine-grained or classic PAT with repo and read:org scopes"
is_secret = true
get_url = "https://github.com/settings/tokens"
[oauth]
provider = "github"
scopes = ["repo", "read:org"]
auth_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://github.com/settings/tokens and create a Personal Access Token (classic or fine-grained) with 'repo' and 'read:org' scopes.
2. Paste the token into the GITHUB_PERSONAL_ACCESS_TOKEN field above.
3. Alternatively, use the OAuth flow to authorize OpenFang directly with your GitHub account.
"""

View File

@@ -0,0 +1,28 @@
id = "gitlab"
name = "GitLab"
description = "Access GitLab projects, merge requests, issues, and CI/CD pipelines through the MCP server"
category = "devtools"
icon = "🦊"
tags = ["git", "vcs", "code", "merge-requests", "ci", "devops"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-gitlab"]
[[required_env]]
name = "GITLAB_PERSONAL_ACCESS_TOKEN"
label = "GitLab Personal Access Token"
help = "A personal access token with api scope from your GitLab instance"
is_secret = true
get_url = "https://gitlab.com/-/user_settings/personal_access_tokens"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Navigate to GitLab > User Settings > Access Tokens (https://gitlab.com/-/user_settings/personal_access_tokens).
2. Create a new personal access token with the 'api' scope and an appropriate expiration date.
3. Paste the token into the GITLAB_PERSONAL_ACCESS_TOKEN field above.
"""

View File

@@ -0,0 +1,27 @@
id = "gmail"
name = "Gmail"
description = "Read, send, and manage Gmail messages and drafts through the Anthropic MCP server"
category = "productivity"
icon = "📧"
tags = ["email", "google", "messaging", "inbox", "communication"]
[transport]
type = "stdio"
command = "npx"
args = ["@anthropic/server-gmail"]
[oauth]
provider = "google"
scopes = ["https://www.googleapis.com/auth/gmail.modify"]
auth_url = "https://accounts.google.com/o/oauth2/v2/auth"
token_url = "https://oauth2.googleapis.com/token"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Click 'Connect' to initiate the OAuth flow with your Google account.
2. Grant OpenFang permission to read and modify your Gmail messages when prompted.
3. The connection will be established automatically after authorization.
"""

View File

@@ -0,0 +1,27 @@
id = "google-calendar"
name = "Google Calendar"
description = "Manage Google Calendar events, schedules, and availability through the Anthropic MCP server"
category = "productivity"
icon = "📅"
tags = ["calendar", "scheduling", "google", "events", "meetings"]
[transport]
type = "stdio"
command = "npx"
args = ["@anthropic/server-google-calendar"]
[oauth]
provider = "google"
scopes = ["https://www.googleapis.com/auth/calendar"]
auth_url = "https://accounts.google.com/o/oauth2/v2/auth"
token_url = "https://oauth2.googleapis.com/token"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Click 'Connect' to initiate the OAuth flow with your Google account.
2. Grant OpenFang access to your Google Calendar when prompted.
3. The connection will be established automatically after authorization.
"""

View File

@@ -0,0 +1,27 @@
id = "google-drive"
name = "Google Drive"
description = "Browse, search, and read files from Google Drive through the Anthropic MCP server"
category = "productivity"
icon = "📁"
tags = ["files", "storage", "google", "documents", "cloud-storage"]
[transport]
type = "stdio"
command = "npx"
args = ["@anthropic/server-google-drive"]
[oauth]
provider = "google"
scopes = ["https://www.googleapis.com/auth/drive.readonly"]
auth_url = "https://accounts.google.com/o/oauth2/v2/auth"
token_url = "https://oauth2.googleapis.com/token"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Click 'Connect' to initiate the OAuth flow with your Google account.
2. Grant OpenFang read-only access to your Google Drive files when prompted.
3. The connection will be established automatically after authorization.
"""

View File

@@ -0,0 +1,42 @@
id = "jira"
name = "Jira"
description = "Access Jira issues, projects, boards, and sprints through the Atlassian MCP server"
category = "devtools"
icon = "📋"
tags = ["project-management", "issues", "agile", "atlassian", "tracking"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-atlassian"]
[[required_env]]
name = "JIRA_API_TOKEN"
label = "Jira API Token"
help = "An API token generated from your Atlassian account"
is_secret = true
get_url = "https://id.atlassian.com/manage-profile/security/api-tokens"
[[required_env]]
name = "JIRA_INSTANCE_URL"
label = "Jira Instance URL"
help = "Your Jira Cloud instance URL (e.g., https://yourcompany.atlassian.net)"
is_secret = false
get_url = ""
[[required_env]]
name = "JIRA_USER_EMAIL"
label = "Jira User Email"
help = "The email address associated with your Atlassian account"
is_secret = false
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://id.atlassian.com/manage-profile/security/api-tokens and create a new API token.
2. Enter your Jira Cloud instance URL (e.g., https://yourcompany.atlassian.net) and the email linked to your Atlassian account.
3. Paste the API token into the JIRA_API_TOKEN field above.
"""

View File

@@ -0,0 +1,28 @@
id = "linear"
name = "Linear"
description = "Manage Linear issues, projects, cycles, and teams through the MCP server"
category = "devtools"
icon = "📐"
tags = ["project-management", "issues", "agile", "tracking", "sprint"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-linear"]
[[required_env]]
name = "LINEAR_API_KEY"
label = "Linear API Key"
help = "A personal API key from your Linear account settings"
is_secret = true
get_url = "https://linear.app/settings/api"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Open Linear and go to Settings > API (https://linear.app/settings/api).
2. Click 'Create key' to generate a new personal API key.
3. Paste the key into the LINEAR_API_KEY field above.
"""

View File

@@ -0,0 +1,28 @@
id = "mongodb"
name = "MongoDB"
description = "Query and manage MongoDB databases and collections through the MCP server"
category = "data"
icon = "🍃"
tags = ["database", "nosql", "document", "mongo", "queries"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-mongodb"]
[[required_env]]
name = "MONGODB_URI"
label = "MongoDB Connection URI"
help = "A full MongoDB connection string (e.g., mongodb+srv://user:password@cluster.mongodb.net/dbname)"
is_secret = true
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Obtain your MongoDB connection URI from MongoDB Atlas (Clusters > Connect > Drivers) or your self-hosted instance.
2. Ensure the database user has the necessary read/write permissions for the collections you want to access.
3. Paste the full connection URI into the MONGODB_URI field above.
"""

View File

@@ -0,0 +1,28 @@
id = "notion"
name = "Notion"
description = "Access and manage Notion pages, databases, and blocks through the MCP server"
category = "productivity"
icon = "📝"
tags = ["notes", "wiki", "knowledge-base", "documentation", "databases"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-notion"]
[[required_env]]
name = "NOTION_API_KEY"
label = "Notion Integration Token"
help = "An internal integration token created in your Notion workspace settings"
is_secret = true
get_url = "https://www.notion.so/my-integrations"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://www.notion.so/my-integrations and click 'New integration'.
2. Give it a name, select your workspace, and grant the required capabilities (Read/Update/Insert content).
3. Copy the Internal Integration Token and paste it into the NOTION_API_KEY field above. Then share relevant pages with the integration in Notion.
"""

View File

@@ -0,0 +1,28 @@
id = "postgresql"
name = "PostgreSQL"
description = "Query and manage PostgreSQL databases through the MCP server"
category = "data"
icon = "🐘"
tags = ["database", "sql", "relational", "postgres", "queries"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-postgres"]
[[required_env]]
name = "POSTGRES_CONNECTION_STRING"
label = "PostgreSQL Connection String"
help = "A full connection URI (e.g., postgresql://user:password@host:5432/dbname)"
is_secret = true
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Obtain your PostgreSQL connection string in the format: postgresql://user:password@host:5432/dbname.
2. Ensure the database user has the necessary read/write permissions for the tables you want to access.
3. Paste the full connection string into the POSTGRES_CONNECTION_STRING field above.
"""

View File

@@ -0,0 +1,28 @@
id = "redis"
name = "Redis"
description = "Access and manage Redis key-value stores through the MCP server"
category = "data"
icon = "🔴"
tags = ["database", "cache", "key-value", "in-memory", "nosql"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-redis"]
[[required_env]]
name = "REDIS_URL"
label = "Redis Connection URL"
help = "A Redis connection URL (e.g., redis://user:password@host:6379/0)"
is_secret = true
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Obtain your Redis connection URL from your Redis hosting provider or local instance (e.g., redis://localhost:6379/0).
2. If authentication is required, include the password in the URL: redis://user:password@host:6379/0.
3. Paste the full connection URL into the REDIS_URL field above.
"""

View File

@@ -0,0 +1,35 @@
id = "sentry"
name = "Sentry"
description = "Monitor and manage Sentry error tracking, issues, and releases through the MCP server"
category = "devtools"
icon = "🐛"
tags = ["monitoring", "errors", "debugging", "observability", "apm"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-sentry"]
[[required_env]]
name = "SENTRY_AUTH_TOKEN"
label = "Sentry Auth Token"
help = "An authentication token with project:read and event:read scopes"
is_secret = true
get_url = "https://sentry.io/settings/account/api/auth-tokens/"
[[required_env]]
name = "SENTRY_ORG_SLUG"
label = "Sentry Organization Slug"
help = "Your Sentry organization slug (found in Settings > General)"
is_secret = false
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to Sentry > Settings > Auth Tokens (https://sentry.io/settings/account/api/auth-tokens/) and create a new token with 'project:read' and 'event:read' scopes.
2. Find your organization slug in Sentry > Settings > General Settings.
3. Paste the auth token and organization slug into the fields above.
"""

View File

@@ -0,0 +1,41 @@
id = "slack"
name = "Slack"
description = "Access Slack channels, messages, and users through the MCP server"
category = "communication"
icon = "💬"
tags = ["chat", "messaging", "team", "channels", "collaboration"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-slack"]
[[required_env]]
name = "SLACK_BOT_TOKEN"
label = "Slack Bot Token"
help = "A bot user OAuth token starting with xoxb-"
is_secret = true
get_url = "https://api.slack.com/apps"
[[required_env]]
name = "SLACK_TEAM_ID"
label = "Slack Team ID"
help = "Your Slack workspace team ID (found in workspace settings or URL)"
is_secret = false
get_url = ""
[oauth]
provider = "slack"
scopes = ["channels:read", "chat:write", "users:read"]
auth_url = "https://slack.com/oauth/v2/authorize"
token_url = "https://slack.com/api/oauth.v2.access"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Go to https://api.slack.com/apps and create a new Slack app (or use an existing one). Add the 'channels:read', 'chat:write', and 'users:read' bot token scopes.
2. Install the app to your workspace and copy the Bot User OAuth Token (starts with xoxb-).
3. Paste the bot token and your workspace Team ID into the fields above, or use the OAuth flow to authorize directly.
"""

View File

@@ -0,0 +1,28 @@
id = "sqlite-mcp"
name = "SQLite"
description = "Query and manage local SQLite databases through the MCP server"
category = "data"
icon = "💾"
tags = ["database", "sql", "relational", "sqlite", "local", "embedded"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-sqlite"]
[[required_env]]
name = "SQLITE_DB_PATH"
label = "SQLite Database Path"
help = "Absolute path to the SQLite database file (e.g., /home/user/data/mydb.sqlite)"
is_secret = false
get_url = ""
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Locate the SQLite database file you want to connect to on your local filesystem.
2. Enter the absolute path to the database file in the SQLITE_DB_PATH field above.
3. Ensure the file has appropriate read/write permissions for the OpenFang process.
"""

View File

@@ -0,0 +1,27 @@
id = "teams-mcp"
name = "Microsoft Teams"
description = "Access Microsoft Teams channels, chats, and messages through the MCP server"
category = "communication"
icon = "👥"
tags = ["chat", "messaging", "microsoft", "enterprise", "collaboration"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-teams"]
[oauth]
provider = "microsoft"
scopes = ["Team.ReadBasic.All", "Chat.ReadWrite"]
auth_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Click 'Connect' to initiate the OAuth flow with your Microsoft account.
2. Sign in with your Microsoft 365 account and grant OpenFang permission to read teams and read/write chats.
3. The connection will be established automatically after authorization.
"""

View File

@@ -0,0 +1,28 @@
id = "todoist"
name = "Todoist"
description = "Manage Todoist tasks, projects, and labels through the MCP server"
category = "productivity"
icon = "✅"
tags = ["tasks", "todo", "project-management", "productivity", "gtd"]
[transport]
type = "stdio"
command = "npx"
args = ["@modelcontextprotocol/server-todoist"]
[[required_env]]
name = "TODOIST_API_KEY"
label = "Todoist API Token"
help = "Your personal API token from Todoist settings"
is_secret = true
get_url = "https://todoist.com/prefs/integrations"
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Open Todoist and go to Settings > Integrations > Developer (https://todoist.com/prefs/integrations).
2. Copy your API token from the 'API token' section.
3. Paste the token into the TODOIST_API_KEY field above.
"""

View File

@@ -0,0 +1,173 @@
//! Compile-time embedded integration templates.
//!
//! All 25 integration TOML files are baked into the binary via `include_str!()`,
//! ensuring they ship with every OpenFang build with zero filesystem dependencies.
/// Returns all bundled integration templates as `(id, TOML content)` pairs.
pub fn bundled_integrations() -> Vec<(&'static str, &'static str)> {
vec![
// ── DevTools (6) ────────────────────────────────────────────────────
("github", include_str!("../integrations/github.toml")),
("gitlab", include_str!("../integrations/gitlab.toml")),
("linear", include_str!("../integrations/linear.toml")),
("jira", include_str!("../integrations/jira.toml")),
("bitbucket", include_str!("../integrations/bitbucket.toml")),
("sentry", include_str!("../integrations/sentry.toml")),
// ── Productivity (6) ────────────────────────────────────────────────
(
"google-calendar",
include_str!("../integrations/google-calendar.toml"),
),
("gmail", include_str!("../integrations/gmail.toml")),
("notion", include_str!("../integrations/notion.toml")),
("todoist", include_str!("../integrations/todoist.toml")),
(
"google-drive",
include_str!("../integrations/google-drive.toml"),
),
("dropbox", include_str!("../integrations/dropbox.toml")),
// ── Communication (3) ───────────────────────────────────────────────
("slack", include_str!("../integrations/slack.toml")),
(
"discord-mcp",
include_str!("../integrations/discord-mcp.toml"),
),
("teams-mcp", include_str!("../integrations/teams-mcp.toml")),
// ── Data (5) ────────────────────────────────────────────────────────
(
"postgresql",
include_str!("../integrations/postgresql.toml"),
),
(
"sqlite-mcp",
include_str!("../integrations/sqlite-mcp.toml"),
),
("mongodb", include_str!("../integrations/mongodb.toml")),
("redis", include_str!("../integrations/redis.toml")),
(
"elasticsearch",
include_str!("../integrations/elasticsearch.toml"),
),
// ── Cloud (3) ───────────────────────────────────────────────────────
("aws", include_str!("../integrations/aws.toml")),
("gcp-mcp", include_str!("../integrations/gcp-mcp.toml")),
("azure-mcp", include_str!("../integrations/azure-mcp.toml")),
// ── AI & Search (2) ─────────────────────────────────────────────────
(
"brave-search",
include_str!("../integrations/brave-search.toml"),
),
(
"exa-search",
include_str!("../integrations/exa-search.toml"),
),
]
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::IntegrationTemplate;

    #[test]
    fn bundled_count() {
        // Exactly 25 templates are baked into the binary.
        assert_eq!(bundled_integrations().len(), 25);
    }

    #[test]
    fn all_bundled_parse() {
        // Every embedded TOML must deserialize, and its declared id must
        // match the key it is registered under.
        for (id, content) in bundled_integrations() {
            let template: IntegrationTemplate = toml::from_str(content)
                .unwrap_or_else(|e| panic!("Failed to parse '{}': {}", id, e));
            assert_eq!(template.id, id);
            assert!(!template.name.is_empty());
            assert!(!template.description.is_empty());
        }
    }

    #[test]
    fn category_counts() {
        use crate::IntegrationCategory;
        let templates: Vec<IntegrationTemplate> = bundled_integrations()
            .iter()
            .map(|(_, content)| toml::from_str(content).unwrap())
            .collect();
        // One counting closure instead of six hand-rolled filter chains.
        let count = |category: IntegrationCategory| {
            templates.iter().filter(|t| t.category == category).count()
        };
        assert_eq!(count(IntegrationCategory::DevTools), 6);
        assert_eq!(count(IntegrationCategory::Productivity), 6);
        assert_eq!(count(IntegrationCategory::Communication), 3);
        assert_eq!(count(IntegrationCategory::Data), 5);
        assert_eq!(count(IntegrationCategory::Cloud), 3);
        assert_eq!(count(IntegrationCategory::AI), 2);
    }

    #[test]
    fn no_duplicate_ids() {
        // Each id may appear at most once in the bundled catalog.
        let mut seen = std::collections::HashSet::new();
        for (id, _) in bundled_integrations() {
            assert!(seen.insert(id), "Duplicate integration id: {}", id);
        }
    }

    #[test]
    fn all_have_transport() {
        // All bundled integrations launch their MCP server via `npx` stdio.
        for (id, content) in bundled_integrations() {
            let template: IntegrationTemplate = toml::from_str(content)
                .unwrap_or_else(|e| panic!("Failed to parse '{}': {}", id, e));
            match &template.transport {
                crate::McpTransportTemplate::Stdio { command, args } => {
                    assert_eq!(command, "npx", "{} should use npx", id);
                    assert!(!args.is_empty(), "{} should have args", id);
                }
                crate::McpTransportTemplate::Sse { .. } => {
                    panic!("{} unexpectedly uses SSE transport", id);
                }
            }
        }
    }

    #[test]
    fn oauth_integrations() {
        // Exactly these six integrations declare an [oauth] section.
        let oauth_ids: Vec<&'static str> = bundled_integrations()
            .into_iter()
            .filter(|(_, content)| {
                let t: IntegrationTemplate = toml::from_str(content).unwrap();
                t.oauth.is_some()
            })
            .map(|(id, _)| id)
            .collect();
        for expected in [
            "github",
            "google-calendar",
            "gmail",
            "google-drive",
            "slack",
            "teams-mcp",
        ] {
            assert!(oauth_ids.contains(&expected));
        }
        assert_eq!(oauth_ids.len(), 6);
    }
}

View File

@@ -0,0 +1,254 @@
//! Credential resolution chain — resolves secrets from multiple sources.
//!
//! Resolution order:
//! 1. Encrypted vault (`~/.openfang/vault.enc`)
//! 2. Dotenv file (`~/.openfang/.env`)
//! 3. Process environment variable
//! 4. Interactive prompt (CLI only, when `interactive` is true)
use crate::vault::CredentialVault;
use crate::ExtensionResult;
use std::collections::HashMap;
use std::path::Path;
use tracing::debug;
use zeroize::Zeroizing;
/// Credential resolver — tries multiple sources in priority order.
///
/// Order: encrypted vault → dotenv file → process env var → optional
/// interactive prompt (see [`CredentialResolver::resolve`]).
pub struct CredentialResolver {
    /// Reference to the credential vault.
    vault: Option<CredentialVault>,
    /// Dotenv entries (loaded once from `~/.openfang/.env` at construction).
    dotenv: HashMap<String, String>,
    /// Whether to prompt interactively as a last resort.
    interactive: bool,
}
impl CredentialResolver {
    /// Create a resolver with optional vault and dotenv path.
    ///
    /// Dotenv load errors (unreadable file, etc.) are swallowed and
    /// treated as an empty set of entries.
    pub fn new(vault: Option<CredentialVault>, dotenv_path: Option<&Path>) -> Self {
        let dotenv = if let Some(path) = dotenv_path {
            load_dotenv(path).unwrap_or_default()
        } else {
            HashMap::new()
        };
        Self {
            vault,
            dotenv,
            interactive: false,
        }
    }
    /// Enable interactive prompting as a last-resort source.
    pub fn with_interactive(mut self, interactive: bool) -> Self {
        self.interactive = interactive;
        self
    }
    /// Resolve a credential by key, trying all sources in order.
    ///
    /// Returns the secret wrapped in `Zeroizing` so it is wiped on drop.
    /// A locked vault is silently skipped rather than erroring.
    pub fn resolve(&self, key: &str) -> Option<Zeroizing<String>> {
        // 1. Vault
        if let Some(ref vault) = self.vault {
            if vault.is_unlocked() {
                if let Some(val) = vault.get(key) {
                    debug!("Credential '{}' resolved from vault", key);
                    return Some(val);
                }
            }
        }
        // 2. Dotenv file
        if let Some(val) = self.dotenv.get(key) {
            debug!("Credential '{}' resolved from .env", key);
            return Some(Zeroizing::new(val.clone()));
        }
        // 3. Environment variable
        if let Ok(val) = std::env::var(key) {
            debug!("Credential '{}' resolved from env var", key);
            return Some(Zeroizing::new(val));
        }
        // 4. Interactive prompt (CLI only)
        if self.interactive {
            if let Some(val) = prompt_secret(key) {
                debug!("Credential '{}' resolved from interactive prompt", key);
                return Some(val);
            }
        }
        None
    }
    /// Check if a credential is available (without prompting).
    ///
    /// Mirrors the first three sources of [`resolve`](Self::resolve);
    /// the interactive prompt is intentionally excluded.
    pub fn has_credential(&self, key: &str) -> bool {
        // Check vault
        if let Some(ref vault) = self.vault {
            if vault.is_unlocked() && vault.get(key).is_some() {
                return true;
            }
        }
        // Check dotenv
        if self.dotenv.contains_key(key) {
            return true;
        }
        // Check env
        std::env::var(key).is_ok()
    }
    /// Resolve all required credentials for an integration.
    /// Returns a map of env_var_name -> value for all resolved credentials.
    /// Keys that cannot be resolved are simply absent from the result.
    pub fn resolve_all(&self, keys: &[&str]) -> HashMap<String, Zeroizing<String>> {
        let mut result = HashMap::new();
        for key in keys {
            if let Some(val) = self.resolve(key) {
                result.insert(key.to_string(), val);
            }
        }
        result
    }
    /// Check which credentials are missing.
    pub fn missing_credentials(&self, keys: &[&str]) -> Vec<String> {
        keys.iter()
            .filter(|k| !self.has_credential(k))
            .map(|k| k.to_string())
            .collect()
    }
    /// Store a credential in the vault (if available).
    ///
    /// Errors with `ExtensionError::Vault` when no vault was configured.
    pub fn store_in_vault(&mut self, key: &str, value: Zeroizing<String>) -> ExtensionResult<()> {
        if let Some(ref mut vault) = self.vault {
            vault.set(key.to_string(), value)?;
            Ok(())
        } else {
            Err(crate::ExtensionError::Vault(
                "No vault configured".to_string(),
            ))
        }
    }
}
/// Load a dotenv file into a HashMap.
///
/// Lines are `KEY=VALUE`; blank lines and `#` comments are skipped, keys
/// and values are trimmed, and a matching pair of surrounding single or
/// double quotes is stripped from the value. Returns an empty map when
/// the file does not exist.
fn load_dotenv(path: &Path) -> Result<HashMap<String, String>, std::io::Error> {
    if !path.exists() {
        return Ok(HashMap::new());
    }
    let content = std::fs::read_to_string(path)?;
    let mut map = HashMap::new();
    for line in content.lines() {
        let line = line.trim();
        if line.is_empty() || line.starts_with('#') {
            continue;
        }
        if let Some((key, value)) = line.split_once('=') {
            let key = key.trim();
            let mut value = value.trim().to_string();
            // Strip surrounding quotes. The length guard is required: a
            // value that is a single quote character both starts and ends
            // with that quote, and slicing `1..len-1` (i.e. `1..0`) on it
            // would panic.
            if value.len() >= 2
                && ((value.starts_with('"') && value.ends_with('"'))
                    || (value.starts_with('\'') && value.ends_with('\'')))
            {
                value = value[1..value.len() - 1].to_string();
            }
            map.insert(key.to_string(), value);
        }
    }
    Ok(map)
}
/// Prompt the user interactively for a secret value.
///
/// Writes the prompt to stderr and reads one line from stdin. Returns
/// `None` on any I/O failure or when the entered line is empty after
/// trimming.
fn prompt_secret(key: &str) -> Option<Zeroizing<String>> {
    use std::io::{self, Write};
    eprint!("Enter value for {}: ", key);
    io::stderr().flush().ok()?;
    let mut input = String::new();
    io::stdin().read_line(&mut input).ok()?;
    let trimmed = input.trim();
    match trimmed.is_empty() {
        true => None,
        false => Some(Zeroizing::new(trimmed.to_string())),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): several tests below mutate process-wide env vars; they
    // use distinct key names to avoid interfering with each other, but
    // could still race with unrelated env-reading tests when the harness
    // runs them in parallel — confirm if flakiness appears.

    // Parsing: comments skipped, double/single quotes stripped, empty
    // values preserved.
    #[test]
    fn load_dotenv_basic() {
        let dir = tempfile::tempdir().unwrap();
        let env_path = dir.path().join(".env");
        std::fs::write(
            &env_path,
            r#"
# Comment
GITHUB_TOKEN=ghp_test123
SLACK_TOKEN="xoxb-quoted"
EMPTY=
SINGLE_QUOTED='single'
"#,
        )
        .unwrap();
        let map = load_dotenv(&env_path).unwrap();
        assert_eq!(map.get("GITHUB_TOKEN").unwrap(), "ghp_test123");
        assert_eq!(map.get("SLACK_TOKEN").unwrap(), "xoxb-quoted");
        assert_eq!(map.get("EMPTY").unwrap(), "");
        assert_eq!(map.get("SINGLE_QUOTED").unwrap(), "single");
    }
    // A missing file yields an empty map rather than an error.
    #[test]
    fn load_dotenv_nonexistent() {
        let map = load_dotenv(Path::new("/nonexistent/.env")).unwrap();
        assert!(map.is_empty());
    }
    // Source 3 (process env) works when no vault/dotenv is configured.
    #[test]
    fn resolver_env_var() {
        std::env::set_var("TEST_CRED_RESOLVE_123", "from_env");
        let resolver = CredentialResolver::new(None, None);
        let val = resolver.resolve("TEST_CRED_RESOLVE_123").unwrap();
        assert_eq!(val.as_str(), "from_env");
        assert!(resolver.has_credential("TEST_CRED_RESOLVE_123"));
        std::env::remove_var("TEST_CRED_RESOLVE_123");
    }
    // Source ordering: dotenv (source 2) wins over the process env (source 3).
    #[test]
    fn resolver_dotenv_overrides_env() {
        let dir = tempfile::tempdir().unwrap();
        let env_path = dir.path().join(".env");
        std::fs::write(&env_path, "TEST_CRED_DOT_456=from_dotenv\n").unwrap();
        std::env::set_var("TEST_CRED_DOT_456", "from_env");
        let resolver = CredentialResolver::new(None, Some(&env_path));
        let val = resolver.resolve("TEST_CRED_DOT_456").unwrap();
        assert_eq!(val.as_str(), "from_dotenv"); // dotenv takes priority
        std::env::remove_var("TEST_CRED_DOT_456");
    }
    #[test]
    fn resolver_missing_credentials() {
        let resolver = CredentialResolver::new(None, None);
        let missing = resolver.missing_credentials(&["DEFINITELY_NOT_SET_XYZ_789"]);
        assert_eq!(missing, vec!["DEFINITELY_NOT_SET_XYZ_789"]);
    }
    // resolve_all silently omits unresolvable keys instead of erroring.
    #[test]
    fn resolver_resolve_all() {
        std::env::set_var("TEST_MULTI_A", "a_val");
        std::env::set_var("TEST_MULTI_B", "b_val");
        let resolver = CredentialResolver::new(None, None);
        let resolved = resolver.resolve_all(&["TEST_MULTI_A", "TEST_MULTI_B", "TEST_MULTI_C"]);
        assert_eq!(resolved.len(), 2);
        assert_eq!(resolved["TEST_MULTI_A"].as_str(), "a_val");
        assert_eq!(resolved["TEST_MULTI_B"].as_str(), "b_val");
        std::env::remove_var("TEST_MULTI_A");
        std::env::remove_var("TEST_MULTI_B");
    }
}

View File

@@ -0,0 +1,302 @@
//! Integration health monitor — tracks MCP server status with auto-reconnect.
//!
//! Background tokio task pings MCP connections, auto-reconnects with
//! exponential backoff (5s -> 10s -> 20s -> ... -> 5min max, 10 attempts max).
use crate::IntegrationStatus;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use serde::Serialize;
use std::sync::Arc;
use std::time::Duration;
/// Health status for a single integration.
///
/// Mutated via [`IntegrationHealth::mark_ok`], [`mark_error`](IntegrationHealth::mark_error)
/// and [`mark_reconnecting`](IntegrationHealth::mark_reconnecting).
#[derive(Debug, Clone, Serialize)]
pub struct IntegrationHealth {
    /// Integration ID.
    pub id: String,
    /// Current status.
    pub status: IntegrationStatus,
    /// Number of tools available from this MCP server.
    pub tool_count: usize,
    /// Last successful health check.
    pub last_ok: Option<DateTime<Utc>>,
    /// Last error message (cleared on a successful check).
    pub last_error: Option<String>,
    /// Consecutive failures (reset to 0 on success).
    pub consecutive_failures: u32,
    /// Whether auto-reconnect is in progress.
    pub reconnecting: bool,
    /// Reconnect attempt count (reset to 0 on success).
    pub reconnect_attempts: u32,
    /// Uptime since last successful connect (None while disconnected).
    pub connected_since: Option<DateTime<Utc>>,
}
impl IntegrationHealth {
/// Create a new health record.
pub fn new(id: String) -> Self {
Self {
id,
status: IntegrationStatus::Available,
tool_count: 0,
last_ok: None,
last_error: None,
consecutive_failures: 0,
reconnecting: false,
reconnect_attempts: 0,
connected_since: None,
}
}
/// Mark as healthy.
pub fn mark_ok(&mut self, tool_count: usize) {
self.status = IntegrationStatus::Ready;
self.tool_count = tool_count;
self.last_ok = Some(Utc::now());
self.last_error = None;
self.consecutive_failures = 0;
self.reconnecting = false;
self.reconnect_attempts = 0;
if self.connected_since.is_none() {
self.connected_since = Some(Utc::now());
}
}
/// Mark as failed.
pub fn mark_error(&mut self, error: String) {
self.status = IntegrationStatus::Error(error.clone());
self.last_error = Some(error);
self.consecutive_failures += 1;
self.connected_since = None;
}
/// Mark as reconnecting.
pub fn mark_reconnecting(&mut self) {
self.reconnecting = true;
self.reconnect_attempts += 1;
}
}
/// Health monitor configuration.
///
/// Defaults: reconnect enabled, 10 attempts max, 5-minute backoff cap,
/// 60-second check interval (see the `Default` impl).
#[derive(Debug, Clone)]
pub struct HealthMonitorConfig {
    /// Whether auto-reconnect is enabled.
    pub auto_reconnect: bool,
    /// Maximum reconnect attempts before giving up.
    pub max_reconnect_attempts: u32,
    /// Maximum backoff duration in seconds.
    pub max_backoff_secs: u64,
    /// Base check interval in seconds.
    pub check_interval_secs: u64,
}
impl Default for HealthMonitorConfig {
    // Matches the policy documented at the top of this module:
    // 5s -> 10s -> ... -> 5min backoff cap, 10 attempts max.
    fn default() -> Self {
        Self {
            auto_reconnect: true,
            max_reconnect_attempts: 10,
            max_backoff_secs: 300,
            check_interval_secs: 60,
        }
    }
}
/// The health monitor — stores health state for all integrations.
///
/// The map is behind `Arc<DashMap>` so a background task can share it
/// (see [`HealthMonitor::health_map`]).
pub struct HealthMonitor {
    /// Health records keyed by integration ID.
    health: Arc<DashMap<String, IntegrationHealth>>,
    /// Configuration.
    config: HealthMonitorConfig,
}
impl HealthMonitor {
    /// Create a new health monitor.
    pub fn new(config: HealthMonitorConfig) -> Self {
        Self {
            health: Arc::new(DashMap::new()),
            config,
        }
    }
    /// Register an integration for monitoring.
    ///
    /// Idempotent: an existing record is left untouched.
    pub fn register(&self, id: &str) {
        self.health
            .entry(id.to_string())
            .or_insert_with(|| IntegrationHealth::new(id.to_string()));
    }
    /// Unregister an integration.
    pub fn unregister(&self, id: &str) {
        self.health.remove(id);
    }
    /// Report a successful health check.
    ///
    /// No-op for unregistered ids.
    pub fn report_ok(&self, id: &str, tool_count: usize) {
        if let Some(mut entry) = self.health.get_mut(id) {
            entry.mark_ok(tool_count);
        }
    }
    /// Report a health check failure.
    ///
    /// No-op for unregistered ids.
    pub fn report_error(&self, id: &str, error: String) {
        if let Some(mut entry) = self.health.get_mut(id) {
            entry.mark_error(error);
        }
    }
    /// Get health for a specific integration.
    ///
    /// Clones the record out so no DashMap shard lock is held by callers.
    pub fn get_health(&self, id: &str) -> Option<IntegrationHealth> {
        self.health.get(id).map(|e| e.clone())
    }
    /// Get health for all integrations (snapshot; order unspecified).
    pub fn all_health(&self) -> Vec<IntegrationHealth> {
        self.health.iter().map(|e| e.value().clone()).collect()
    }
    /// Calculate exponential backoff duration for a given attempt.
    ///
    /// 5 * 2^attempt seconds; the shift is clamped at 2^10 to avoid
    /// overflow, and the result is capped at `max_backoff_secs`.
    pub fn backoff_duration(&self, attempt: u32) -> Duration {
        let base_secs = 5u64;
        let backoff = base_secs.saturating_mul(1u64 << attempt.min(10));
        Duration::from_secs(backoff.min(self.config.max_backoff_secs))
    }
    /// Check if an integration should be reconnected.
    ///
    /// True only when auto-reconnect is on, the integration is currently
    /// in the `Error` state, and the attempt budget is not exhausted.
    pub fn should_reconnect(&self, id: &str) -> bool {
        if !self.config.auto_reconnect {
            return false;
        }
        if let Some(entry) = self.health.get(id) {
            matches!(entry.status, IntegrationStatus::Error(_))
                && entry.reconnect_attempts < self.config.max_reconnect_attempts
        } else {
            false
        }
    }
    /// Mark an integration as reconnecting.
    pub fn mark_reconnecting(&self, id: &str) {
        if let Some(mut entry) = self.health.get_mut(id) {
            entry.mark_reconnecting();
        }
    }
    /// Get a reference to the health DashMap (for background task).
    pub fn health_map(&self) -> Arc<DashMap<String, IntegrationHealth>> {
        self.health.clone()
    }
    /// Get the config.
    pub fn config(&self) -> &HealthMonitorConfig {
        &self.config
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Register starts in `Available`; an OK report transitions to `Ready`
    // and starts timestamps.
    #[test]
    fn health_monitor_register_report() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        monitor.register("github");
        let h = monitor.get_health("github").unwrap();
        assert_eq!(h.status, IntegrationStatus::Available);
        assert_eq!(h.tool_count, 0);
        monitor.report_ok("github", 12);
        let h = monitor.get_health("github").unwrap();
        assert_eq!(h.status, IntegrationStatus::Ready);
        assert_eq!(h.tool_count, 12);
        assert!(h.last_ok.is_some());
        assert!(h.connected_since.is_some());
    }
    // Consecutive failures accumulate and are reset by a successful check.
    #[test]
    fn health_monitor_error_tracking() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        monitor.register("slack");
        monitor.report_error("slack", "Connection refused".to_string());
        let h = monitor.get_health("slack").unwrap();
        assert!(matches!(h.status, IntegrationStatus::Error(_)));
        assert_eq!(h.consecutive_failures, 1);
        monitor.report_error("slack", "Timeout".to_string());
        let h = monitor.get_health("slack").unwrap();
        assert_eq!(h.consecutive_failures, 2);
        // Recovery
        monitor.report_ok("slack", 5);
        let h = monitor.get_health("slack").unwrap();
        assert_eq!(h.consecutive_failures, 0);
        assert_eq!(h.status, IntegrationStatus::Ready);
    }
    // Backoff doubles from 5s and saturates at the configured 300s cap.
    #[test]
    fn backoff_exponential() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        assert_eq!(monitor.backoff_duration(0), Duration::from_secs(5));
        assert_eq!(monitor.backoff_duration(1), Duration::from_secs(10));
        assert_eq!(monitor.backoff_duration(2), Duration::from_secs(20));
        assert_eq!(monitor.backoff_duration(3), Duration::from_secs(40));
        // Capped at 300s
        assert_eq!(monitor.backoff_duration(10), Duration::from_secs(300));
        assert_eq!(monitor.backoff_duration(20), Duration::from_secs(300));
    }
    // Reconnect only while in Error state and under the attempt budget.
    #[test]
    fn should_reconnect_logic() {
        let monitor = HealthMonitor::new(HealthMonitorConfig {
            auto_reconnect: true,
            max_reconnect_attempts: 3,
            ..Default::default()
        });
        monitor.register("test");
        // Available — no reconnect needed
        assert!(!monitor.should_reconnect("test"));
        // Error — should reconnect
        monitor.report_error("test", "fail".to_string());
        assert!(monitor.should_reconnect("test"));
        // Exhaust attempts
        for _ in 0..3 {
            monitor.mark_reconnecting("test");
        }
        assert!(!monitor.should_reconnect("test"));
    }
    #[test]
    fn health_unregister() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        monitor.register("github");
        assert!(monitor.get_health("github").is_some());
        monitor.unregister("github");
        assert!(monitor.get_health("github").is_none());
    }
    #[test]
    fn all_health() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        monitor.register("a");
        monitor.register("b");
        monitor.register("c");
        let all = monitor.all_health();
        assert_eq!(all.len(), 3);
    }
    // With auto_reconnect off, even an errored integration is left alone.
    #[test]
    fn auto_reconnect_disabled() {
        let monitor = HealthMonitor::new(HealthMonitorConfig {
            auto_reconnect: false,
            ..Default::default()
        });
        monitor.register("test");
        monitor.report_error("test", "fail".to_string());
        assert!(!monitor.should_reconnect("test"));
    }
}

View File

@@ -0,0 +1,402 @@
//! Integration installer — one-click add/remove flow.
//!
//! Handles the complete flow: template lookup → credential resolution →
//! OAuth if needed → write to integrations.toml → hot-reload daemon.
use crate::credentials::CredentialResolver;
use crate::registry::IntegrationRegistry;
use crate::{ExtensionError, ExtensionResult, InstalledIntegration, IntegrationStatus};
use chrono::Utc;
use std::collections::HashMap;
use tracing::{info, warn};
use zeroize::Zeroizing;
/// Result of an installation attempt.
#[derive(Debug)]
pub struct InstallResult {
    /// Integration ID.
    pub id: String,
    /// Final status (`Ready` if all credentials resolved, else `Setup`).
    pub status: IntegrationStatus,
    /// Number of MCP tools that will be available.
    // NOTE(review): currently always 0 at install time; populated later by
    // the health monitor once the server is reachable — confirm.
    pub tool_count: usize,
    /// Message to display to the user.
    pub message: String,
}
/// Install an integration.
///
/// Steps:
/// 1. Look up template in registry.
/// 2. Check credentials (vault → .env → env → prompt).
/// 3. If `--key` provided, store in vault.
/// 4. If OAuth required, run PKCE flow.
/// 5. Write to integrations.toml.
/// 6. Return install result.
///
/// # Errors
/// `NotFound` for an unknown id; `AlreadyInstalled` on a repeat install;
/// registry persistence errors from `registry.install`.
pub fn install_integration(
    registry: &mut IntegrationRegistry,
    resolver: &mut CredentialResolver,
    id: &str,
    provided_keys: &HashMap<String, String>,
) -> ExtensionResult<InstallResult> {
    // 1. Look up template
    let template = registry
        .get_template(id)
        .ok_or_else(|| ExtensionError::NotFound(id.to_string()))?
        .clone();
    // Check not already installed
    if registry.is_installed(id) {
        return Err(ExtensionError::AlreadyInstalled(id.to_string()));
    }
    // 2. Store provided keys in vault
    // A vault failure is non-fatal: the install proceeds and the key is
    // still treated as provided below.
    // NOTE(review): if the vault store fails, the key is not persisted
    // anywhere, yet the status check below still counts it as present —
    // confirm the key reaches the MCP server env on a later run.
    for (key, value) in provided_keys {
        if let Err(e) = resolver.store_in_vault(key, Zeroizing::new(value.clone())) {
            warn!("Could not store {} in vault: {}", key, e);
            // Fall through — the key is still in the provided_keys map
        }
    }
    // 3. Check all required credentials
    let required_keys: Vec<&str> = template
        .required_env
        .iter()
        .map(|e| e.name.as_str())
        .collect();
    let missing = resolver.missing_credentials(&required_keys);
    // For provided keys, check them too
    let actually_missing: Vec<String> = missing
        .into_iter()
        .filter(|k| !provided_keys.contains_key(k))
        .collect();
    // Ready only when every required credential is resolvable or provided.
    let status = if actually_missing.is_empty() {
        IntegrationStatus::Ready
    } else {
        IntegrationStatus::Setup
    };
    // 4. Determine OAuth provider
    let oauth_provider = template.oauth.as_ref().map(|o| o.provider.clone());
    // 5. Write install record
    let entry = InstalledIntegration {
        id: id.to_string(),
        installed_at: Utc::now(),
        enabled: true,
        oauth_provider,
        config: HashMap::new(),
    };
    registry.install(entry)?;
    // 6. Build result message
    let message = match &status {
        IntegrationStatus::Ready => {
            format!(
                "{} added. MCP tools will be available as mcp_{}_*.",
                template.name, id
            )
        }
        IntegrationStatus::Setup => {
            // Show the human label alongside each missing env var name.
            let missing_labels: Vec<String> = actually_missing
                .iter()
                .filter_map(|key| {
                    template
                        .required_env
                        .iter()
                        .find(|e| e.name == *key)
                        .map(|e| format!("{} ({})", e.label, e.name))
                })
                .collect();
            format!(
                "{} installed but needs credentials: {}",
                template.name,
                missing_labels.join(", ")
            )
        }
        _ => format!("{} installed.", template.name),
    };
    info!("{}", message);
    Ok(InstallResult {
        id: id.to_string(),
        status,
        tool_count: 0,
        message,
    })
}
/// Remove an installed integration.
///
/// The returned message uses the template's display name when the id is
/// known to the registry, otherwise the raw id. Errors bubble up from
/// `registry.uninstall` (e.g. when not installed).
pub fn remove_integration(registry: &mut IntegrationRegistry, id: &str) -> ExtensionResult<String> {
    let name = match registry.get_template(id) {
        Some(t) => t.name.clone(),
        None => id.to_string(),
    };
    registry.uninstall(id)?;
    let msg = format!("{name} removed.");
    info!("{msg}");
    Ok(msg)
}
/// List all integrations with their status.
///
/// Status derivation: not installed → `Available`; installed but disabled
/// → `Disabled`; installed and enabled → `Ready` when every required
/// credential resolves, otherwise `Setup`.
pub fn list_integrations(
    registry: &IntegrationRegistry,
    resolver: &CredentialResolver,
) -> Vec<IntegrationListEntry> {
    let mut entries = Vec::new();
    for template in registry.list_templates() {
        let installed = registry.get_installed(&template.id);
        let status = match installed {
            Some(inst) if !inst.enabled => IntegrationStatus::Disabled,
            Some(_inst) => {
                // Re-check credentials on every listing so the display
                // reflects the current env/vault state.
                let required_keys: Vec<&str> = template
                    .required_env
                    .iter()
                    .map(|e| e.name.as_str())
                    .collect();
                let missing = resolver.missing_credentials(&required_keys);
                if missing.is_empty() {
                    IntegrationStatus::Ready
                } else {
                    IntegrationStatus::Setup
                }
            }
            None => IntegrationStatus::Available,
        };
        entries.push(IntegrationListEntry {
            id: template.id.clone(),
            name: template.name.clone(),
            icon: template.icon.clone(),
            category: template.category.to_string(),
            status,
            description: template.description.clone(),
        });
    }
    entries
}
/// Flat list entry for display.
#[derive(Debug, Clone)]
pub struct IntegrationListEntry {
    /// Template ID (e.g. "github").
    pub id: String,
    /// Human-readable name.
    pub name: String,
    /// Emoji icon from the template.
    pub icon: String,
    /// Category display label (via `IntegrationCategory::to_string`).
    pub category: String,
    /// Derived install/credential status.
    pub status: IntegrationStatus,
    /// Short description from the template.
    pub description: String,
}
/// Search available integrations.
///
/// Delegates matching to `registry.search` and maps hits to list entries.
// NOTE(review): unlike `list_integrations`, installed entries here are
// reported `Ready` without checking credentials (no resolver is passed),
// so a credential-less install can show `Setup` in the list but `Ready`
// in search results — confirm this divergence is intended.
pub fn search_integrations(
    registry: &IntegrationRegistry,
    query: &str,
) -> Vec<IntegrationListEntry> {
    registry
        .search(query)
        .into_iter()
        .map(|t| {
            let installed = registry.get_installed(&t.id);
            let status = match installed {
                Some(inst) if !inst.enabled => IntegrationStatus::Disabled,
                Some(_) => IntegrationStatus::Ready,
                None => IntegrationStatus::Available,
            };
            IntegrationListEntry {
                id: t.id.clone(),
                name: t.name.clone(),
                icon: t.icon.clone(),
                category: t.category.to_string(),
                status,
                description: t.description.clone(),
            }
        })
        .collect()
}
/// Generate scaffold files for a new custom integration.
///
/// Creates `dir` (and parents) if needed and writes a commented
/// `integration.toml` starter template. Returns a user-facing message
/// with the written path. Errors are I/O failures from create/write.
pub fn scaffold_integration(dir: &std::path::Path) -> ExtensionResult<String> {
    let template = r#"# Custom Integration Template
# Place this in ~/.openfang/integrations/ or use `openfang add --custom <path>`
id = "my-integration"
name = "My Integration"
description = "A custom MCP server integration"
category = "devtools"
icon = "🔧"
tags = ["custom"]
[transport]
type = "stdio"
command = "npx"
args = ["my-mcp-server"]
[[required_env]]
name = "MY_API_KEY"
label = "API Key"
help = "Get your API key from https://example.com/api-keys"
is_secret = true
[health_check]
interval_secs = 60
unhealthy_threshold = 3
setup_instructions = """
1. Install the MCP server: npm install -g my-mcp-server
2. Get your API key from https://example.com/api-keys
3. Run: openfang add my-integration --key=<your-key>
"""
"#;
    let path = dir.join("integration.toml");
    std::fs::create_dir_all(dir)?;
    std::fs::write(&path, template)?;
    Ok(format!(
        "Integration template created at {}",
        path.display()
    ))
}
/// Generate scaffold files for a new skill.
///
/// Writes both a `skill.toml` manifest and a `SKILL.md` prompt body
/// (with matching YAML front matter) into `dir`, creating it if needed.
pub fn scaffold_skill(dir: &std::path::Path) -> ExtensionResult<String> {
    let skill_toml = r#"name = "my-skill"
description = "A custom skill"
version = "0.1.0"
runtime = "prompt_only"
"#;
    let skill_md = r#"---
name: my-skill
description: A custom skill
version: 0.1.0
runtime: prompt_only
---
# My Skill
You are an expert at [domain]. When the user asks about [topic], provide [behavior].
## Guidelines
- Be concise and accurate
- Cite sources when possible
"#;
    std::fs::create_dir_all(dir)?;
    std::fs::write(dir.join("skill.toml"), skill_toml)?;
    std::fs::write(dir.join("SKILL.md"), skill_md)?;
    Ok(format!("Skill scaffold created at {}", dir.display()))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::registry::IntegrationRegistry;
    // Happy path: install a bundled template, then remove it.
    #[test]
    fn install_and_remove() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let mut resolver = CredentialResolver::new(None, None);
        // Install github (will be Setup status since no token)
        let result =
            install_integration(&mut registry, &mut resolver, "github", &HashMap::new()).unwrap();
        assert_eq!(result.id, "github");
        // Status depends on whether GITHUB_PERSONAL_ACCESS_TOKEN is in env
        assert!(
            result.status == IntegrationStatus::Ready || result.status == IntegrationStatus::Setup
        );
        // Remove
        let msg = remove_integration(&mut registry, "github").unwrap();
        assert!(msg.contains("GitHub"));
        assert!(!registry.is_installed("github"));
    }
    // Keys passed on the command line are accepted even with no vault.
    #[test]
    fn install_with_key() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let mut resolver = CredentialResolver::new(None, None);
        // Provide key directly
        let mut keys = HashMap::new();
        keys.insert("NOTION_API_KEY".to_string(), "ntn_test_key_123".to_string());
        let result = install_integration(&mut registry, &mut resolver, "notion", &keys).unwrap();
        assert_eq!(result.id, "notion");
    }
    // A second install of the same id must fail with AlreadyInstalled.
    #[test]
    fn install_already_installed() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let mut resolver = CredentialResolver::new(None, None);
        install_integration(&mut registry, &mut resolver, "github", &HashMap::new()).unwrap();
        let err = install_integration(&mut registry, &mut resolver, "github", &HashMap::new())
            .unwrap_err();
        assert!(err.to_string().contains("already"));
    }
    #[test]
    fn remove_not_installed() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let err = remove_integration(&mut registry, "github").unwrap_err();
        assert!(err.to_string().contains("not installed"));
    }
    // Fresh registry: all 25 bundled templates are listed as Available.
    #[test]
    fn list_integrations_all() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let resolver = CredentialResolver::new(None, None);
        let list = list_integrations(&registry, &resolver);
        assert_eq!(list.len(), 25);
        assert!(list
            .iter()
            .all(|e| e.status == IntegrationStatus::Available));
    }
    #[test]
    fn search_integrations_query() {
        let dir = tempfile::tempdir().unwrap();
        let mut registry = IntegrationRegistry::new(dir.path());
        registry.load_bundled();
        let results = search_integrations(&registry, "git");
        assert!(results.iter().any(|e| e.id == "github"));
        assert!(results.iter().any(|e| e.id == "gitlab"));
    }
    #[test]
    fn scaffold_integration_creates_files() {
        let dir = tempfile::tempdir().unwrap();
        let sub = dir.path().join("my-integration");
        let msg = scaffold_integration(&sub).unwrap();
        assert!(sub.join("integration.toml").exists());
        assert!(msg.contains("integration.toml"));
    }
    #[test]
    fn scaffold_skill_creates_files() {
        let dir = tempfile::tempdir().unwrap();
        let sub = dir.path().join("my-skill");
        let msg = scaffold_skill(&sub).unwrap();
        assert!(sub.join("skill.toml").exists());
        assert!(sub.join("SKILL.md").exists());
        assert!(msg.contains("my-skill"));
    }
}

View File

@@ -0,0 +1,328 @@
//! OpenFang Extensions — one-click integration system.
//!
//! This crate provides:
//! - **Integration Registry**: 25 bundled MCP server templates (GitHub, Slack, etc.)
//! - **Credential Vault**: AES-256-GCM encrypted storage with OS keyring support
//! - **OAuth2 PKCE**: Localhost callback flows for Google/GitHub/Microsoft/Slack
//! - **Health Monitor**: Auto-reconnect with exponential backoff
//! - **Installer**: One-click `openfang add <name>` flow
pub mod bundled;
pub mod credentials;
pub mod health;
pub mod installer;
pub mod oauth;
pub mod registry;
pub mod vault;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
// ─── Error types ─────────────────────────────────────────────────────────────
/// Errors produced by the extension/integration subsystem.
#[derive(Debug, thiserror::Error)]
pub enum ExtensionError {
    /// No template with this id exists in the registry.
    #[error("Integration not found: {0}")]
    NotFound(String),
    /// Attempted to install an id that is already installed.
    #[error("Integration already installed: {0}")]
    AlreadyInstalled(String),
    /// Operation requires the integration to be installed first.
    #[error("Integration not installed: {0}")]
    NotInstalled(String),
    /// No source in the resolution chain produced this credential.
    #[error("Credential not found: {0}")]
    CredentialNotFound(String),
    /// Vault storage/crypto failure with a free-form message.
    #[error("Vault error: {0}")]
    Vault(String),
    /// Vault exists but has not been unlocked.
    #[error("Vault locked — unlock with vault key or OPENFANG_VAULT_KEY env var")]
    VaultLocked,
    /// OAuth2 flow failure.
    #[error("OAuth error: {0}")]
    OAuth(String),
    /// Template/config TOML could not be parsed.
    #[error("TOML parse error: {0}")]
    TomlParse(String),
    /// Underlying filesystem error (auto-converted from `std::io::Error`).
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// HTTP request failure.
    #[error("HTTP error: {0}")]
    Http(String),
    /// MCP server health probe failure.
    #[error("Health check failed: {0}")]
    HealthCheck(String),
}
/// Crate-wide result alias.
pub type ExtensionResult<T> = Result<T, ExtensionError>;
// ─── Core types ──────────────────────────────────────────────────────────────
/// Category of an integration.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum IntegrationCategory {
DevTools,
Productivity,
Communication,
Data,
Cloud,
AI,
}
impl std::fmt::Display for IntegrationCategory {
    /// Human-readable category label shown in listings and the TUI.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::DevTools => "Dev Tools",
            Self::Productivity => "Productivity",
            Self::Communication => "Communication",
            Self::Data => "Data",
            Self::Cloud => "Cloud",
            Self::AI => "AI & Search",
        };
        f.write_str(label)
    }
}
/// MCP transport template — how to launch the server.
///
/// TOML uses an internally tagged form: `type = "stdio"` or `type = "sse"`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum McpTransportTemplate {
    /// Spawn a local process and speak MCP over stdin/stdout.
    Stdio {
        command: String,
        #[serde(default)]
        args: Vec<String>,
    },
    /// Connect to a remote MCP server over Server-Sent Events.
    Sse {
        url: String,
    },
}
/// An environment variable required by an integration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RequiredEnvVar {
    /// Env var name (e.g., "GITHUB_PERSONAL_ACCESS_TOKEN").
    pub name: String,
    /// Human-readable label (e.g., "Personal Access Token").
    pub label: String,
    /// How to obtain this credential.
    pub help: String,
    /// Whether this is a secret (should be stored in vault).
    /// Defaults to `true` when omitted from the TOML.
    #[serde(default = "default_true")]
    pub is_secret: bool,
    /// URL where the user can create the key.
    #[serde(default)]
    pub get_url: Option<String>,
}
// Serde default helper: fields tagged `#[serde(default = "default_true")]`
// (e.g. `is_secret`, `enabled`) default to true when absent.
fn default_true() -> bool {
    true
}
/// OAuth provider configuration template.
///
/// Present in a template's `[oauth]` section when the integration uses
/// OAuth instead of (or in addition to) API keys.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OAuthTemplate {
    /// OAuth provider (google, github, microsoft, slack).
    pub provider: String,
    /// OAuth scopes required.
    pub scopes: Vec<String>,
    /// Authorization URL.
    pub auth_url: String,
    /// Token exchange URL.
    pub token_url: String,
}
/// Health check configuration for an integration.
///
/// `#[serde(default)]` on the struct lets templates omit either field
/// (or the whole `[health_check]` table) and fall back to the defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct HealthCheckConfig {
    /// How often to check health (seconds).
    pub interval_secs: u64,
    /// Consider unhealthy after this many consecutive failures.
    pub unhealthy_threshold: u32,
}
impl Default for HealthCheckConfig {
    // 60s checks, unhealthy after 3 consecutive failures.
    fn default() -> Self {
        Self {
            interval_secs: 60,
            unhealthy_threshold: 3,
        }
    }
}
/// A bundled integration template — describes how to set up an MCP server.
///
/// Deserialized from the TOML files embedded by the `bundled` module or
/// from user-supplied custom integration files.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IntegrationTemplate {
    /// Unique identifier (e.g., "github").
    pub id: String,
    /// Human-readable name (e.g., "GitHub").
    pub name: String,
    /// Short description.
    pub description: String,
    /// Category for browsing.
    pub category: IntegrationCategory,
    /// Icon (emoji).
    #[serde(default)]
    pub icon: String,
    /// MCP transport configuration.
    pub transport: McpTransportTemplate,
    /// Required credentials.
    #[serde(default)]
    pub required_env: Vec<RequiredEnvVar>,
    /// OAuth configuration (None = API key only).
    #[serde(default)]
    pub oauth: Option<OAuthTemplate>,
    /// Searchable tags.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Setup instructions (displayed in TUI detail view).
    #[serde(default)]
    pub setup_instructions: String,
    /// Health check configuration (defaults when table is omitted).
    #[serde(default)]
    pub health_check: HealthCheckConfig,
}
/// Status of an installed integration.
///
/// Derived at display time from install state + credential resolution,
/// or reported by the health monitor (`Ready`/`Error`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IntegrationStatus {
    /// Configured and MCP server running.
    Ready,
    /// Installed but credentials missing.
    Setup,
    /// Not installed.
    Available,
    /// MCP server errored (carries the last error message).
    Error(String),
    /// Disabled by user.
    Disabled,
}
impl std::fmt::Display for IntegrationStatus {
    /// User-facing status label; `Error` includes its message.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Error(msg) => write!(f, "Error: {msg}"),
            Self::Ready => f.write_str("Ready"),
            Self::Setup => f.write_str("Setup"),
            Self::Available => f.write_str("Available"),
            Self::Disabled => f.write_str("Disabled"),
        }
    }
}
/// An installed integration record (persisted in integrations.toml).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstalledIntegration {
    /// Template ID.
    pub id: String,
    /// When installed.
    pub installed_at: DateTime<Utc>,
    /// Whether enabled. Defaults to `true` when absent from the TOML.
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// OAuth provider if using OAuth (e.g., "google").
    #[serde(default)]
    pub oauth_provider: Option<String>,
    /// Custom configuration overrides.
    #[serde(default)]
    pub config: HashMap<String, String>,
}
/// Top-level structure for `~/.openfang/integrations.toml`.
///
/// Each install is an `[[installed]]` array-of-tables entry.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct IntegrationsFile {
    #[serde(default)]
    pub installed: Vec<InstalledIntegration>,
}
/// Combined view of an integration (template + install state).
#[derive(Debug, Clone, Serialize)]
pub struct IntegrationInfo {
    /// The template definition.
    pub template: IntegrationTemplate,
    /// Current derived status.
    pub status: IntegrationStatus,
    /// Install record, when installed.
    pub installed: Option<InstalledIntegration>,
    /// Tools exposed by the MCP server.
    // NOTE(review): population source not visible here — presumably the
    // health monitor; confirm.
    pub tool_count: usize,
}
#[cfg(test)]
mod tests {
    use super::*;

    // Display impls produce the exact labels shown in the TUI.
    #[test]
    fn category_display() {
        assert_eq!(IntegrationCategory::DevTools.to_string(), "Dev Tools");
        assert_eq!(
            IntegrationCategory::Productivity.to_string(),
            "Productivity"
        );
        assert_eq!(IntegrationCategory::AI.to_string(), "AI & Search");
    }

    #[test]
    fn status_display() {
        assert_eq!(IntegrationStatus::Ready.to_string(), "Ready");
        assert_eq!(IntegrationStatus::Setup.to_string(), "Setup");
        assert_eq!(
            IntegrationStatus::Error("timeout".to_string()).to_string(),
            "Error: timeout"
        );
    }

    // A full template (transport, required_env, health_check) parses from TOML.
    #[test]
    fn integration_template_roundtrip() {
        let toml_str = r#"
id = "test"
name = "Test Integration"
description = "A test"
category = "devtools"
icon = "T"
tags = ["test"]
setup_instructions = "Just test it."
[transport]
type = "stdio"
command = "test-server"
args = ["--flag"]
[[required_env]]
name = "TEST_KEY"
label = "Test Key"
help = "Get it from test.com"
is_secret = true
get_url = "https://test.com/keys"
[health_check]
interval_secs = 30
unhealthy_threshold = 5
"#;
        let template: IntegrationTemplate = toml::from_str(toml_str).unwrap();
        assert_eq!(template.id, "test");
        assert_eq!(template.category, IntegrationCategory::DevTools);
        assert_eq!(template.required_env.len(), 1);
        assert!(template.required_env[0].is_secret);
        assert_eq!(template.health_check.interval_secs, 30);
    }

    // The persisted integrations.toml format parses, including optional fields.
    #[test]
    fn installed_integration_roundtrip() {
        let toml_str = r#"
[[installed]]
id = "github"
installed_at = "2026-02-23T10:00:00Z"
enabled = true
[[installed]]
id = "google-calendar"
installed_at = "2026-02-23T10:05:00Z"
enabled = true
oauth_provider = "google"
"#;
        let file: IntegrationsFile = toml::from_str(toml_str).unwrap();
        assert_eq!(file.installed.len(), 2);
        assert_eq!(file.installed[0].id, "github");
        assert!(file.installed[0].enabled);
        assert_eq!(file.installed[1].oauth_provider.as_deref(), Some("google"));
    }

    #[test]
    fn error_display() {
        let err = ExtensionError::NotFound("github".to_string());
        assert!(err.to_string().contains("github"));
        let err = ExtensionError::VaultLocked;
        assert!(err.to_string().contains("vault"));
    }
}

View File

@@ -0,0 +1,385 @@
//! OAuth2 PKCE flows — localhost callback for Google/GitHub/Microsoft/Slack.
//!
//! Launches a temporary localhost HTTP server, opens the browser to the auth URL,
//! receives the callback with the authorization code, and exchanges it for tokens.
//! All tokens are stored in the credential vault with `Zeroizing<String>`.
use crate::{ExtensionError, ExtensionResult, OAuthTemplate};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{oneshot, Mutex};
use tracing::{debug, info, warn};
use zeroize::Zeroizing;
/// Default OAuth client IDs for public PKCE flows.
/// These are safe to embed — PKCE doesn't require a client_secret.
pub fn default_client_ids() -> HashMap<&'static str, &'static str> {
    // Placeholder IDs — users should configure their own via config.
    [
        ("google", "openfang-google-client-id"),
        ("github", "openfang-github-client-id"),
        ("microsoft", "openfang-microsoft-client-id"),
        ("slack", "openfang-slack-client-id"),
    ]
    .into_iter()
    .collect()
}
/// Resolve OAuth client IDs with config overrides applied on top of defaults.
pub fn resolve_client_ids(
config: &openfang_types::config::OAuthConfig,
) -> HashMap<String, String> {
let defaults = default_client_ids();
let mut resolved: HashMap<String, String> = defaults
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
if let Some(ref id) = config.google_client_id {
resolved.insert("google".into(), id.clone());
}
if let Some(ref id) = config.github_client_id {
resolved.insert("github".into(), id.clone());
}
if let Some(ref id) = config.microsoft_client_id {
resolved.insert("microsoft".into(), id.clone());
}
if let Some(ref id) = config.slack_client_id {
resolved.insert("slack".into(), id.clone());
}
resolved
}
/// OAuth2 token response (raw from provider, for deserialization).
///
/// Fields other than `access_token` are optional in practice and default to
/// empty/zero when the provider omits them.
#[derive(Debug, Serialize, Deserialize)]
pub struct OAuthTokens {
    /// Access token for API calls.
    pub access_token: String,
    /// Refresh token for renewal (if provided).
    #[serde(default)]
    pub refresh_token: Option<String>,
    /// Token type (usually "Bearer").
    #[serde(default)]
    pub token_type: String,
    /// Seconds until access_token expires. 0 if the provider omitted it.
    #[serde(default)]
    pub expires_in: u64,
    /// Scopes granted (space-separated, as returned by the provider).
    #[serde(default)]
    pub scope: String,
}
impl OAuthTokens {
    /// Copy the access token into a buffer that is zeroed on drop.
    pub fn access_token_zeroizing(&self) -> Zeroizing<String> {
        Zeroizing::new(self.access_token.clone())
    }
    /// Copy the refresh token (if any) into a buffer that is zeroed on drop.
    pub fn refresh_token_zeroizing(&self) -> Option<Zeroizing<String>> {
        self.refresh_token.clone().map(Zeroizing::new)
    }
}
/// PKCE code verifier and challenge pair.
///
/// The verifier is the secret half (sent only in the token exchange) and is
/// zeroed on drop; the challenge is derived from it and sent in the auth URL.
struct PkcePair {
    /// Random base64url verifier (secret).
    verifier: Zeroizing<String>,
    /// base64url(SHA-256(verifier)) — the S256 challenge.
    challenge: String,
}
/// Generate a PKCE code_verifier and code_challenge (S256).
fn generate_pkce() -> PkcePair {
    // 32 random bytes → 43-char base64url verifier.
    let mut raw = [0u8; 32];
    rand::rngs::OsRng.fill_bytes(&mut raw);
    let verifier = Zeroizing::new(base64_url_encode(&raw));
    // challenge = BASE64URL(SHA-256(verifier)) — the S256 method.
    let digest = Sha256::digest(verifier.as_bytes());
    let challenge = base64_url_encode(&digest);
    PkcePair { verifier, challenge }
}
/// URL-safe base64 encoding (no padding).
fn base64_url_encode(data: &[u8]) -> String {
base64::Engine::encode(&base64::engine::general_purpose::URL_SAFE_NO_PAD, data)
}
/// Generate a random `state` parameter for CSRF protection (128 random bits,
/// base64url-encoded).
fn generate_state() -> String {
    let mut raw = [0u8; 16];
    rand::rngs::OsRng.fill_bytes(&mut raw);
    base64_url_encode(&raw)
}
/// Run the complete OAuth2 PKCE flow for a given template.
///
/// 1. Start localhost callback server on a random port.
/// 2. Open browser to authorization URL.
/// 3. Wait for callback with authorization code.
/// 4. Exchange code for tokens.
/// 5. Return tokens.
///
/// # Errors
/// Returns [`ExtensionError::OAuth`] if the callback server cannot bind, no
/// code arrives within 5 minutes, or the token exchange fails.
pub async fn run_pkce_flow(oauth: &OAuthTemplate, client_id: &str) -> ExtensionResult<OAuthTokens> {
    let pkce = generate_pkce();
    let state = generate_state();
    // Find an available port (binding port 0 lets the OS pick a free one).
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
        .await
        .map_err(|e| ExtensionError::OAuth(format!("Failed to bind localhost: {e}")))?;
    let port = listener
        .local_addr()
        .map_err(|e| ExtensionError::OAuth(format!("Failed to get port: {e}")))?
        .port();
    let redirect_uri = format!("http://127.0.0.1:{port}/callback");
    info!("OAuth callback server listening on port {port}");
    // Build authorization URL with the S256 code challenge.
    let scopes = oauth.scopes.join(" ");
    let auth_url = format!(
        "{}?client_id={}&redirect_uri={}&response_type=code&scope={}&state={}&code_challenge={}&code_challenge_method=S256",
        oauth.auth_url,
        urlencoding_encode(client_id),
        urlencoding_encode(&redirect_uri),
        urlencoding_encode(&scopes),
        urlencoding_encode(&state),
        urlencoding_encode(&pkce.challenge),
    );
    // Open browser; on failure fall back to printing the URL for manual use.
    info!("Opening browser for OAuth authorization...");
    if let Err(e) = open_browser(&auth_url) {
        warn!("Could not open browser: {e}");
        eprintln!("\nPlease open this URL in your browser:\n{auth_url}\n");
    }
    // One-shot channel carries the auth code from the HTTP handler back here.
    // Wrapped in Arc<Mutex<Option<..>>> so the (cloneable) handler closure can
    // take the sender exactly once.
    let (code_tx, code_rx) = oneshot::channel::<String>();
    let code_tx = Arc::new(Mutex::new(Some(code_tx)));
    let expected_state = state.clone();
    // Spawn callback handler
    let server = axum::Router::new().route(
        "/callback",
        axum::routing::get({
            let code_tx = code_tx.clone();
            move |query: axum::extract::Query<CallbackParams>| {
                let code_tx = code_tx.clone();
                let expected_state = expected_state.clone();
                async move {
                    // CSRF check: the provider must echo back our state value.
                    // NOTE(review): plain `!=` comparison, not constant-time —
                    // acceptable for a localhost one-shot flow, but confirm.
                    if query.state != expected_state {
                        return axum::response::Html(
                            "<h1>Error</h1><p>Invalid state parameter. Possible CSRF attack.</p>"
                                .to_string(),
                        );
                    }
                    if let Some(ref error) = query.error {
                        return axum::response::Html(format!(
                            "<h1>Error</h1><p>OAuth error: {error}</p>"
                        ));
                    }
                    if let Some(ref code) = query.code {
                        // First callback wins: take() ensures the sender is
                        // used at most once even if the URL is reloaded.
                        if let Some(tx) = code_tx.lock().await.take() {
                            let _ = tx.send(code.clone());
                        }
                        axum::response::Html(
                            "<h1>Success!</h1><p>Authorization complete. You can close this tab.</p><script>window.close()</script>"
                                .to_string(),
                        )
                    } else {
                        axum::response::Html(
                            "<h1>Error</h1><p>No authorization code received.</p>".to_string(),
                        )
                    }
                }
            }
        }),
    );
    // Serve in the background; aborted once we have the code (or time out).
    let server_handle = tokio::spawn(async move {
        axum::serve(listener, server).await.ok();
    });
    // Wait for auth code with 5-minute timeout
    let code = tokio::time::timeout(std::time::Duration::from_secs(300), code_rx)
        .await
        .map_err(|_| ExtensionError::OAuth("OAuth flow timed out after 5 minutes".to_string()))?
        .map_err(|_| ExtensionError::OAuth("Callback channel closed".to_string()))?;
    // Shut down callback server
    server_handle.abort();
    debug!("Received authorization code, exchanging for tokens...");
    // Exchange code for tokens via form-encoded POST (standard PKCE exchange:
    // the code_verifier proves we initiated the flow; no client_secret needed).
    let client = reqwest::Client::new();
    let mut params = HashMap::new();
    params.insert("grant_type", "authorization_code");
    params.insert("code", &code);
    params.insert("redirect_uri", &redirect_uri);
    params.insert("client_id", client_id);
    let verifier_str = pkce.verifier.as_str().to_string();
    params.insert("code_verifier", &verifier_str);
    let resp = client
        .post(&oauth.token_url)
        .form(&params)
        .send()
        .await
        .map_err(|e| ExtensionError::OAuth(format!("Token exchange request failed: {e}")))?;
    if !resp.status().is_success() {
        let status = resp.status();
        let body = resp.text().await.unwrap_or_default();
        return Err(ExtensionError::OAuth(format!(
            "Token exchange failed ({}): {}",
            status, body
        )));
    }
    let tokens: OAuthTokens = resp
        .json()
        .await
        .map_err(|e| ExtensionError::OAuth(format!("Token response parse failed: {e}")))?;
    info!(
        "OAuth tokens obtained (expires_in: {}s, scopes: {})",
        tokens.expires_in, tokens.scope
    );
    Ok(tokens)
}
/// Callback query parameters.
#[derive(Deserialize)]
struct CallbackParams {
    /// Authorization code (present on success).
    #[serde(default)]
    code: Option<String>,
    /// CSRF state echoed back by the provider; must match what we sent.
    #[serde(default)]
    state: String,
    /// Error code from the provider (present on denial/failure).
    #[serde(default)]
    error: Option<String>,
}
/// Simple percent-encoding for URL parameters.
///
/// Unreserved characters (`A-Z a-z 0-9 - _ . ~`, per RFC 3986) pass through
/// unchanged; every other byte is emitted as `%XX` with uppercase hex.
/// Operates on UTF-8 bytes, so multi-byte characters become multiple escapes.
fn urlencoding_encode(s: &str) -> String {
    // Uppercase hex digit lookup — avoids allocating a format! String per
    // escaped byte in this per-character loop.
    const HEX: &[u8; 16] = b"0123456789ABCDEF";
    // Worst case every byte escapes to 3 chars.
    let mut result = String::with_capacity(s.len() * 3);
    for byte in s.bytes() {
        match byte {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                result.push(byte as char);
            }
            _ => {
                result.push('%');
                result.push(HEX[(byte >> 4) as usize] as char);
                result.push(HEX[(byte & 0x0F) as usize] as char);
            }
        }
    }
    result
}
/// Open a URL in the default browser.
///
/// Uses `cmd /C start` on Windows, `open` on macOS, and `xdg-open` on Linux.
/// The child process is spawned and not waited on. On any other target OS all
/// three cfg blocks compile out and this returns `Ok(())` without opening
/// anything — NOTE(review): confirm the silent no-op is intended there.
fn open_browser(url: &str) -> Result<(), String> {
    #[cfg(target_os = "windows")]
    {
        // The empty "" argument fills `start`'s window-title slot so the URL
        // itself is not mistaken for a title.
        std::process::Command::new("cmd")
            .args(["/C", "start", "", url])
            .spawn()
            .map_err(|e| e.to_string())?;
    }
    #[cfg(target_os = "macos")]
    {
        std::process::Command::new("open")
            .arg(url)
            .spawn()
            .map_err(|e| e.to_string())?;
    }
    #[cfg(target_os = "linux")]
    {
        std::process::Command::new("xdg-open")
            .arg(url)
            .spawn()
            .map_err(|e| e.to_string())?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn pkce_generation() {
        let pkce = generate_pkce();
        assert!(!pkce.verifier.is_empty());
        assert!(!pkce.challenge.is_empty());
        // Verifier and challenge should be different
        assert_ne!(pkce.verifier.as_str(), &pkce.challenge);
    }

    #[test]
    fn pkce_challenge_is_sha256() {
        let pkce = generate_pkce();
        // Verify the S256 derivation: challenge = base64url(sha256(verifier))
        let mut hasher = Sha256::new();
        hasher.update(pkce.verifier.as_bytes());
        let digest = hasher.finalize();
        let expected = base64_url_encode(&digest);
        assert_eq!(pkce.challenge, expected);
    }

    // Two consecutive states must differ (CSRF token must be unpredictable).
    #[test]
    fn state_randomness() {
        let s1 = generate_state();
        let s2 = generate_state();
        assert_ne!(s1, s2);
    }

    #[test]
    fn urlencoding_basic() {
        assert_eq!(urlencoding_encode("hello"), "hello");
        assert_eq!(urlencoding_encode("hello world"), "hello%20world");
        assert_eq!(urlencoding_encode("a=b&c=d"), "a%3Db%26c%3Dd");
    }

    #[test]
    fn default_client_ids_populated() {
        let ids = default_client_ids();
        assert!(ids.contains_key("google"));
        assert!(ids.contains_key("github"));
        assert!(ids.contains_key("microsoft"));
        assert!(ids.contains_key("slack"));
    }

    #[test]
    fn resolve_client_ids_uses_defaults() {
        let config = openfang_types::config::OAuthConfig::default();
        let ids = resolve_client_ids(&config);
        assert_eq!(ids["google"], "openfang-google-client-id");
        assert_eq!(ids["github"], "openfang-github-client-id");
    }

    // Configured IDs replace defaults per provider; unset providers keep defaults.
    #[test]
    fn resolve_client_ids_applies_overrides() {
        let config = openfang_types::config::OAuthConfig {
            google_client_id: Some("my-real-google-id".into()),
            github_client_id: None,
            microsoft_client_id: Some("my-msft-id".into()),
            slack_client_id: None,
        };
        let ids = resolve_client_ids(&config);
        assert_eq!(ids["google"], "my-real-google-id");
        assert_eq!(ids["github"], "openfang-github-client-id"); // default
        assert_eq!(ids["microsoft"], "my-msft-id");
        assert_eq!(ids["slack"], "openfang-slack-client-id"); // default
    }
}

View File

@@ -0,0 +1,360 @@
//! Integration Registry — manages bundled + installed integration templates.
//!
//! Loads 25 bundled MCP server templates at compile time, merges with user's
//! installed state from `~/.openfang/integrations.toml`, and converts installed
//! integrations to `McpServerConfigEntry` for kernel consumption.
use crate::{
ExtensionError, ExtensionResult, InstalledIntegration, IntegrationCategory, IntegrationInfo,
IntegrationStatus, IntegrationTemplate, IntegrationsFile,
};
use openfang_types::config::{McpServerConfigEntry, McpTransportEntry};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tracing::{debug, info, warn};
/// The integration registry — holds all known templates and install state.
///
/// Not internally synchronized; callers are expected to own it (e.g. behind a
/// lock) if shared — NOTE(review): confirm against the kernel's usage.
pub struct IntegrationRegistry {
    /// All known templates (bundled + custom), keyed by template ID.
    templates: HashMap<String, IntegrationTemplate>,
    /// Current installed state, keyed by template ID.
    installed: HashMap<String, InstalledIntegration>,
    /// Path to integrations.toml (`<home>/integrations.toml`).
    integrations_path: PathBuf,
}
impl IntegrationRegistry {
    /// Create a new registry with no templates.
    ///
    /// `home_dir` is the OpenFang home directory; install state persists to
    /// `<home_dir>/integrations.toml`. No I/O happens until a load/save call.
    pub fn new(home_dir: &Path) -> Self {
        Self {
            templates: HashMap::new(),
            installed: HashMap::new(),
            integrations_path: home_dir.join("integrations.toml"),
        }
    }
    /// Load bundled templates (compile-time embedded). Returns count loaded.
    ///
    /// Templates that fail to parse are skipped with a warning. Note the
    /// return value is the bundled total, taken before parsing — it includes
    /// any templates that failed to parse.
    pub fn load_bundled(&mut self) -> usize {
        let bundled = crate::bundled::bundled_integrations();
        let count = bundled.len();
        for (id, toml_content) in bundled {
            match toml::from_str::<IntegrationTemplate>(toml_content) {
                Ok(template) => {
                    self.templates.insert(id.to_string(), template);
                }
                Err(e) => {
                    warn!("Failed to parse bundled integration '{}': {}", id, e);
                }
            }
        }
        debug!("Loaded {count} bundled integration template(s)");
        count
    }
    /// Load installed state from integrations.toml.
    ///
    /// Returns `Ok(0)` without error if the file does not exist yet.
    /// Entries are merged into memory keyed by ID (last duplicate wins).
    pub fn load_installed(&mut self) -> ExtensionResult<usize> {
        if !self.integrations_path.exists() {
            return Ok(0);
        }
        let content = std::fs::read_to_string(&self.integrations_path)?;
        let file: IntegrationsFile =
            toml::from_str(&content).map_err(|e| ExtensionError::TomlParse(e.to_string()))?;
        let count = file.installed.len();
        for entry in file.installed {
            self.installed.insert(entry.id.clone(), entry);
        }
        info!("Loaded {count} installed integration(s)");
        Ok(count)
    }
    /// Save installed state to integrations.toml.
    ///
    /// Creates the parent directory if needed. Entry order in the file is not
    /// stable (values come from a HashMap).
    pub fn save_installed(&self) -> ExtensionResult<()> {
        let file = IntegrationsFile {
            installed: self.installed.values().cloned().collect(),
        };
        let content =
            toml::to_string_pretty(&file).map_err(|e| ExtensionError::TomlParse(e.to_string()))?;
        if let Some(parent) = self.integrations_path.parent() {
            std::fs::create_dir_all(parent)?;
        }
        std::fs::write(&self.integrations_path, content)?;
        Ok(())
    }
    /// Get a template by ID.
    pub fn get_template(&self, id: &str) -> Option<&IntegrationTemplate> {
        self.templates.get(id)
    }
    /// Get an installed record by ID.
    pub fn get_installed(&self, id: &str) -> Option<&InstalledIntegration> {
        self.installed.get(id)
    }
    /// Check if an integration is installed.
    pub fn is_installed(&self, id: &str) -> bool {
        self.installed.contains_key(id)
    }
    /// Mark an integration as installed and persist immediately.
    ///
    /// # Errors
    /// [`ExtensionError::AlreadyInstalled`] if the ID is already present.
    pub fn install(&mut self, entry: InstalledIntegration) -> ExtensionResult<()> {
        if self.installed.contains_key(&entry.id) {
            return Err(ExtensionError::AlreadyInstalled(entry.id.clone()));
        }
        self.installed.insert(entry.id.clone(), entry);
        self.save_installed()
    }
    /// Remove an installed integration and persist immediately.
    ///
    /// # Errors
    /// [`ExtensionError::NotInstalled`] if the ID is not installed.
    pub fn uninstall(&mut self, id: &str) -> ExtensionResult<()> {
        if self.installed.remove(id).is_none() {
            return Err(ExtensionError::NotInstalled(id.to_string()));
        }
        self.save_installed()
    }
    /// Enable/disable an installed integration and persist immediately.
    pub fn set_enabled(&mut self, id: &str, enabled: bool) -> ExtensionResult<()> {
        let entry = self
            .installed
            .get_mut(id)
            .ok_or_else(|| ExtensionError::NotInstalled(id.to_string()))?;
        entry.enabled = enabled;
        self.save_installed()
    }
    /// List all templates, sorted by ID for stable display order.
    pub fn list_templates(&self) -> Vec<&IntegrationTemplate> {
        let mut templates: Vec<_> = self.templates.values().collect();
        templates.sort_by(|a, b| a.id.cmp(&b.id));
        templates
    }
    /// List templates by category (unsorted, HashMap iteration order).
    pub fn list_by_category(&self, category: &IntegrationCategory) -> Vec<&IntegrationTemplate> {
        self.templates
            .values()
            .filter(|t| &t.category == category)
            .collect()
    }
    /// Search templates by query (case-insensitive substring match against
    /// id, name, description, and tags).
    pub fn search(&self, query: &str) -> Vec<&IntegrationTemplate> {
        let q = query.to_lowercase();
        self.templates
            .values()
            .filter(|t| {
                t.id.to_lowercase().contains(&q)
                    || t.name.to_lowercase().contains(&q)
                    || t.description.to_lowercase().contains(&q)
                    || t.tags.iter().any(|tag| tag.to_lowercase().contains(&q))
            })
            .collect()
    }
    /// Get combined info for all integrations (template + install state).
    ///
    /// Status is derived purely from install state here (Ready/Disabled/
    /// Available); live Setup/Error states and `tool_count` are left for the
    /// caller to fill in — `tool_count` is always 0 from this method.
    pub fn list_all_info(&self) -> Vec<IntegrationInfo> {
        self.templates
            .values()
            .map(|t| {
                let installed = self.installed.get(&t.id);
                let status = match installed {
                    Some(inst) if !inst.enabled => IntegrationStatus::Disabled,
                    Some(_) => IntegrationStatus::Ready,
                    None => IntegrationStatus::Available,
                };
                IntegrationInfo {
                    template: t.clone(),
                    status,
                    installed: installed.cloned(),
                    tool_count: 0,
                }
            })
            .collect()
    }
    /// Convert all enabled installed integrations to MCP server config entries.
    /// These can be merged into the kernel's MCP server list.
    ///
    /// Installed entries whose template is missing are silently skipped.
    /// `env` carries only the variable *names*; values are presumably injected
    /// from the credential vault at spawn time — confirm in the kernel.
    pub fn to_mcp_configs(&self) -> Vec<McpServerConfigEntry> {
        self.installed
            .values()
            .filter(|inst| inst.enabled)
            .filter_map(|inst| {
                let template = self.templates.get(&inst.id)?;
                let transport = match &template.transport {
                    crate::McpTransportTemplate::Stdio { command, args } => {
                        McpTransportEntry::Stdio {
                            command: command.clone(),
                            args: args.clone(),
                        }
                    }
                    crate::McpTransportTemplate::Sse { url } => {
                        McpTransportEntry::Sse { url: url.clone() }
                    }
                };
                let env: Vec<String> = template
                    .required_env
                    .iter()
                    .map(|e| e.name.clone())
                    .collect();
                Some(McpServerConfigEntry {
                    name: inst.id.clone(),
                    transport,
                    // Fixed 30s timeout for all template-derived servers.
                    timeout_secs: 30,
                    env,
                })
            })
            .collect()
    }
    /// Get the path to integrations.toml.
    pub fn integrations_path(&self) -> &Path {
        &self.integrations_path
    }
    /// Total template count.
    pub fn template_count(&self) -> usize {
        self.templates.len()
    }
    /// Total installed count.
    pub fn installed_count(&self) -> usize {
        self.installed.len()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // The bundled catalog currently ships exactly 25 templates.
    #[test]
    fn registry_load_bundled() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        let count = reg.load_bundled();
        assert_eq!(count, 25);
        assert_eq!(reg.template_count(), 25);
    }

    #[test]
    fn registry_get_template() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let gh = reg.get_template("github").unwrap();
        assert_eq!(gh.name, "GitHub");
        assert_eq!(gh.category, IntegrationCategory::DevTools);
    }

    #[test]
    fn registry_search() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let results = reg.search("search");
        assert!(results.len() >= 2); // brave-search, exa-search
    }

    // Install, reject duplicate install, then uninstall.
    #[test]
    fn registry_install_uninstall() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let entry = InstalledIntegration {
            id: "github".to_string(),
            installed_at: chrono::Utc::now(),
            enabled: true,
            oauth_provider: None,
            config: HashMap::new(),
        };
        reg.install(entry).unwrap();
        assert!(reg.is_installed("github"));
        assert_eq!(reg.installed_count(), 1);
        // Double install should fail
        let entry2 = InstalledIntegration {
            id: "github".to_string(),
            installed_at: chrono::Utc::now(),
            enabled: true,
            oauth_provider: None,
            config: HashMap::new(),
        };
        assert!(reg.install(entry2).is_err());
        reg.uninstall("github").unwrap();
        assert!(!reg.is_installed("github"));
    }

    #[test]
    fn registry_to_mcp_configs() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let entry = InstalledIntegration {
            id: "github".to_string(),
            installed_at: chrono::Utc::now(),
            enabled: true,
            oauth_provider: None,
            config: HashMap::new(),
        };
        reg.install(entry).unwrap();
        let configs = reg.to_mcp_configs();
        assert_eq!(configs.len(), 1);
        assert_eq!(configs[0].name, "github");
    }

    // install() persists eagerly, so a second registry on the same dir can load it.
    #[test]
    fn registry_save_load_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let entry = InstalledIntegration {
            id: "notion".to_string(),
            installed_at: chrono::Utc::now(),
            enabled: true,
            oauth_provider: None,
            config: HashMap::new(),
        };
        reg.install(entry).unwrap();
        // Load from same path
        let mut reg2 = IntegrationRegistry::new(dir.path());
        reg2.load_bundled();
        let count = reg2.load_installed().unwrap();
        assert_eq!(count, 1);
        assert!(reg2.is_installed("notion"));
    }

    #[test]
    fn registry_list_by_category() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let devtools = reg.list_by_category(&IntegrationCategory::DevTools);
        assert_eq!(devtools.len(), 6);
    }

    #[test]
    fn registry_set_enabled() {
        let dir = tempfile::tempdir().unwrap();
        let mut reg = IntegrationRegistry::new(dir.path());
        reg.load_bundled();
        let entry = InstalledIntegration {
            id: "github".to_string(),
            installed_at: chrono::Utc::now(),
            enabled: true,
            oauth_provider: None,
            config: HashMap::new(),
        };
        reg.install(entry).unwrap();
        reg.set_enabled("github", false).unwrap();
        let configs = reg.to_mcp_configs();
        assert!(configs.is_empty()); // disabled = not in MCP configs
    }
}

View File

@@ -0,0 +1,658 @@
//! Credential Vault — AES-256-GCM encrypted secret storage.
//!
//! Stores secrets in `~/.openfang/vault.enc`, with the master key sourced from
//! the OS keyring (Windows Credential Manager / macOS Keychain / Linux Secret Service)
//! or the `OPENFANG_VAULT_KEY` env var for headless/CI environments.
use crate::{ExtensionError, ExtensionResult};
use aes_gcm::aead::{Aead, KeyInit, OsRng};
use aes_gcm::{Aes256Gcm, Nonce};
use argon2::Argon2;
use rand::RngCore;
use serde::{Deserialize, Serialize};
// sha2 is used only in non-test keyring functions
#[cfg(not(test))]
use sha2::{Digest as _, Sha256};
use std::collections::HashMap;
use std::path::PathBuf;
use tracing::{debug, info, warn};
use zeroize::Zeroizing;
/// Service name for OS keyring storage. (cfg'd out of test builds, matching
/// the keyring helpers that use it.)
#[cfg(not(test))]
const KEYRING_SERVICE: &str = "openfang-vault";
/// Username for OS keyring (used by platform keyring backends).
#[allow(dead_code)]
const KEYRING_USER: &str = "master-key";
/// Env var fallback for vault key (base64-encoded 32-byte key).
const VAULT_KEY_ENV: &str = "OPENFANG_VAULT_KEY";
/// Salt length for Argon2 (16 bytes, generated fresh per save).
const SALT_LEN: usize = 16;
/// Nonce length for AES-256-GCM (96 bits, the standard GCM nonce size).
const NONCE_LEN: usize = 12;
/// Magic bytes for vault file format versioning ("OpenFang Vault v1").
const VAULT_MAGIC: &[u8; 4] = b"OFV1";
/// On-disk vault format (encrypted). Serialized as JSON after the `OFV1`
/// magic header; all binary fields are base64 (standard alphabet).
#[derive(Serialize, Deserialize)]
struct VaultFile {
    /// Version marker (currently always 1).
    version: u8,
    /// Argon2 salt (base64).
    salt: String,
    /// AES-256-GCM nonce (base64).
    nonce: String,
    /// Encrypted data (base64) — ciphertext of the JSON-encoded entries.
    ciphertext: String,
}
/// Decrypted vault entries — the plaintext payload that gets encrypted.
/// Exists only transiently in memory during save/load.
#[derive(Default, Serialize, Deserialize)]
struct VaultEntries {
    /// Secret name → secret value.
    secrets: HashMap<String, String>,
}
/// AES-256-GCM encrypted credential vault.
///
/// Secrets live in memory as `Zeroizing<String>` and are wiped on drop.
/// Mutating operations persist to disk immediately.
pub struct CredentialVault {
    /// Path to vault.enc file.
    path: PathBuf,
    /// Decrypted entries (each value zeroed on drop; map cleared in `Drop`).
    entries: HashMap<String, Zeroizing<String>>,
    /// Whether the vault is unlocked (entries loaded and writable).
    unlocked: bool,
    /// Cached master key (zeroed on drop) — avoids re-resolving from env/keyring.
    cached_key: Option<Zeroizing<[u8; 32]>>,
}
impl CredentialVault {
    /// Create a new vault handle at the given path. No I/O is performed;
    /// call `init`/`unlock` before reading or writing secrets.
    pub fn new(vault_path: PathBuf) -> Self {
        Self {
            path: vault_path,
            entries: HashMap::new(),
            unlocked: false,
            cached_key: None,
        }
    }
    /// Initialize a new vault. Generates a master key and stores it in the OS keyring.
    ///
    /// Key precedence: existing `OPENFANG_VAULT_KEY` env var, then existing OS
    /// keyring entry, then a freshly generated random 256-bit key. If the new
    /// key cannot be stored in the keyring it is printed to stderr once so the
    /// user can save it as the env var.
    ///
    /// # Errors
    /// Fails if the vault file already exists, the key cannot be decoded, or
    /// the initial save fails.
    pub fn init(&mut self) -> ExtensionResult<()> {
        if self.path.exists() {
            return Err(ExtensionError::Vault(
                "Vault already exists. Delete it first to re-initialize.".to_string(),
            ));
        }
        // Check if a master key is already available (env var or keyring)
        let key_bytes = if let Ok(existing_b64) = std::env::var(VAULT_KEY_ENV) {
            // Use the existing key from env var
            info!("Using existing vault key from {}", VAULT_KEY_ENV);
            decode_master_key(&existing_b64)?
        } else if let Ok(existing_b64) = load_keyring_key() {
            info!("Using existing vault key from OS keyring");
            decode_master_key(&existing_b64)?
        } else {
            // Generate a random master key
            let mut kb = Zeroizing::new([0u8; 32]);
            OsRng.fill_bytes(kb.as_mut());
            let key_b64 = Zeroizing::new(base64::Engine::encode(
                &base64::engine::general_purpose::STANDARD,
                kb.as_ref(),
            ));
            // Try to store in OS keyring
            match store_keyring_key(&key_b64) {
                Ok(()) => {
                    info!("Vault master key stored in OS keyring");
                }
                Err(e) => {
                    warn!(
                        "Could not store in OS keyring: {e}. Set {} env var instead.",
                        VAULT_KEY_ENV
                    );
                    // Last resort: show the key once so the user can persist it.
                    eprintln!(
                        "Vault key (save this as {}): {}",
                        VAULT_KEY_ENV,
                        key_b64.as_str()
                    );
                }
            }
            kb
        };
        // Create empty vault file
        self.entries.clear();
        self.unlocked = true;
        self.save(&key_bytes)?;
        self.cached_key = Some(key_bytes);
        info!("Credential vault initialized at {:?}", self.path);
        Ok(())
    }
    /// Unlock the vault by loading and decrypting entries.
    ///
    /// Idempotent: returns `Ok(())` immediately if already unlocked. Caches
    /// the resolved master key for subsequent saves.
    pub fn unlock(&mut self) -> ExtensionResult<()> {
        if self.unlocked {
            return Ok(());
        }
        if !self.path.exists() {
            return Err(ExtensionError::Vault(
                "Vault not initialized. Run `openfang vault init`.".to_string(),
            ));
        }
        let master_key = self.resolve_master_key()?;
        self.load(&master_key)?;
        self.unlocked = true;
        self.cached_key = Some(master_key);
        debug!("Vault unlocked with {} entries", self.entries.len());
        Ok(())
    }
    /// Get a secret from the vault. Returns a zeroizing copy; `None` when the
    /// key is absent (or the vault was never unlocked, leaving entries empty).
    pub fn get(&self, key: &str) -> Option<Zeroizing<String>> {
        self.entries.get(key).cloned()
    }
    /// Store a secret in the vault and persist immediately.
    ///
    /// # Errors
    /// [`ExtensionError::VaultLocked`] if the vault is not unlocked.
    pub fn set(&mut self, key: String, value: Zeroizing<String>) -> ExtensionResult<()> {
        if !self.unlocked {
            return Err(ExtensionError::VaultLocked);
        }
        self.entries.insert(key, value);
        let master_key = self.resolve_master_key()?;
        self.save(&master_key)
    }
    /// Remove a secret from the vault. Returns whether the key existed;
    /// only re-saves to disk when something was actually removed.
    pub fn remove(&mut self, key: &str) -> ExtensionResult<bool> {
        if !self.unlocked {
            return Err(ExtensionError::VaultLocked);
        }
        let removed = self.entries.remove(key).is_some();
        if removed {
            let master_key = self.resolve_master_key()?;
            self.save(&master_key)?;
        }
        Ok(removed)
    }
    /// List all keys in the vault (not values). Unordered.
    pub fn list_keys(&self) -> Vec<&str> {
        self.entries.keys().map(|k| k.as_str()).collect()
    }
    /// Check if the vault file exists.
    pub fn exists(&self) -> bool {
        self.path.exists()
    }
    /// Check if the vault is unlocked.
    pub fn is_unlocked(&self) -> bool {
        self.unlocked
    }
    /// Initialize a vault with an explicit master key (for testing / programmatic use).
    /// Skips env/keyring resolution entirely.
    pub fn init_with_key(&mut self, master_key: Zeroizing<[u8; 32]>) -> ExtensionResult<()> {
        if self.path.exists() {
            return Err(ExtensionError::Vault(
                "Vault already exists. Delete it first to re-initialize.".to_string(),
            ));
        }
        self.entries.clear();
        self.unlocked = true;
        self.save(&master_key)?;
        self.cached_key = Some(master_key);
        debug!(
            "Credential vault initialized at {:?} (explicit key)",
            self.path
        );
        Ok(())
    }
    /// Unlock the vault with an explicit master key (for testing / programmatic use).
    pub fn unlock_with_key(&mut self, master_key: Zeroizing<[u8; 32]>) -> ExtensionResult<()> {
        if self.unlocked {
            return Ok(());
        }
        if !self.path.exists() {
            return Err(ExtensionError::Vault(
                "Vault not initialized. Run `openfang vault init`.".to_string(),
            ));
        }
        self.load(&master_key)?;
        self.unlocked = true;
        self.cached_key = Some(master_key);
        debug!(
            "Vault unlocked with {} entries (explicit key)",
            self.entries.len()
        );
        Ok(())
    }
    /// Number of entries.
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// Whether the vault is empty.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    // ── Internal ─────────────────────────────────────────────────────────
    /// Resolve the master key from cache, keyring, or env var (in that order).
    fn resolve_master_key(&self) -> ExtensionResult<Zeroizing<[u8; 32]>> {
        // Use cached key if available (avoids env var race in parallel tests)
        if let Some(ref cached) = self.cached_key {
            return Ok(cached.clone());
        }
        // Try OS keyring first
        if let Ok(key_b64) = load_keyring_key() {
            return decode_master_key(&key_b64);
        }
        // Fallback to env var
        if let Ok(key_b64) = std::env::var(VAULT_KEY_ENV) {
            let key_b64 = Zeroizing::new(key_b64);
            return decode_master_key(&key_b64);
        }
        Err(ExtensionError::VaultLocked)
    }
    /// Save encrypted vault to disk.
    ///
    /// Pipeline: entries → JSON → AES-256-GCM (key = Argon2id(master, fresh
    /// salt), fresh nonce) → base64 fields in a JSON `VaultFile`, prefixed
    /// with the `OFV1` magic. Salt and nonce are regenerated on every save.
    fn save(&self, master_key: &[u8; 32]) -> ExtensionResult<()> {
        // Serialize entries to JSON.
        // NOTE(review): this intermediate HashMap<String, String> holds
        // plaintext copies of every secret without Zeroizing; only the final
        // JSON buffer below is zeroed on drop.
        let plain_entries: HashMap<String, String> = self
            .entries
            .iter()
            .map(|(k, v)| (k.clone(), v.as_str().to_string()))
            .collect();
        let vault_data = VaultEntries {
            secrets: plain_entries,
        };
        let plaintext = Zeroizing::new(
            serde_json::to_vec(&vault_data)
                .map_err(|e| ExtensionError::Vault(format!("Serialization failed: {e}")))?,
        );
        // Generate salt and nonce
        let mut salt = [0u8; SALT_LEN];
        let mut nonce_bytes = [0u8; NONCE_LEN];
        OsRng.fill_bytes(&mut salt);
        OsRng.fill_bytes(&mut nonce_bytes);
        // Derive encryption key from master key + salt using Argon2
        let derived_key = derive_key(master_key, &salt)?;
        // Encrypt with AES-256-GCM
        let cipher = Aes256Gcm::new_from_slice(derived_key.as_ref())
            .map_err(|e| ExtensionError::Vault(format!("Cipher init failed: {e}")))?;
        let nonce = Nonce::from_slice(&nonce_bytes);
        let ciphertext = cipher
            .encrypt(nonce, plaintext.as_slice())
            .map_err(|e| ExtensionError::Vault(format!("Encryption failed: {e}")))?;
        // Write to file
        let vault_file = VaultFile {
            version: 1,
            salt: base64::Engine::encode(&base64::engine::general_purpose::STANDARD, salt),
            nonce: base64::Engine::encode(&base64::engine::general_purpose::STANDARD, nonce_bytes),
            ciphertext: base64::Engine::encode(
                &base64::engine::general_purpose::STANDARD,
                &ciphertext,
            ),
        };
        let content = serde_json::to_string_pretty(&vault_file)
            .map_err(|e| ExtensionError::Vault(format!("Vault file serialization failed: {e}")))?;
        if let Some(parent) = self.path.parent() {
            std::fs::create_dir_all(parent)?;
        }
        // Prepend OFV1 magic bytes for format detection
        let mut output = Vec::with_capacity(VAULT_MAGIC.len() + content.len());
        output.extend_from_slice(VAULT_MAGIC);
        output.extend_from_slice(content.as_bytes());
        std::fs::write(&self.path, output)?;
        Ok(())
    }
    /// Load and decrypt vault from disk. Replaces `self.entries` wholesale on
    /// success (cleared before repopulating).
    fn load(&mut self, master_key: &[u8; 32]) -> ExtensionResult<()> {
        let raw = std::fs::read(&self.path)?;
        // Strip OFV1 magic header if present; legacy JSON files start with '{'
        let content = if raw.starts_with(VAULT_MAGIC) {
            std::str::from_utf8(&raw[VAULT_MAGIC.len()..])
                .map_err(|e| ExtensionError::Vault(format!("UTF-8 decode failed: {e}")))?
        } else if raw.first() == Some(&b'{') {
            // Legacy JSON vault (no magic header)
            std::str::from_utf8(&raw)
                .map_err(|e| ExtensionError::Vault(format!("UTF-8 decode failed: {e}")))?
        } else {
            return Err(ExtensionError::Vault(
                "Unrecognized vault file format".to_string(),
            ));
        };
        let vault_file: VaultFile = serde_json::from_str(content)
            .map_err(|e| ExtensionError::Vault(format!("Vault file parse failed: {e}")))?;
        if vault_file.version != 1 {
            return Err(ExtensionError::Vault(format!(
                "Unsupported vault version: {}",
                vault_file.version
            )));
        }
        let salt =
            base64::Engine::decode(&base64::engine::general_purpose::STANDARD, &vault_file.salt)
                .map_err(|e| ExtensionError::Vault(format!("Salt decode failed: {e}")))?;
        let nonce_bytes = base64::Engine::decode(
            &base64::engine::general_purpose::STANDARD,
            &vault_file.nonce,
        )
        .map_err(|e| ExtensionError::Vault(format!("Nonce decode failed: {e}")))?;
        let ciphertext = base64::Engine::decode(
            &base64::engine::general_purpose::STANDARD,
            &vault_file.ciphertext,
        )
        .map_err(|e| ExtensionError::Vault(format!("Ciphertext decode failed: {e}")))?;
        // Derive key (same Argon2id derivation as save, using the stored salt)
        let derived_key = derive_key(master_key, &salt)?;
        // Decrypt — GCM authentication means a wrong key fails here, not with
        // garbage output.
        let cipher = Aes256Gcm::new_from_slice(derived_key.as_ref())
            .map_err(|e| ExtensionError::Vault(format!("Cipher init failed: {e}")))?;
        let nonce = Nonce::from_slice(&nonce_bytes);
        let plaintext = Zeroizing::new(
            cipher
                .decrypt(nonce, ciphertext.as_slice())
                .map_err(|e| ExtensionError::Vault(format!("Decryption failed: {e}")))?,
        );
        // Parse entries
        let vault_data: VaultEntries = serde_json::from_slice(&plaintext)
            .map_err(|e| ExtensionError::Vault(format!("Vault data parse failed: {e}")))?;
        self.entries.clear();
        for (k, v) in vault_data.secrets {
            self.entries.insert(k, Zeroizing::new(v));
        }
        Ok(())
    }
}
/// Best-effort scrub of in-memory secrets when the vault goes away.
impl Drop for CredentialVault {
    fn drop(&mut self) {
        // Mark locked and discard the cached key first, then clear the map:
        // dropping each Zeroizing<String> entry wipes its bytes, and
        // clearing forces those drops to happen now rather than later.
        self.unlocked = false;
        self.cached_key = None;
        self.entries.clear();
    }
}
/// Derive a 256-bit encryption key from the master key and a per-vault salt
/// using Argon2id (crate default parameters).
///
/// The derived key is wrapped in `Zeroizing` so it is wiped when dropped.
fn derive_key(master_key: &[u8; 32], salt: &[u8]) -> ExtensionResult<Zeroizing<[u8; 32]>> {
    let kdf = Argon2::default();
    let mut output = Zeroizing::new([0u8; 32]);
    kdf.hash_password_into(master_key, salt, output.as_mut())
        .map_err(|e| ExtensionError::Vault(format!("Key derivation failed: {e}")))?;
    Ok(output)
}
/// Decode a base64-encoded master key into its raw 32 bytes.
///
/// # Errors
/// Returns `ExtensionError::Vault` if the input is not valid standard
/// base64 or does not decode to exactly 32 bytes.
fn decode_master_key(key_b64: &str) -> ExtensionResult<Zeroizing<[u8; 32]>> {
    // Wrap the intermediate buffer in Zeroizing so decoded key material is
    // wiped from memory even on the early-return error path below —
    // previously this Vec leaked key bytes on the heap after drop.
    let bytes = Zeroizing::new(
        base64::Engine::decode(&base64::engine::general_purpose::STANDARD, key_b64)
            .map_err(|e| ExtensionError::Vault(format!("Key decode failed: {e}")))?,
    );
    if bytes.len() != 32 {
        return Err(ExtensionError::Vault(format!(
            "Invalid key length: expected 32, got {}",
            bytes.len()
        )));
    }
    let mut key = Zeroizing::new([0u8; 32]);
    key.copy_from_slice(&bytes);
    Ok(key)
}
/// Store the master key in the local keyring fallback.
///
/// The key is XOR-obfuscated with a machine-bound mask — SHA-256 of the
/// machine fingerprint plus the service name — and written base64-encoded.
/// This is obfuscation, not encryption: it only binds the file to this
/// user/host, but is still better than a plaintext env var.
fn store_keyring_key(key_b64: &str) -> Result<(), String> {
    #[cfg(not(test))]
    {
        // In production, we'd use the `keyring` crate. Since it's an optional
        // heavy dependency, we use a file-based fallback instead.
        let keyring_path = dirs::data_local_dir()
            .unwrap_or_else(std::env::temp_dir)
            .join("openfang")
            .join(".keyring");
        if let Some(parent) = keyring_path.parent() {
            std::fs::create_dir_all(parent).map_err(|e| format!("mkdir: {e}"))?;
        }
        // Build the machine-specific XOR mask
        let machine_id = machine_fingerprint();
        let mut hasher = Sha256::new();
        hasher.update(&machine_id);
        hasher.update(KEYRING_SERVICE.as_bytes());
        let mask: Vec<u8> = hasher.finalize().to_vec();
        let obfuscated: Vec<u8> = key_b64
            .as_bytes()
            .iter()
            .enumerate()
            .map(|(i, b)| b ^ mask[i % mask.len()])
            .collect();
        let encoded =
            base64::Engine::encode(&base64::engine::general_purpose::STANDARD, &obfuscated);
        std::fs::write(&keyring_path, encoded).map_err(|e| format!("write: {e}"))?;
        // Restrict the file to owner read/write — it guards the vault key.
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            std::fs::set_permissions(&keyring_path, std::fs::Permissions::from_mode(0o600))
                .map_err(|e| format!("chmod: {e}"))?;
        }
        Ok(())
    }
    #[cfg(test)]
    {
        let _ = key_b64;
        Err("Keyring not available in tests".to_string())
    }
}
/// Load the master key from the local keyring fallback.
///
/// Reverses `store_keyring_key`: reads the base64 file, rebuilds the
/// machine-bound XOR mask, and recovers the original base64 key string.
fn load_keyring_key() -> Result<Zeroizing<String>, String> {
    #[cfg(not(test))]
    {
        let keyring_path = dirs::data_local_dir()
            .unwrap_or_else(std::env::temp_dir)
            .join("openfang")
            .join(".keyring");
        if !keyring_path.exists() {
            return Err("Keyring file not found".to_string());
        }
        let encoded = std::fs::read_to_string(&keyring_path).map_err(|e| format!("read: {e}"))?;
        let obfuscated =
            base64::Engine::decode(&base64::engine::general_purpose::STANDARD, encoded.trim())
                .map_err(|e| format!("decode: {e}"))?;
        // Rebuild the exact mask used at store time
        let mut hasher = Sha256::new();
        hasher.update(&machine_fingerprint());
        hasher.update(KEYRING_SERVICE.as_bytes());
        let mask = hasher.finalize();
        // De-obfuscate by XOR-ing against the repeating mask
        let plain: Vec<u8> = obfuscated
            .iter()
            .zip(mask.iter().cycle())
            .map(|(byte, m)| byte ^ m)
            .collect();
        let key_str = String::from_utf8(plain).map_err(|e| format!("utf8: {e}"))?;
        Ok(Zeroizing::new(key_str))
    }
    #[cfg(test)]
    {
        Err("Keyring not available in tests".to_string())
    }
}
/// Generate a machine-specific fingerprint for keyring obfuscation.
///
/// Hashes the current username and hostname (first env var found in each
/// candidate list, skipping any that is unset) plus a fixed domain tag.
#[cfg(not(test))]
fn machine_fingerprint() -> Vec<u8> {
    use sha2::Digest;
    let mut hasher = Sha256::new();
    // Mix in username + hostname for basic machine binding; each group
    // contributes only if one of its env vars is present.
    for candidates in [&["USERNAME", "USER"][..], &["COMPUTERNAME", "HOSTNAME"][..]] {
        if let Some(value) = candidates.iter().find_map(|name| std::env::var(name).ok()) {
            hasher.update(value.as_bytes());
        }
    }
    hasher.update(b"openfang-vault-v1");
    hasher.finalize().to_vec()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Build an empty vault backed by a temp directory. The TempDir is
    // returned so callers keep it alive — dropping it deletes the vault file.
    fn test_vault() -> (tempfile::TempDir, CredentialVault) {
        let dir = tempfile::tempdir().unwrap();
        let vault_path = dir.path().join("vault.enc");
        let vault = CredentialVault::new(vault_path);
        (dir, vault)
    }
    /// Generate a random 32-byte master key for tests.
    fn random_key() -> Zeroizing<[u8; 32]> {
        let mut kb = Zeroizing::new([0u8; 32]);
        OsRng.fill_bytes(kb.as_mut());
        kb
    }
    // End-to-end: init a vault, set/get a secret, re-open the same file with
    // a second instance using the same key, then remove the secret.
    #[test]
    fn vault_init_and_roundtrip() {
        let (dir, mut vault) = test_vault();
        let key = random_key();
        // Init creates vault file
        vault.init_with_key(key.clone()).unwrap();
        assert!(vault.exists());
        assert!(vault.is_unlocked());
        assert!(vault.is_empty());
        // Store a secret
        vault
            .set(
                "GITHUB_TOKEN".to_string(),
                Zeroizing::new("ghp_test123".to_string()),
            )
            .unwrap();
        assert_eq!(vault.len(), 1);
        // Read it back
        let val = vault.get("GITHUB_TOKEN").unwrap();
        assert_eq!(val.as_str(), "ghp_test123");
        // New vault instance, unlock with same key
        let mut vault2 = CredentialVault::new(dir.path().join("vault.enc"));
        vault2.unlock_with_key(key).unwrap();
        let val2 = vault2.get("GITHUB_TOKEN").unwrap();
        assert_eq!(val2.as_str(), "ghp_test123");
        // Remove
        assert!(vault2.remove("GITHUB_TOKEN").unwrap());
        assert!(vault2.get("GITHUB_TOKEN").is_none());
    }
    // list_keys returns every stored key (order unspecified, hence the sort).
    #[test]
    fn vault_list_keys() {
        let (_dir, mut vault) = test_vault();
        let key = random_key();
        vault.init_with_key(key).unwrap();
        vault
            .set("A".to_string(), Zeroizing::new("1".to_string()))
            .unwrap();
        vault
            .set("B".to_string(), Zeroizing::new("2".to_string()))
            .unwrap();
        let mut keys = vault.list_keys();
        keys.sort();
        assert_eq!(keys, vec!["A", "B"]);
    }
    // Unlocking with a different random key must fail (GCM auth failure).
    #[test]
    fn vault_wrong_key_fails() {
        let (dir, mut vault) = test_vault();
        let good_key = random_key();
        vault.init_with_key(good_key).unwrap();
        vault
            .set("SECRET".to_string(), Zeroizing::new("value".to_string()))
            .unwrap();
        // Wrong key — should fail to decrypt
        let bad_key = random_key();
        let mut vault2 = CredentialVault::new(dir.path().join("vault.enc"));
        assert!(vault2.unlock_with_key(bad_key).is_err());
    }
    // Same (master key, salt) input must always derive the same key —
    // otherwise a saved vault could never be re-opened.
    #[test]
    fn derive_key_deterministic() {
        let master = [42u8; 32];
        let salt = [1u8; 16];
        let k1 = derive_key(&master, &salt).unwrap();
        let k2 = derive_key(&master, &salt).unwrap();
        assert_eq!(k1.as_ref(), k2.as_ref());
    }
    // A freshly written vault file starts with the OFV1 magic bytes.
    #[test]
    fn vault_file_has_magic_header() {
        let (_dir, mut vault) = test_vault();
        let key = random_key();
        vault.init_with_key(key).unwrap();
        let raw = std::fs::read(&vault.path).unwrap();
        assert_eq!(&raw[..4], b"OFV1");
    }
    // Headerless JSON vaults (pre-OFV1 format) must still load.
    #[test]
    fn vault_legacy_json_compat() {
        let (dir, mut vault) = test_vault();
        let key = random_key();
        vault.init_with_key(key.clone()).unwrap();
        vault
            .set("KEY".to_string(), Zeroizing::new("val".to_string()))
            .unwrap();
        // Strip the OFV1 magic header to simulate a legacy vault file
        let raw = std::fs::read(&vault.path).unwrap();
        assert_eq!(&raw[..4], b"OFV1");
        std::fs::write(&vault.path, &raw[4..]).unwrap();
        // Should still load (legacy compat)
        let mut vault2 = CredentialVault::new(dir.path().join("vault.enc"));
        vault2.unlock_with_key(key).unwrap();
        assert_eq!(vault2.get("KEY").unwrap().as_str(), "val");
    }
    // A file that has neither the OFV1 magic nor a leading '{' is rejected
    // with the "Unrecognized vault file format" error.
    #[test]
    fn vault_rejects_bad_magic() {
        let (dir, mut vault) = test_vault();
        let key = random_key();
        vault.init_with_key(key.clone()).unwrap();
        // Overwrite with unrecognized binary data
        std::fs::write(&vault.path, b"BAAD not json").unwrap();
        let mut vault2 = CredentialVault::new(dir.path().join("vault.enc"));
        let result = vault2.unlock_with_key(key);
        assert!(result.is_err());
        let msg = format!("{:?}", result.unwrap_err());
        assert!(msg.contains("Unrecognized vault file format"));
    }
}