diff --git a/.ci/sample_tests/quickstart/py.integration.cloudbuild.yaml b/.ci/sample_tests/quickstart/py.integration.cloudbuild.yaml index 52752404a5..da8daf678f 100644 --- a/.ci/sample_tests/quickstart/py.integration.cloudbuild.yaml +++ b/.ci/sample_tests/quickstart/py.integration.cloudbuild.yaml @@ -23,8 +23,8 @@ steps: - | set -ex export VERSION=$(cat ./cmd/version.txt) - chmod +x .ci/sample_tests/run_py_tests.sh - .ci/sample_tests/run_py_tests.sh + chmod +x .ci/sample_tests/run_tests.sh + .ci/sample_tests/run_tests.sh env: - 'CLOUD_SQL_INSTANCE=${_CLOUD_SQL_INSTANCE}' - 'GCP_PROJECT=${_GCP_PROJECT}' diff --git a/.github/workflows/deploy_dev_docs.yaml b/.github/workflows/deploy_dev_docs.yaml index d51207e1ad..d71f1db273 100644 --- a/.github/workflows/deploy_dev_docs.yaml +++ b/.github/workflows/deploy_dev_docs.yaml @@ -40,7 +40,7 @@ jobs: group: docs-deployment cancel-in-progress: false steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod @@ -56,7 +56,7 @@ jobs: node-version: "22" - name: Cache dependencies - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} diff --git a/.github/workflows/deploy_previous_version_docs.yaml b/.github/workflows/deploy_previous_version_docs.yaml index 5c238d18b4..1c642262e7 100644 --- a/.github/workflows/deploy_previous_version_docs.yaml +++ b/.github/workflows/deploy_previous_version_docs.yaml @@ -30,14 +30,14 @@ jobs: steps: - name: Checkout main branch (for latest templates and theme) - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: ref: 'main' submodules: 'recursive' fetch-depth: 0 - name: Checkout 
old content from tag into a temporary directory - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: ref: ${{ github.event.inputs.version_tag }} path: 'old_version_source' # Checkout into a temp subdir diff --git a/.github/workflows/deploy_versioned_docs.yaml b/.github/workflows/deploy_versioned_docs.yaml index 42d0bd1a20..67e809935e 100644 --- a/.github/workflows/deploy_versioned_docs.yaml +++ b/.github/workflows/deploy_versioned_docs.yaml @@ -30,7 +30,7 @@ jobs: cancel-in-progress: false steps: - name: Checkout Code at Tag - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: ref: ${{ github.event.release.tag_name }} diff --git a/.github/workflows/docs_preview_clean.yaml b/.github/workflows/docs_preview_clean.yaml index ba44bfcc8b..a3a1f07857 100644 --- a/.github/workflows/docs_preview_clean.yaml +++ b/.github/workflows/docs_preview_clean.yaml @@ -34,7 +34,7 @@ jobs: group: "preview-${{ github.event.number }}" cancel-in-progress: true steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: ref: versioned-gh-pages diff --git a/.github/workflows/docs_preview_deploy.yaml b/.github/workflows/docs_preview_deploy.yaml index 769b4c5dc5..fda0e4895f 100644 --- a/.github/workflows/docs_preview_deploy.yaml +++ b/.github/workflows/docs_preview_deploy.yaml @@ -49,7 +49,7 @@ jobs: group: "preview-${{ github.event.number }}" cancel-in-progress: true steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: # Checkout the PR's HEAD commit (supports forks). 
ref: ${{ github.event.pull_request.head.sha }} @@ -67,7 +67,7 @@ jobs: node-version: "22" - name: Cache dependencies - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} diff --git a/.github/workflows/link_checker_workflow.yaml b/.github/workflows/link_checker_workflow.yaml index 189016dbc4..591221d16e 100644 --- a/.github/workflows/link_checker_workflow.yaml +++ b/.github/workflows/link_checker_workflow.yaml @@ -22,39 +22,47 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - name: Restore lychee cache - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5 with: path: .lycheecache key: cache-lychee-${{ github.sha }} restore-keys: cache-lychee- - name: Link Checker + id: lychee-check uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0 # v2 + continue-on-error: true with: args: > - --verbose + --quiet --no-progress --cache --max-cache-age 1d --exclude '^neo4j\+.*' --exclude '^bolt://.*' README.md docs/ - output: /tmp/foo.txt - fail: true - jobSummary: true - debug: true + output: lychee-report.md + format: markdown + fail: true + jobSummary: false + debug: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # This step only runs if the 'lychee_check' step fails, ensuring the - # context note only appears when the developer needs to troubleshoot. 
- - name: Display Link Context Note on Failure - if: ${{ failure() }} + + - name: Display Failure Report + # Run this ONLY if the link checker failed + if: steps.lychee-check.outcome == 'failure' run: | echo "## Link Resolution Note" >> $GITHUB_STEP_SUMMARY echo "Local links and directory changes work differently on GitHub than on the docsite." >> $GITHUB_STEP_SUMMARY echo "You must ensure fixes pass the **GitHub check** and also work with **\`hugo server\`**." >> $GITHUB_STEP_SUMMARY + echo "See [Link Checking and Fixing with Lychee](https://github.com/googleapis/genai-toolbox/blob/main/DEVELOPER.md#link-checking-and-fixing-with-lychee) for more details." >> $GITHUB_STEP_SUMMARY echo "---" >> $GITHUB_STEP_SUMMARY + echo "### Broken Links Found" >> $GITHUB_STEP_SUMMARY + cat ./lychee-report.md >> $GITHUB_STEP_SUMMARY + + exit 1 diff --git a/.github/workflows/publish-mcp.yml b/.github/workflows/publish-mcp.yml index dc84fbb759..32264b393c 100644 --- a/.github/workflows/publish-mcp.yml +++ b/.github/workflows/publish-mcp.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - name: Wait for image in Artifact Registry shell: bash diff --git a/.lycheeignore b/.lycheeignore index baec3c4449..236e2c8394 100644 --- a/.lycheeignore +++ b/.lycheeignore @@ -23,8 +23,7 @@ https://cloud.dgraph.io/login https://dgraph.io/docs # MySQL Community downloads and main site (often protected by bot mitigation) -https://dev.mysql.com/downloads/installer/ -https://www.mysql.com/ +^https?://(.*\.)?mysql\.com/.* # Claude desktop download link https://claude.ai/download @@ -37,9 +36,9 @@ https://dev.mysql.com/doc/refman/8.4/en/sql-prepared-statements.html https://dev.mysql.com/doc/refman/8.4/en/user-names.html # npmjs links can occasionally trigger rate limiting during high-frequency CI builds -https://www.npmjs.com/package/@toolbox-sdk/core 
-https://www.npmjs.com/package/@toolbox-sdk/adk +^https?://(www\.)?npmjs\.com/.* + https://www.oceanbase.com/ # Ignore social media and blog profiles to reduce external request overhead -https://medium.com/@mcp_toolbox +https://medium.com/@mcp_toolbox \ No newline at end of file diff --git a/cmd/root.go b/cmd/root.go index d0d11e1a07..5e59997211 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -35,6 +35,7 @@ import ( yaml "github.com/goccy/go-yaml" "github.com/googleapis/genai-toolbox/internal/auth" "github.com/googleapis/genai-toolbox/internal/cli/invoke" + "github.com/googleapis/genai-toolbox/internal/cli/skills" "github.com/googleapis/genai-toolbox/internal/embeddingmodels" "github.com/googleapis/genai-toolbox/internal/log" "github.com/googleapis/genai-toolbox/internal/prebuiltconfigs" @@ -162,6 +163,7 @@ import ( _ "github.com/googleapis/genai-toolbox/internal/tools/looker/lookerrundashboard" _ "github.com/googleapis/genai-toolbox/internal/tools/looker/lookerrunlook" _ "github.com/googleapis/genai-toolbox/internal/tools/looker/lookerupdateprojectfile" + _ "github.com/googleapis/genai-toolbox/internal/tools/looker/lookervalidateproject" _ "github.com/googleapis/genai-toolbox/internal/tools/mindsdb/mindsdbexecutesql" _ "github.com/googleapis/genai-toolbox/internal/tools/mindsdb/mindsdbsql" _ "github.com/googleapis/genai-toolbox/internal/tools/mongodb/mongodbaggregate" @@ -401,6 +403,8 @@ func NewCommand(opts ...Option) *Command { // Register subcommands for tool invocation baseCmd.AddCommand(invoke.NewCommand(cmd)) + // Register subcommands for skill generation + baseCmd.AddCommand(skills.NewCommand(cmd)) return cmd } diff --git a/cmd/root_test.go b/cmd/root_test.go index 3c55e83d93..f26bd1706a 100644 --- a/cmd/root_test.go +++ b/cmd/root_test.go @@ -2370,7 +2370,7 @@ func TestPrebuiltTools(t *testing.T) { wantToolset: server.ToolsetConfigs{ "looker_tools": tools.ToolsetConfig{ Name: "looker_tools", - ToolNames: []string{"get_models", "get_explores", 
"get_dimensions", "get_measures", "get_filters", "get_parameters", "query", "query_sql", "query_url", "get_looks", "run_look", "make_look", "get_dashboards", "run_dashboard", "make_dashboard", "add_dashboard_element", "add_dashboard_filter", "generate_embed_url", "health_pulse", "health_analyze", "health_vacuum", "dev_mode", "get_projects", "get_project_files", "get_project_file", "create_project_file", "update_project_file", "delete_project_file", "get_connections", "get_connection_schemas", "get_connection_databases", "get_connection_tables", "get_connection_table_columns"}, + ToolNames: []string{"get_models", "get_explores", "get_dimensions", "get_measures", "get_filters", "get_parameters", "query", "query_sql", "query_url", "get_looks", "run_look", "make_look", "get_dashboards", "run_dashboard", "make_dashboard", "add_dashboard_element", "add_dashboard_filter", "generate_embed_url", "health_pulse", "health_analyze", "health_vacuum", "dev_mode", "get_projects", "get_project_files", "get_project_file", "create_project_file", "update_project_file", "delete_project_file", "validate_project", "get_connections", "get_connection_schemas", "get_connection_databases", "get_connection_tables", "get_connection_table_columns"}, }, }, }, diff --git a/cmd/skill_generate_test.go b/cmd/skill_generate_test.go new file mode 100644 index 0000000000..3b91dc590b --- /dev/null +++ b/cmd/skill_generate_test.go @@ -0,0 +1,179 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func TestGenerateSkill(t *testing.T) { + // Create a temporary directory for tests + tmpDir := t.TempDir() + outputDir := filepath.Join(tmpDir, "skills") + + // Create a tools.yaml file with a sqlite tool + toolsFileContent := ` +sources: + my-sqlite: + kind: sqlite + database: test.db +tools: + hello-sqlite: + kind: sqlite-sql + source: my-sqlite + description: "hello tool" + statement: "SELECT 'hello' as greeting" +` + + toolsFilePath := filepath.Join(tmpDir, "tools.yaml") + if err := os.WriteFile(toolsFilePath, []byte(toolsFileContent), 0644); err != nil { + t.Fatalf("failed to write tools file: %v", err) + } + + args := []string{ + "skills-generate", + "--tools-file", toolsFilePath, + "--output-dir", outputDir, + "--name", "hello-sqlite", + "--description", "hello tool", + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + _, got, err := invokeCommandWithContext(ctx, args) + if err != nil { + t.Fatalf("command failed: %v\nOutput: %s", err, got) + } + + // Verify generated directory structure + skillPath := filepath.Join(outputDir, "hello-sqlite") + if _, err := os.Stat(skillPath); os.IsNotExist(err) { + t.Fatalf("skill directory not created: %s", skillPath) + } + + // Check SKILL.md + skillMarkdown := filepath.Join(skillPath, "SKILL.md") + content, err := os.ReadFile(skillMarkdown) + if err != nil { + t.Fatalf("failed to read SKILL.md: %v", err) + } + + expectedFrontmatter := `--- +name: hello-sqlite +description: hello tool +---` + if !strings.HasPrefix(string(content), expectedFrontmatter) { + t.Errorf("SKILL.md does not have expected frontmatter format.\nExpected prefix:\n%s\nGot:\n%s", expectedFrontmatter, string(content)) + } + + if !strings.Contains(string(content), "## Usage") { + t.Errorf("SKILL.md 
does not contain '## Usage' section") + } + + if !strings.Contains(string(content), "## Scripts") { + t.Errorf("SKILL.md does not contain '## Scripts' section") + } + + if !strings.Contains(string(content), "### hello-sqlite") { + t.Errorf("SKILL.md does not contain '### hello-sqlite' tool header") + } + + // Check script file + scriptFilename := "hello-sqlite.js" + scriptPath := filepath.Join(skillPath, "scripts", scriptFilename) + if _, err := os.Stat(scriptPath); os.IsNotExist(err) { + t.Fatalf("script file not created: %s", scriptPath) + } + + scriptContent, err := os.ReadFile(scriptPath) + if err != nil { + t.Fatalf("failed to read script file: %v", err) + } + if !strings.Contains(string(scriptContent), "hello-sqlite") { + t.Errorf("script file does not contain expected tool name") + } + + // Check assets + assetPath := filepath.Join(skillPath, "assets", "hello-sqlite.yaml") + if _, err := os.Stat(assetPath); os.IsNotExist(err) { + t.Fatalf("asset file not created: %s", assetPath) + } + assetContent, err := os.ReadFile(assetPath) + if err != nil { + t.Fatalf("failed to read asset file: %v", err) + } + if !strings.Contains(string(assetContent), "hello-sqlite") { + t.Errorf("asset file does not contain expected tool name") + } +} + +func TestGenerateSkill_NoConfig(t *testing.T) { + tmpDir := t.TempDir() + outputDir := filepath.Join(tmpDir, "skills") + + args := []string{ + "skills-generate", + "--output-dir", outputDir, + "--name", "test", + "--description", "test", + } + + _, _, err := invokeCommandWithContext(context.Background(), args) + if err == nil { + t.Fatal("expected command to fail when no configuration is provided and tools.yaml is missing") + } + + // Should not have created the directory if no config was processed + if _, err := os.Stat(outputDir); !os.IsNotExist(err) { + t.Errorf("output directory should not have been created") + } +} + +func TestGenerateSkill_MissingArguments(t *testing.T) { + tmpDir := t.TempDir() + toolsFilePath := 
filepath.Join(tmpDir, "tools.yaml") + if err := os.WriteFile(toolsFilePath, []byte("tools: {}"), 0644); err != nil { + t.Fatalf("failed to write tools file: %v", err) + } + + tests := []struct { + name string + args []string + }{ + { + name: "missing name", + args: []string{"skills-generate", "--tools-file", toolsFilePath, "--description", "test"}, + }, + { + name: "missing description", + args: []string{"skills-generate", "--tools-file", toolsFilePath, "--name", "test"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, got, err := invokeCommandWithContext(context.Background(), tt.args) + if err == nil { + t.Fatalf("expected command to fail due to missing arguments, but it succeeded\nOutput: %s", got) + } + }) + } +} diff --git a/docs/en/getting-started/local_quickstart.md b/docs/en/getting-started/local_quickstart.md index 9049082a01..414156f672 100644 --- a/docs/en/getting-started/local_quickstart.md +++ b/docs/en/getting-started/local_quickstart.md @@ -115,7 +115,7 @@ pip install google-genai 1. Update `my_agent/agent.py` with the following content to connect to Toolbox: ```py - {{< include "quickstart/python/adk/quickstart.py" >}} + {{< regionInclude "quickstart/python/adk/quickstart.py" "quickstart" >}} ```
diff --git a/docs/en/getting-started/quickstart/js/adk/package-lock.json b/docs/en/getting-started/quickstart/js/adk/package-lock.json index 84bc88e40a..9bcbc4080d 100644 --- a/docs/en/getting-started/quickstart/js/adk/package-lock.json +++ b/docs/en/getting-started/quickstart/js/adk/package-lock.json @@ -18,6 +18,7 @@ "resolved": "https://registry.npmjs.org/@google-cloud/paginator/-/paginator-5.0.2.tgz", "integrity": "sha512-DJS3s0OVH4zFDB1PzjxAsHqJT6sKVbRwwML0ZBP9PbU7Yebtu/7SWMRzvO2J3nUi9pRNITCfu4LJeooM2w4pjg==", "license": "Apache-2.0", + "peer": true, "dependencies": { "arrify": "^2.0.0", "extend": "^3.0.2" @@ -31,6 +32,7 @@ "resolved": "https://registry.npmjs.org/@google-cloud/projectify/-/projectify-4.0.0.tgz", "integrity": "sha512-MmaX6HeSvyPbWGwFq7mXdo0uQZLGBYCwziiLIGq5JVX+/bdI3SAq6bP98trV5eTWfLuvsMcIC1YJOF2vfteLFA==", "license": "Apache-2.0", + "peer": true, "engines": { "node": ">=14.0.0" } @@ -40,15 +42,17 @@ "resolved": "https://registry.npmjs.org/@google-cloud/promisify/-/promisify-4.0.0.tgz", "integrity": "sha512-Orxzlfb9c67A15cq2JQEyVc7wEsmFBmHjZWZYQMUyJ1qivXyMwdyNOs9odi79hze+2zqdTtu1E19IM/FtqZ10g==", "license": "Apache-2.0", + "peer": true, "engines": { "node": ">=14" } }, "node_modules/@google-cloud/storage": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@google-cloud/storage/-/storage-7.18.0.tgz", - "integrity": "sha512-r3ZwDMiz4nwW6R922Z1pwpePxyRwE5GdevYX63hRmAQUkUQJcBH/79EnQPDv5cOv1mFBgevdNWQfi3tie3dHrQ==", + "version": "7.19.0", + "resolved": "https://registry.npmjs.org/@google-cloud/storage/-/storage-7.19.0.tgz", + "integrity": "sha512-n2FjE7NAOYyshogdc7KQOl/VZb4sneqPjWouSyia9CMDdMhRX5+RIbqalNmC7LOLzuLAN89VlF2HvG8na9G+zQ==", "license": "Apache-2.0", + "peer": true, "dependencies": { "@google-cloud/paginator": "^5.0.0", "@google-cloud/projectify": "^4.0.0", @@ -56,7 +60,7 @@ "abort-controller": "^3.0.0", "async-retry": "^1.3.3", "duplexify": "^4.1.3", - "fast-xml-parser": "^4.4.1", + "fast-xml-parser": "^5.3.4", "gaxios": 
"^6.0.2", "google-auth-library": "^9.6.3", "html-entities": "^2.5.2", @@ -75,6 +79,7 @@ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", "license": "MIT", + "peer": true, "bin": { "uuid": "dist/bin/uuid" } @@ -97,7 +102,6 @@ "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.14.0.tgz", "integrity": "sha512-jirYprAAJU1svjwSDVCzyVq+FrJpJd5CSxR/g2Ga/gZ0ZYZpcWjMS75KJl9y71K1mDN+tcx6s21CzCbB2R840g==", "license": "Apache-2.0", - "peer": true, "dependencies": { "google-auth-library": "^9.14.2", "ws": "^8.18.0" @@ -136,7 +140,6 @@ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.5.tgz", "integrity": "sha512-QakrKIGniGuRVfWBdMsDea/dx1PNE739QJ7gCM41s9q+qaCYTHCdsIBXQVVXry3mfWAiaM9kT22Hyz53Uw8mfg==", "license": "MIT", - "peer": true, "dependencies": { "ajv": "^6.12.6", "content-type": "^1.0.5", @@ -299,6 +302,7 @@ "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", "license": "MIT", + "peer": true, "engines": { "node": ">= 10" } @@ -307,13 +311,15 @@ "version": "0.12.5", "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.5.tgz", "integrity": "sha512-hWtVTC2q7hc7xZ/RLbxapMvDMgUnDvKvMOpKal4DrMyfGBUfB1oKaZlIRr6mJL+If3bAP6sV/QneGzF6tJjZDg==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@types/node": { "version": "24.10.1", "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.1.tgz", "integrity": "sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==", "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -323,6 +329,7 @@ "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.13.tgz", "integrity": 
"sha512-FGJ6udDNUCjd19pp0Q3iTiDkwhYup7J8hpMW9c4k53NrccQFFWKRho6hvtPPEhnXWKvukfwAlB6DbDz4yhH5Gg==", "license": "MIT", + "peer": true, "dependencies": { "@types/caseless": "*", "@types/node": "*", @@ -335,6 +342,7 @@ "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.5.tgz", "integrity": "sha512-jqdObeR2rxZZbPSGL+3VckHMYtu+f9//KXBsVny6JSX/pa38Fy+bGjuG8eW/H6USNQWhLi8Num++cU2yOCNz4A==", "license": "MIT", + "peer": true, "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", @@ -352,6 +360,7 @@ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "license": "MIT", + "peer": true, "engines": { "node": ">= 0.6" } @@ -361,6 +370,7 @@ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "license": "MIT", + "peer": true, "dependencies": { "mime-db": "1.52.0" }, @@ -372,13 +382,15 @@ "version": "4.0.5", "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz", "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/abort-controller": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", "license": "MIT", + "peer": true, "dependencies": { "event-target-shim": "^5.0.0" }, @@ -453,6 +465,7 @@ "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -462,6 +475,7 @@ "resolved": 
"https://registry.npmjs.org/async-retry/-/async-retry-1.3.3.tgz", "integrity": "sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==", "license": "MIT", + "peer": true, "dependencies": { "retry": "0.13.1" } @@ -754,6 +768,7 @@ "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-4.1.3.tgz", "integrity": "sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA==", "license": "MIT", + "peer": true, "dependencies": { "end-of-stream": "^1.4.1", "inherits": "^2.0.3", @@ -802,6 +817,7 @@ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", "license": "MIT", + "peer": true, "dependencies": { "once": "^1.4.0" } @@ -871,6 +887,7 @@ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -901,7 +918,6 @@ "resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz", "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==", "license": "MIT", - "peer": true, "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.0", @@ -973,9 +989,9 @@ "license": "MIT" }, "node_modules/fast-xml-parser": { - "version": "4.5.3", - "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz", - "integrity": "sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==", + "version": "5.3.5", + "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.3.5.tgz", + "integrity": "sha512-JeaA2Vm9ffQKp9VjvfzObuMCjUYAp5WDYhRYL5LrBPY/jUDlUtOvDfot0vKSkB9tuX885BDHjtw4fZadD95wnA==", "funding": [ { "type": "github", @@ -983,8 +999,9 
@@ } ], "license": "MIT", + "peer": true, "dependencies": { - "strnum": "^1.1.1" + "strnum": "^2.1.2" }, "bin": { "fxparser": "src/cli/cli.js" @@ -1333,7 +1350,8 @@ "url": "https://patreon.com/mdevils" } ], - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/http-errors": { "version": "2.0.0", @@ -1365,6 +1383,7 @@ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", "license": "MIT", + "peer": true, "dependencies": { "@tootallnate/once": "2", "agent-base": "6", @@ -1379,6 +1398,7 @@ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", "license": "MIT", + "peer": true, "dependencies": { "debug": "4" }, @@ -1555,6 +1575,7 @@ "resolved": "https://registry.npmjs.org/mime/-/mime-3.0.0.tgz", "integrity": "sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==", "license": "MIT", + "peer": true, "bin": { "mime": "cli.js" }, @@ -1715,6 +1736,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "license": "MIT", + "peer": true, "dependencies": { "yocto-queue": "^0.1.0" }, @@ -1856,6 +1878,7 @@ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "license": "MIT", + "peer": true, "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -1870,6 +1893,7 @@ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "license": "MIT", + 
"peer": true, "engines": { "node": ">= 4" } @@ -1879,6 +1903,7 @@ "resolved": "https://registry.npmjs.org/retry-request/-/retry-request-7.0.2.tgz", "integrity": "sha512-dUOvLMJ0/JJYEn8NrpOaGNE7X3vpI5XlZS/u0ANjqtcZVKnIxP7IgCFwrKTxENw29emmwug53awKtaMm4i9g5w==", "license": "MIT", + "peer": true, "dependencies": { "@types/request": "^2.48.8", "extend": "^3.0.2", @@ -2107,6 +2132,7 @@ "resolved": "https://registry.npmjs.org/stream-events/-/stream-events-1.0.5.tgz", "integrity": "sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg==", "license": "MIT", + "peer": true, "dependencies": { "stubs": "^3.0.0" } @@ -2115,13 +2141,15 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.3.tgz", "integrity": "sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "license": "MIT", + "peer": true, "dependencies": { "safe-buffer": "~5.2.0" } @@ -2223,28 +2251,31 @@ } }, "node_modules/strnum": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.1.2.tgz", - "integrity": "sha512-vrN+B7DBIoTTZjnPNewwhx6cBA/H+IS7rfW68n7XxC1y7uoiGQBxaKzqucGUgavX15dJgiGztLJ8vxuEzwqBdA==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/strnum/-/strnum-2.1.2.tgz", + "integrity": "sha512-l63NF9y/cLROq/yqKXSLtcMeeyOfnSQlfMSlzFt/K73oIaD8DGaQWd7Z34X9GPiKqP5rbSh84Hl4bOlLcjiSrQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/NaturalIntelligence" } ], - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/stubs": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/stubs/-/stubs-3.0.0.tgz", 
"integrity": "sha512-PdHt7hHUJKxvTCgbKX9C1V/ftOcjJQgz8BZwNfV5c4B6dcGqlpelTbJ999jBGZ2jYiPAwcX5dP6oBwVlBlUbxw==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/teeny-request": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/teeny-request/-/teeny-request-9.0.0.tgz", "integrity": "sha512-resvxdc6Mgb7YEThw6G6bExlXKkv6+YbuzGg9xuXxSgxJF7Ozs+o8Y9+2R3sArdWdW8nOokoQb1yrpFB0pQK2g==", "license": "Apache-2.0", + "peer": true, "dependencies": { "http-proxy-agent": "^5.0.0", "https-proxy-agent": "^5.0.0", @@ -2261,6 +2292,7 @@ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", "license": "MIT", + "peer": true, "dependencies": { "debug": "4" }, @@ -2273,6 +2305,7 @@ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", "license": "MIT", + "peer": true, "dependencies": { "agent-base": "6", "debug": "4" @@ -2314,7 +2347,8 @@ "version": "7.16.0", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/unpipe": { "version": "1.0.0", @@ -2338,7 +2372,8 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/uuid": { "version": "9.0.1", @@ -2525,6 +2560,7 @@ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", "integrity": 
"sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2537,7 +2573,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/docs/en/getting-started/quickstart/js/genkit/package-lock.json b/docs/en/getting-started/quickstart/js/genkit/package-lock.json index 1b00f903e3..cdb5744245 100644 --- a/docs/en/getting-started/quickstart/js/genkit/package-lock.json +++ b/docs/en/getting-started/quickstart/js/genkit/package-lock.json @@ -3351,13 +3351,13 @@ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, "node_modules/axios": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.12.2.tgz", - "integrity": "sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw==", + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", "license": "MIT", "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.4", + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", "proxy-from-env": "^1.1.0" } }, @@ -4248,9 +4248,10 @@ } }, "node_modules/form-data": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", - "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": 
"sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", diff --git a/docs/en/getting-started/quickstart/js/langchain/package-lock.json b/docs/en/getting-started/quickstart/js/langchain/package-lock.json index a52001ef13..c71fb84620 100644 --- a/docs/en/getting-started/quickstart/js/langchain/package-lock.json +++ b/docs/en/getting-started/quickstart/js/langchain/package-lock.json @@ -18,7 +18,8 @@ "node_modules/@cfworker/json-schema": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/@cfworker/json-schema/-/json-schema-4.1.1.tgz", - "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==" + "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==", + "peer": true }, "node_modules/@google/generative-ai": { "version": "0.24.1", @@ -225,6 +226,7 @@ "version": "5.2.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "peer": true, "engines": { "node": ">=10" }, @@ -308,6 +310,7 @@ "version": "6.3.0", "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "peer": true, "engines": { "node": ">=10" }, @@ -420,6 +423,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -821,6 +825,7 @@ "version": "1.0.21", "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.21.tgz", "integrity": 
"sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==", + "peer": true, "dependencies": { "base64-js": "^1.5.1" } @@ -873,9 +878,9 @@ } }, "node_modules/langsmith": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.4.3.tgz", - "integrity": "sha512-vuBAagBZulXj0rpZhUTxmHhrYIBk53z8e2Q8ty4OHVkahN4ul7Im3OZxD9jsXZB0EuncK1xRYtY8J3BW4vj1zw==", + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.5.2.tgz", + "integrity": "sha512-CfkcQsiajtTWknAcyItvJsKEQdY2VgDpm6U8pRI9wnM07mevnOv5EF+RcqWGwx37SEUxtyi2RXMwnKW8b06JtA==", "license": "MIT", "dependencies": { "@types/uuid": "^10.0.0", @@ -969,6 +974,7 @@ "version": "4.2.0", "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", + "peer": true, "bin": { "mustache": "bin/mustache" } @@ -1407,7 +1413,6 @@ "version": "3.25.76", "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/docs/en/getting-started/quickstart/python/adk/quickstart.py b/docs/en/getting-started/quickstart/python/adk/quickstart.py index 477ced578d..42a72fa38e 100644 --- a/docs/en/getting-started/quickstart/python/adk/quickstart.py +++ b/docs/en/getting-started/quickstart/python/adk/quickstart.py @@ -1,6 +1,21 @@ +# [START quickstart] +import asyncio + from google.adk import Agent from google.adk.apps import App +from google.adk.runners import InMemoryRunner from google.adk.tools.toolbox_toolset import ToolboxToolset +from google.genai.types import Content, Part + +prompt = """ +You're a helpful hotel assistant. You handle hotel searching, booking and +cancellations. 
When the user searches for a hotel, mention it's name, id, +location and price tier. Always mention hotel ids while performing any +searches. This is very important for any operations. For any bookings or +cancellations, please provide the appropriate confirmation. Be sure to +update checkin or checkout dates if mentioned by the user. +Don't ask for confirmations from the user. +""" # TODO(developer): update the TOOLBOX_URL to your toolbox endpoint toolset = ToolboxToolset( @@ -8,10 +23,35 @@ toolset = ToolboxToolset( ) root_agent = Agent( - name='root_agent', + name='hotel_assistant', model='gemini-2.5-flash', - instruction="You are a helpful AI assistant designed to provide accurate and useful information.", + instruction=prompt, tools=[toolset], ) app = App(root_agent=root_agent, name="my_agent") +# [END quickstart] + +queries = [ + "Find hotels in Basel with Basel in its name.", + "Can you book the Hilton Basel for me?", + "Oh wait, this is too expensive. Please cancel it and book the Hyatt Regency instead.", + "My check in dates would be from April 10, 2024 to April 19, 2024.", +] + +async def main(): + runner = InMemoryRunner(app=app) + session = await runner.session_service.create_session( + app_name=app.name, user_id="test_user" + ) + + for query in queries: + print(f"\nUser: {query}") + user_message = Content(parts=[Part.from_text(text=query)]) + + async for event in runner.run_async(user_id="test_user", session_id=session.id, new_message=user_message): + if event.is_final_response() and event.content and event.content.parts: + print(f"Agent: {event.content.parts[0].text}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/docs/en/getting-started/quickstart/python/quickstart_test.py b/docs/en/getting-started/quickstart/python/quickstart_test.py index eb46bee1f8..b6c6e3a8a8 100755 --- a/docs/en/getting-started/quickstart/python/quickstart_test.py +++ b/docs/en/getting-started/quickstart/python/quickstart_test.py @@ -41,31 +41,29 @@ def 
golden_keywords(): class TestExecution: """Test framework execution and output validation.""" + _cached_output = None + @pytest.fixture(scope="function") def script_output(self, capsys): """Run the quickstart function and return its output.""" - - # TODO: Add better validation for ADK once we have a way to capture its - # output. - if ORCH_NAME == "adk": - return quickstart.app.root_agent.name - else: + if TestExecution._cached_output is None: asyncio.run(quickstart.main()) - - return capsys.readouterr() + out, err = capsys.readouterr() + TestExecution._cached_output = (out, err) + + class Output: + def __init__(self, out, err): + self.out = out + self.err = err + + return Output(*TestExecution._cached_output) def test_script_runs_without_errors(self, script_output): """Test that the script runs and produces no stderr.""" - if ORCH_NAME == "adk": - return assert script_output.err == "", f"Script produced stderr: {script_output.err}" def test_keywords_in_output(self, script_output, golden_keywords): """Test that expected keywords are present in the script's output.""" - - if ORCH_NAME == "adk": - assert script_output == "root_agent" - return output = script_output.out missing_keywords = [kw for kw in golden_keywords if kw not in output] assert not missing_keywords, f"Missing keywords in output: {missing_keywords}" diff --git a/docs/en/how-to/connect-ide/postgres_mcp.md b/docs/en/how-to/connect-ide/postgres_mcp.md index 44e3e09ade..7d778c6a4d 100644 --- a/docs/en/how-to/connect-ide/postgres_mcp.md +++ b/docs/en/how-to/connect-ide/postgres_mcp.md @@ -32,7 +32,7 @@ to expose your developer assistant tools to a Postgres instance: {{< notice tip >}} This guide can be used with [AlloyDB -Omni](https://cloud.google.com/alloydb/omni/current/docs/overview). +Omni](https://cloud.google.com/alloydb/omni/docs/overview). {{< /notice >}} ## Set up the database @@ -40,10 +40,10 @@ Omni](https://cloud.google.com/alloydb/omni/current/docs/overview). 1. 
Create or select a PostgreSQL instance. * [Install PostgreSQL locally](https://www.postgresql.org/download/) - * [Install AlloyDB Omni](https://cloud.google.com/alloydb/omni/current/docs/quickstart) + * [Install AlloyDB Omni](https://cloud.google.com/alloydb/omni/docs/quickstart) 1. Create or reuse [a database - user](https://cloud.google.com/alloydb/omni/current/docs/database-users/manage-users) + user](https://docs.cloud.google.com/alloydb/omni/containers/current/docs/database-users/manage-users) and have the username and password ready. ## Install MCP Toolbox diff --git a/docs/en/how-to/deploy_adk_agent.md b/docs/en/how-to/deploy_adk_agent.md index 973d84dfe6..cc247d4831 100644 --- a/docs/en/how-to/deploy_adk_agent.md +++ b/docs/en/how-to/deploy_adk_agent.md @@ -83,15 +83,12 @@ Toolbox instead of the local address. 2. Open your agent file (`my_agent/agent.py`). -3. Update the `ToolboxSyncClient` initialization to use your Cloud Run URL. +3. Update the `ToolboxToolset` initialization to point to your Cloud Run service URL. Replace the existing initialization code with the following: - {{% alert color="info" %}} -Since Cloud Run services are secured by default, you also need to provide an -authentication token. + {{% alert color="info" title="Note" %}} +Since Cloud Run services are secured by default, you also need to provide a workload identity. {{% /alert %}} - Replace your existing client initialization code with the following: - ```python from google.adk import Agent from google.adk.apps import App @@ -132,14 +129,14 @@ app = App(root_agent=root_agent, name="my_agent") Run the deployment command: ```bash -make backend +make deploy ``` This command will build your agent's container image and deploy it to Vertex AI. ## Step 6: Test your Deployment -Once the deployment command (`make backend`) completes, it will output the URL +Once the deployment command (`make deploy`) completes, it will output the URL for the Agent Engine Playground. 
You can click on this URL to open the Playground in your browser and start chatting with your agent to test the tools. diff --git a/docs/en/how-to/generate_skill.md b/docs/en/how-to/generate_skill.md new file mode 100644 index 0000000000..7fa731e85b --- /dev/null +++ b/docs/en/how-to/generate_skill.md @@ -0,0 +1,112 @@ +--- +title: "Generate Agent Skills" +type: docs +weight: 10 +description: > + How to generate agent skills from a toolset. +--- + +The `skills-generate` command allows you to convert a **toolset** into an **Agent Skill**. A toolset is a collection of tools, and the generated skill will contain metadata and execution scripts for all tools within that toolset, complying with the [Agent Skill specification](https://agentskills.io/specification). + +## Before you begin + +1. Make sure you have the `toolbox` executable in your PATH. +2. Make sure you have [Node.js](https://nodejs.org/) installed on your system. + +## Generating a Skill from a Toolset + +A skill package consists of a `SKILL.md` file (with required YAML frontmatter) and a set of Node.js scripts. Each tool defined in your toolset maps to a corresponding script in the generated Node.js scripts (`.js`) that work across different platforms (Linux, macOS, Windows). + + +### Command Usage + +The basic syntax for the command is: + +```bash +toolbox skills-generate \ + --name \ + --toolset \ + --description \ + --output-dir +``` + +- ``: Can be `--tools-file`, `--tools-files`, `--tools-folder`, and `--prebuilt`. See the [CLI Reference](../reference/cli.md) for details. +- `--name`: Name of the generated skill. +- `--description`: Description of the generated skill. +- `--toolset`: (Optional) Name of the toolset to convert into a skill. If not provided, all tools will be included. +- `--output-dir`: (Optional) Directory to output generated skills (default: "skills"). 
+ +{{< notice note >}} +**Note:** The `` must follow the Agent Skill [naming convention](https://agentskills.io/specification): it must contain only lowercase alphanumeric characters and hyphens, cannot start or end with a hyphen, and cannot contain consecutive hyphens (e.g., `my-skill`, `data-processing`). +{{< /notice >}} + +### Example: Custom Tools File + +1. Create a `tools.yaml` file with a toolset and some tools: + + ```yaml + tools: + tool_a: + description: "First tool" + run: + command: "echo 'Tool A'" + tool_b: + description: "Second tool" + run: + command: "echo 'Tool B'" + toolsets: + my_toolset: + tools: + - tool_a + - tool_b + ``` + +2. Generate the skill: + + ```bash + toolbox --tools-file tools.yaml skills-generate \ + --name "my-skill" \ + --toolset "my_toolset" \ + --description "A skill containing multiple tools" \ + --output-dir "generated-skills" + ``` + +3. The generated skill directory structure: + + ```text + generated-skills/ + └── my-skill/ + ├── SKILL.md + ├── assets/ + │ ├── tool_a.yaml + │ └── tool_b.yaml + └── scripts/ + ├── tool_a.js + └── tool_b.js + ``` + + In this example, the skill contains two Node.js scripts (`tool_a.js` and `tool_b.js`), each mapping to a tool in the original toolset. + +### Example: Prebuilt Configuration + +You can also generate skills from prebuilt toolsets: + +```bash +toolbox --prebuilt alloydb-postgres-admin skills-generate \ + --name "alloydb-postgres-admin" \ + --description "skill for performing administrative operations on alloydb" +``` + +## Installing the Generated Skill in Gemini CLI + +Once you have generated a skill, you can install it into the Gemini CLI using the `gemini skills install` command. + +### Installation Command + +Provide the path to the directory containing the generated skill: + +```bash +gemini skills install /path/to/generated-skills/my-skill +``` + +Alternatively, use ~/.gemini/skills as the `--output-dir` to generate the skill straight to the Gemini CLI. 
diff --git a/docs/en/how-to/invoke_tool.md b/docs/en/how-to/invoke_tool.md index 7448de13fa..4fc23d3a2c 100644 --- a/docs/en/how-to/invoke_tool.md +++ b/docs/en/how-to/invoke_tool.md @@ -20,14 +20,15 @@ The `invoke` command allows you to invoke tools defined in your configuration di 1. Make sure you have the `toolbox` binary installed or built. 2. Make sure you have a valid tool configuration file (e.g., `tools.yaml`). -## Basic Usage +### Command Usage The basic syntax for the command is: ```bash -toolbox [--tools-file | --prebuilt ] invoke [params] +toolbox invoke [params] ``` +- ``: Can be `--tools-file`, `--tools-files`, `--tools-folder`, and `--prebuilt`. See the [CLI Reference](../reference/cli.md) for details. - ``: The name of the tool you want to call. This must match the name defined in your `tools.yaml`. - `[params]`: (Optional) A JSON string representing the arguments for the tool. diff --git a/docs/en/reference/cli.md b/docs/en/reference/cli.md index 150171f3aa..11549c2830 100644 --- a/docs/en/reference/cli.md +++ b/docs/en/reference/cli.md @@ -32,7 +32,8 @@ description: > ## Sub Commands -### `invoke` +
+invoke Executes a tool directly with the provided parameters. This is useful for testing tool configurations and parameters without needing a full client setup. @@ -42,8 +43,36 @@ Executes a tool directly with the provided parameters. This is useful for testin toolbox invoke [params] ``` -- ``: The name of the tool to execute (as defined in your configuration). -- `[params]`: (Optional) A JSON string containing the parameters for the tool. +**Arguments:** + +- `tool-name`: The name of the tool to execute (as defined in your configuration). +- `params`: (Optional) A JSON string containing the parameters for the tool. + +For more detailed instructions, see [Invoke Tools via CLI](../how-to/invoke_tool.md). + +
+ +
+skills-generate + +Generates a skill package from a specified toolset. Each tool in the toolset will have a corresponding Node.js execution script in the generated skill. + +**Syntax:** + +```bash +toolbox skills-generate --name --description --toolset --output-dir +``` + +**Flags:** + +- `--name`: Name of the generated skill. +- `--description`: Description of the generated skill. +- `--toolset`: (Optional) Name of the toolset to convert into a skill. If not provided, all tools will be included. +- `--output-dir`: (Optional) Directory to output generated skills (default: "skills"). + +For more detailed instructions, see [Generate Agent Skills](../how-to/generate_skill.md). + +
## Examples diff --git a/docs/en/reference/prebuilt-tools.md b/docs/en/reference/prebuilt-tools.md index d8a2e806b3..7a52236dfa 100644 --- a/docs/en/reference/prebuilt-tools.md +++ b/docs/en/reference/prebuilt-tools.md @@ -488,6 +488,7 @@ See [Usage Examples](../reference/cli.md#examples). * `create_project_file`: Create a new LookML file. * `update_project_file`: Update an existing LookML file. * `delete_project_file`: Delete a LookML file. + * `validate_project`: Check the syntax of a LookML project. * `get_connections`: Get the available connections in a Looker instance. * `get_connection_schemas`: Get the available schemas in a connection. * `get_connection_databases`: Get the available databases in a connection. diff --git a/docs/en/resources/sources/alloydb-pg.md b/docs/en/resources/sources/alloydb-pg.md index cf6d848ae6..b7fe99c759 100644 --- a/docs/en/resources/sources/alloydb-pg.md +++ b/docs/en/resources/sources/alloydb-pg.md @@ -194,6 +194,15 @@ Use environment variable replacement with the format ${ENV_NAME} instead of hardcoding your secrets into the configuration file. {{< /notice >}} +### Managed Connection Pooling + +Toolbox automatically supports [Managed Connection Pooling][alloydb-mcp]. If your AlloyDB instance has Managed Connection Pooling enabled, the connection will immediately benefit from increased throughput and reduced latency. + +The interface is identical, so there's no additional configuration required on the client. For more information on configuring your instance, see the [AlloyDB Managed Connection Pooling documentation][alloydb-mcp-docs]. 
+ +[alloydb-mcp]: https://cloud.google.com/blog/products/databases/alloydb-managed-connection-pooling +[alloydb-mcp-docs]: https://cloud.google.com/alloydb/docs/configure-managed-connection-pooling + ## Reference | **field** | **type** | **required** | **description** | diff --git a/docs/en/resources/sources/cloud-sql-pg.md b/docs/en/resources/sources/cloud-sql-pg.md index 3b5f54781d..182b54e914 100644 --- a/docs/en/resources/sources/cloud-sql-pg.md +++ b/docs/en/resources/sources/cloud-sql-pg.md @@ -195,6 +195,15 @@ Use environment variable replacement with the format ${ENV_NAME} instead of hardcoding your secrets into the configuration file. {{< /notice >}} +### Managed Connection Pooling + +Toolbox automatically supports [Managed Connection Pooling][csql-mcp]. If your Cloud SQL for PostgreSQL instance has Managed Connection Pooling enabled, the connection will immediately benefit from increased throughput and reduced latency. + +The interface is identical, so there's no additional configuration required on the client. For more information on configuring your instance, see the [Cloud SQL Managed Connection Pooling documentation][csql-mcp-docs]. + +[csql-mcp]: https://docs.cloud.google.com/sql/docs/postgres/managed-connection-pooling +[csql-mcp-docs]: https://docs.cloud.google.com/sql/docs/postgres/configure-mcp + ## Reference | **field** | **type** | **required** | **description** | diff --git a/docs/en/resources/tools/cloudloggingadmin/_index.md b/docs/en/resources/tools/cloudloggingadmin/_index.md new file mode 100644 index 0000000000..a5b34e9a76 --- /dev/null +++ b/docs/en/resources/tools/cloudloggingadmin/_index.md @@ -0,0 +1,8 @@ +--- +title: "Cloud Logging Admin" +linkTitle: "Cloud Logging Admin" +type: docs +weight: 1 +description: > + Tools that work with Cloud Logging Admin Sources. 
+--- diff --git a/docs/en/resources/tools/looker/looker-validate-project.md b/docs/en/resources/tools/looker/looker-validate-project.md new file mode 100644 index 0000000000..956588b11d --- /dev/null +++ b/docs/en/resources/tools/looker/looker-validate-project.md @@ -0,0 +1,47 @@ +--- +title: "looker-validate-project" +type: docs +weight: 1 +description: > + A "looker-validate-project" tool checks the syntax of a LookML project and reports any errors +aliases: +- /resources/tools/looker-validate-project +--- + +## About + +A "looker-validate-project" tool checks the syntax of a LookML project and reports any errors + +It's compatible with the following sources: + +- [looker](../../sources/looker.md) + +`looker-validate-project` accepts a project_id parameter. + +## Example + +```yaml +tools: + validate_project: + kind: looker-validate-project + source: looker-source + description: | + This tool checks a LookML project for syntax errors. + + Prerequisite: The Looker session must be in Development Mode. Use `dev_mode: true` first. + + Parameters: + - project_id (required): The unique ID of the LookML project. + + Output: + A list of error details including the file path and line number, and also a list of models + that are not currently valid due to LookML errors. +``` + +## Reference + +| **field** | **type** | **required** | **description** | +|-------------|:--------:|:------------:|----------------------------------------------------| +| kind | string | true | Must be "looker-validate-project". | +| source | string | true | Name of the source Looker instance. | +| description | string | true | Description of the tool that is passed to the LLM. 
| diff --git a/docs/en/samples/pre_post_processing/_index.md b/docs/en/samples/pre_post_processing/_index.md index 9b3c5479a8..6fcf570027 100644 --- a/docs/en/samples/pre_post_processing/_index.md +++ b/docs/en/samples/pre_post_processing/_index.md @@ -1,14 +1,18 @@ --- -title: "Pre and Post processing" +title: "Pre- and Post- Processing" type: docs weight: 1 description: > - Pre and Post processing in GenAI applications. + Intercept and modify interactions between the agent and its tools either before or after a tool is executed. --- -Pre and post processing allow developers to intercept and modify interactions between the agent and its tools or the user. +Pre- and post- processing allow developers to intercept and modify interactions between the agent and its tools or the user. -> **Note**: These capabilities are typically features of **orchestration frameworks** (like LangChain, LangGraph, or Agent Builder) rather than the Toolbox SDK itself. However, Toolbox tools are designed to fully leverage these framework capabilities to support robust, secure, and compliant agent architectures. +{{< notice note >}} + +These capabilities are typically features of **orchestration frameworks** (like LangChain, LangGraph, or Agent Builder) rather than the Toolbox SDK itself. However, Toolbox tools are designed to fully leverage these framework capabilities to support robust, secure, and compliant agent architectures. + +{{< /notice >}} ## Types of Processing @@ -39,9 +43,12 @@ Wraps individual tool executions. This is best for logic specific to a single to - **Scope**: Intercepts the raw inputs (arguments) to a tool and its outputs. - **Use Cases**: Argument validation, output formatting, specific privacy rules for sensitive tools. -### Comparison with Other Levels +### Other Levels It is helpful to understand how tool-level processing differs from other scopes: - **Model Level**: Intercepts individual calls to the LLM (prompts and responses). 
Unlike tool-level, this applies globally to all text sent/received, making it better for global PII redaction or token tracking. - **Agent Level**: Wraps the high-level execution loop (e.g., a "turn" in the conversation). Unlike tool-level, this envelopes the entire turn (user input to final response), making it suitable for session management or end-to-end auditing. + + +## Samples diff --git a/docs/en/samples/pre_post_processing/python.md b/docs/en/samples/pre_post_processing/python.md index 20911c3439..ed1438720b 100644 --- a/docs/en/samples/pre_post_processing/python.md +++ b/docs/en/samples/pre_post_processing/python.md @@ -1,14 +1,14 @@ --- -title: "(Python) Pre and post processing" +title: "Python" type: docs -weight: 4 +weight: 1 description: > - How to add pre and post processing to your Python toolbox applications. + How to add pre- and post- processing to your Agents using Python. --- ## Prerequisites -This tutorial assumes that you have set up a basic toolbox application as described in the [local quickstart](../../getting-started/local_quickstart). +This tutorial assumes that you have set up Toolbox with a basic agent as described in the [local quickstart](../../getting-started/local_quickstart.md). This guide demonstrates how to implement these patterns in your Toolbox applications. @@ -32,7 +32,16 @@ The following example demonstrates how to use `ToolboxClient` with LangChain's m {{< include "python/langchain/agent.py" >}} ``` -For more information, see the [LangChain Middleware documentation](https://docs.langchain.com/oss/python/langchain/middleware/custom#wrap-style-hooks). You can also add model-level (`wrap_model`) and agent-level (`before_agent`, `after_agent`) hooks to intercept messages at different stages of the execution loop. See the [LangChain Middleware documentation](https://docs.langchain.com/oss/python/langchain/middleware/custom#wrap-style-hooks) for details on these additional hook types. 
{{% /tab %}} {{< /tabpane >}} + +## Results + +The output should look similar to the following. Note that exact responses may vary due to the non-deterministic nature of LLMs and differences between orchestration frameworks. + +``` +AI: Booking Confirmed! You earned 500 Loyalty Points with this stay. + +AI: Error: Maximum stay duration is 14 days. +``` diff --git a/docs/en/samples/pre_post_processing/python/__init__.py b/docs/en/samples/pre_post_processing/python/__init__.py index ed5bef506f..f5b7c1bfd2 100644 --- a/docs/en/samples/pre_post_processing/python/__init__.py +++ b/docs/en/samples/pre_post_processing/python/__init__.py @@ -1,3 +1,18 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + # This file makes the 'pre_post_processing/python' directory a Python package. # You can include any package-level initialization logic here if needed. 
diff --git a/docs/en/samples/pre_post_processing/python/agent_test.py b/docs/en/samples/pre_post_processing/python/agent_test.py index 727c074739..36c5b8e27d 100644 --- a/docs/en/samples/pre_post_processing/python/agent_test.py +++ b/docs/en/samples/pre_post_processing/python/agent_test.py @@ -23,19 +23,11 @@ ORCH_NAME = os.environ.get("ORCH_NAME") module_path = f"python.{ORCH_NAME}.agent" agent = importlib.import_module(module_path) - -@pytest.fixture(scope="module") -def golden_keywords(): - """Loads expected keywords from the golden.txt file.""" - golden_file_path = Path(__file__).resolve().parent.parent / "golden.txt" - if not golden_file_path.exists(): - pytest.fail(f"Golden file not found: {golden_file_path}") - try: - with open(golden_file_path, "r") as f: - return [line.strip() for line in f.readlines() if line.strip()] - except Exception as e: - pytest.fail(f"Could not read golden.txt: {e}") - +GOLDEN_KEYWORDS = [ + "AI:", + "Loyalty Points", + "POLICY CHECK: Intercepting 'update-hotel'", +] # --- Execution Tests --- class TestExecution: @@ -51,9 +43,9 @@ class TestExecution: """Test that the script runs and produces no stderr.""" assert script_output.err == "", f"Script produced stderr: {script_output.err}" - def test_keywords_in_output(self, script_output, golden_keywords): + def test_keywords_in_output(self, script_output): """Test that expected keywords are present in the script's output.""" output = script_output.out print(f"\nAgent Output:\n{output}\n") - missing_keywords = [kw for kw in golden_keywords if kw not in output] + missing_keywords = [kw for kw in GOLDEN_KEYWORDS if kw not in output] assert not missing_keywords, f"Missing keywords in output: {missing_keywords}" diff --git a/docs/en/samples/pre_post_processing/python/langchain/agent.py b/docs/en/samples/pre_post_processing/python/langchain/agent.py index a363c2bad3..5e174943a7 100644 --- a/docs/en/samples/pre_post_processing/python/langchain/agent.py +++ 
b/docs/en/samples/pre_post_processing/python/langchain/agent.py @@ -47,7 +47,13 @@ async def enforce_business_rules(request, handler): except ValueError: pass # Ignore invalid date formats - return await handler(request) + # PRE: Code here runs BEFORE the tool execution + + # EXEC: Execute the tool (or next middleware) + result = await handler(request) + + # POST: Code here runs AFTER the tool execution + return result # Post processing @@ -58,8 +64,12 @@ async def enrich_response(request, handler): Adds loyalty points information to successful bookings. Standardizes output format. """ + # PRE: Code here runs BEFORE the tool execution + + # EXEC: Execute the tool (or next middleware) result = await handler(request) + # POST: Code here runs AFTER the tool execution if isinstance(result, ToolMessage): content = str(result.content) tool_name = request.tool_call["name"] @@ -79,6 +89,7 @@ async def main(): system_prompt=system_prompt, model=model, tools=tools, + # add any pre and post processing methods middleware=[enforce_business_rules, enrich_response], ) @@ -88,7 +99,6 @@ async def main(): ) print("-" * 50) - print("Final Client Response:") last_ai_msg = response["messages"][-1].content print(f"AI: {last_ai_msg}") diff --git a/internal/cli/skills/command.go b/internal/cli/skills/command.go new file mode 100644 index 0000000000..d8b2d286a9 --- /dev/null +++ b/internal/cli/skills/command.go @@ -0,0 +1,237 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package skills + +import ( + "context" + _ "embed" + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/googleapis/genai-toolbox/internal/log" + "github.com/googleapis/genai-toolbox/internal/server" + "github.com/googleapis/genai-toolbox/internal/server/resources" + "github.com/googleapis/genai-toolbox/internal/tools" + + "github.com/spf13/cobra" +) + +// RootCommand defines the interface for required by skills-generate subcommand. +// This allows subcommands to access shared resources and functionality without +// direct coupling to the root command's implementation. +type RootCommand interface { + // Config returns a copy of the current server configuration. + Config() server.ServerConfig + + // LoadConfig loads and merges the configuration from files, folders, and prebuilts. + LoadConfig(ctx context.Context) error + + // Setup initializes the runtime environment, including logging and telemetry. + // It returns the updated context and a shutdown function to be called when finished. + Setup(ctx context.Context) (context.Context, func(context.Context) error, error) + + // Logger returns the logger instance. + Logger() log.Logger +} + +// Command is the command for generating skills. +type Command struct { + *cobra.Command + rootCmd RootCommand + name string + description string + toolset string + outputDir string +} + +// NewCommand creates a new Command. 
+func NewCommand(rootCmd RootCommand) *cobra.Command { + cmd := &Command{ + rootCmd: rootCmd, + } + cmd.Command = &cobra.Command{ + Use: "skills-generate", + Short: "Generate skills from tool configurations", + RunE: func(c *cobra.Command, args []string) error { + return cmd.run(c) + }, + } + + cmd.Flags().StringVar(&cmd.name, "name", "", "Name of the generated skill.") + cmd.Flags().StringVar(&cmd.description, "description", "", "Description of the generated skill") + cmd.Flags().StringVar(&cmd.toolset, "toolset", "", "Name of the toolset to convert into a skill. If not provided, all tools will be included.") + cmd.Flags().StringVar(&cmd.outputDir, "output-dir", "skills", "Directory to output generated skills") + + _ = cmd.MarkFlagRequired("name") + _ = cmd.MarkFlagRequired("description") + return cmd.Command +} + +func (c *Command) run(cmd *cobra.Command) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + ctx, shutdown, err := c.rootCmd.Setup(ctx) + if err != nil { + return err + } + defer func() { + _ = shutdown(ctx) + }() + + logger := c.rootCmd.Logger() + + // Load and merge tool configurations + if err := c.rootCmd.LoadConfig(ctx); err != nil { + return err + } + + if err := os.MkdirAll(c.outputDir, 0755); err != nil { + errMsg := fmt.Errorf("error creating output directory: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + logger.InfoContext(ctx, fmt.Sprintf("Generating skill '%s'...", c.name)) + + // Initialize toolbox and collect tools + allTools, err := c.collectTools(ctx) + if err != nil { + errMsg := fmt.Errorf("error collecting tools: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + if len(allTools) == 0 { + logger.InfoContext(ctx, "No tools found to generate.") + return nil + } + + // Generate the combined skill directory + skillPath := filepath.Join(c.outputDir, c.name) + if err := os.MkdirAll(skillPath, 0755); err != nil { + errMsg := fmt.Errorf("error creating skill 
directory: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + // Generate assets directory + assetsPath := filepath.Join(skillPath, "assets") + if err := os.MkdirAll(assetsPath, 0755); err != nil { + errMsg := fmt.Errorf("error creating assets dir: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + // Generate scripts directory + scriptsPath := filepath.Join(skillPath, "scripts") + if err := os.MkdirAll(scriptsPath, 0755); err != nil { + errMsg := fmt.Errorf("error creating scripts dir: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + // Iterate over keys to ensure deterministic order + var toolNames []string + for name := range allTools { + toolNames = append(toolNames, name) + } + sort.Strings(toolNames) + + for _, toolName := range toolNames { + // Generate YAML config in asset directory + minimizedContent, err := generateToolConfigYAML(c.rootCmd.Config(), toolName) + if err != nil { + errMsg := fmt.Errorf("error generating filtered config for %s: %w", toolName, err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + specificToolsFileName := fmt.Sprintf("%s.yaml", toolName) + if minimizedContent != nil { + destPath := filepath.Join(assetsPath, specificToolsFileName) + if err := os.WriteFile(destPath, minimizedContent, 0644); err != nil { + errMsg := fmt.Errorf("error writing filtered config for %s: %w", toolName, err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + } + + // Generate wrapper script in scripts directory + scriptContent, err := generateScriptContent(toolName, specificToolsFileName) + if err != nil { + errMsg := fmt.Errorf("error generating script content for %s: %w", toolName, err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + scriptFilename := filepath.Join(scriptsPath, fmt.Sprintf("%s.js", toolName)) + if err := os.WriteFile(scriptFilename, []byte(scriptContent), 0755); err != nil { + errMsg := fmt.Errorf("error 
writing script %s: %w", scriptFilename, err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + } + + // Generate SKILL.md + skillContent, err := generateSkillMarkdown(c.name, c.description, allTools) + if err != nil { + errMsg := fmt.Errorf("error generating SKILL.md content: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + skillMdPath := filepath.Join(skillPath, "SKILL.md") + if err := os.WriteFile(skillMdPath, []byte(skillContent), 0644); err != nil { + errMsg := fmt.Errorf("error writing SKILL.md: %w", err) + logger.ErrorContext(ctx, errMsg.Error()) + return errMsg + } + + logger.InfoContext(ctx, fmt.Sprintf("Successfully generated skill '%s' with %d tools.", c.name, len(allTools))) + + return nil +} + +func (c *Command) collectTools(ctx context.Context) (map[string]tools.Tool, error) { + // Initialize Resources + sourcesMap, authServicesMap, embeddingModelsMap, toolsMap, toolsetsMap, promptsMap, promptsetsMap, err := server.InitializeConfigs(ctx, c.rootCmd.Config()) + if err != nil { + return nil, fmt.Errorf("failed to initialize resources: %w", err) + } + + resourceMgr := resources.NewResourceManager(sourcesMap, authServicesMap, embeddingModelsMap, toolsMap, toolsetsMap, promptsMap, promptsetsMap) + + result := make(map[string]tools.Tool) + + if c.toolset == "" { + return toolsMap, nil + } + + ts, ok := resourceMgr.GetToolset(c.toolset) + if !ok { + return nil, fmt.Errorf("toolset %q not found", c.toolset) + } + + for _, t := range ts.Tools { + if t != nil { + tool := *t + result[tool.McpManifest().Name] = tool + } + } + + return result, nil +} diff --git a/internal/cli/skills/generator.go b/internal/cli/skills/generator.go new file mode 100644 index 0000000000..a9e20fc9e3 --- /dev/null +++ b/internal/cli/skills/generator.go @@ -0,0 +1,296 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skills + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + "text/template" + + "github.com/goccy/go-yaml" + "github.com/googleapis/genai-toolbox/internal/server" + "github.com/googleapis/genai-toolbox/internal/sources" + "github.com/googleapis/genai-toolbox/internal/tools" + "github.com/googleapis/genai-toolbox/internal/util/parameters" +) + +const skillTemplate = `--- +name: {{.SkillName}} +description: {{.SkillDescription}} +--- + +## Usage + +All scripts can be executed using Node.js. Replace ` + "`" + `` + "`" + ` and ` + "`" + `` + "`" + ` with actual values. + +**Bash:** +` + "`" + `node scripts/.js '{"": ""}'` + "`" + ` + +**PowerShell:** +` + "`" + `node scripts/.js '{\"\": \"\"}'` + "`" + ` + +## Scripts + +{{range .Tools}} +### {{.Name}} + +{{.Description}} + +{{.ParametersSchema}} + +--- +{{end}} +` + +type toolTemplateData struct { + Name string + Description string + ParametersSchema string +} + +type skillTemplateData struct { + SkillName string + SkillDescription string + Tools []toolTemplateData +} + +// generateSkillMarkdown generates the content of the SKILL.md file. +// It includes usage instructions and a reference section for each tool in the skill, +// detailing its description and parameters. 
+func generateSkillMarkdown(skillName, skillDescription string, toolsMap map[string]tools.Tool) (string, error) { + var toolsData []toolTemplateData + + // Order tools based on name + var toolNames []string + for name := range toolsMap { + toolNames = append(toolNames, name) + } + sort.Strings(toolNames) + + for _, name := range toolNames { + tool := toolsMap[name] + manifest := tool.Manifest() + + parametersSchema, err := formatParameters(manifest.Parameters) + if err != nil { + return "", err + } + + toolsData = append(toolsData, toolTemplateData{ + Name: name, + Description: manifest.Description, + ParametersSchema: parametersSchema, + }) + } + + data := skillTemplateData{ + SkillName: skillName, + SkillDescription: skillDescription, + Tools: toolsData, + } + + tmpl, err := template.New("markdown").Parse(skillTemplate) + if err != nil { + return "", fmt.Errorf("error parsing markdown template: %w", err) + } + + var buf strings.Builder + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("error executing markdown template: %w", err) + } + + return buf.String(), nil +} + +const nodeScriptTemplate = `#!/usr/bin/env node + +const { spawn, execSync } = require('child_process'); +const path = require('path'); +const fs = require('fs'); + +const toolName = "{{.Name}}"; +const toolsFileName = "{{.ToolsFileName}}"; + +function getToolboxPath() { + try { + const checkCommand = process.platform === 'win32' ? 
'where toolbox' : 'which toolbox'; + const globalPath = execSync(checkCommand, { stdio: 'pipe', encoding: 'utf-8' }).trim(); + if (globalPath) { + return globalPath.split('\n')[0].trim(); + } + } catch (e) { + // Ignore error; + } + const localPath = path.resolve(__dirname, '../../../toolbox'); + if (fs.existsSync(localPath)) { + return localPath; + } + throw new Error("Toolbox binary not found"); +} + +let toolboxBinary; +try { + toolboxBinary = getToolboxPath(); +} catch (err) { + console.error("Error:", err.message); + process.exit(1); +} + +let configArgs = []; +if (toolsFileName) { + configArgs.push("--tools-file", path.join(__dirname, "..", "assets", toolsFileName)); +} + +const args = process.argv.slice(2); +const toolboxArgs = [...configArgs, "invoke", toolName, ...args]; + +const child = spawn(toolboxBinary, toolboxArgs, { stdio: 'inherit' }); + +child.on('close', (code) => { + process.exit(code); +}); + +child.on('error', (err) => { + console.error("Error executing toolbox:", err); + process.exit(1); +}); +` + +type scriptData struct { + Name string + ToolsFileName string +} + +// generateScriptContent creates the content for a Node.js wrapper script. +// This script invokes the toolbox CLI with the appropriate configuration +// (using a generated tools file) and arguments to execute the specific tool. +func generateScriptContent(name string, toolsFileName string) (string, error) { + data := scriptData{ + Name: name, + ToolsFileName: toolsFileName, + } + + tmpl, err := template.New("script").Parse(nodeScriptTemplate) + if err != nil { + return "", fmt.Errorf("error parsing script template: %w", err) + } + + var buf strings.Builder + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("error executing script template: %w", err) + } + + return buf.String(), nil +} + +// formatParameters converts a list of parameter manifests into a formatted JSON schema string. 
+// This schema is used in the skill documentation to describe the input parameters for a tool. +func formatParameters(params []parameters.ParameterManifest) (string, error) { + if len(params) == 0 { + return "", nil + } + + properties := make(map[string]interface{}) + var required []string + + for _, p := range params { + paramMap := map[string]interface{}{ + "type": p.Type, + "description": p.Description, + } + if p.Default != nil { + paramMap["default"] = p.Default + } + properties[p.Name] = paramMap + if p.Required { + required = append(required, p.Name) + } + } + + schema := map[string]interface{}{ + "type": "object", + "properties": properties, + } + if len(required) > 0 { + schema["required"] = required + } + + schemaJSON, err := json.MarshalIndent(schema, "", " ") + if err != nil { + return "", fmt.Errorf("error generating parameters schema: %w", err) + } + + return fmt.Sprintf("#### Parameters\n\n```json\n%s\n```", string(schemaJSON)), nil +} + +// generateToolConfigYAML generates the YAML configuration for a single tool and its dependency (source). +// It extracts the relevant tool and source configurations from the server config and formats them +// into a YAML document suitable for inclusion in the skill's assets. 
+func generateToolConfigYAML(cfg server.ServerConfig, toolName string) ([]byte, error) { + toolCfg, ok := cfg.ToolConfigs[toolName] + if !ok { + return nil, fmt.Errorf("error finding tool config: %s", toolName) + } + + var buf bytes.Buffer + encoder := yaml.NewEncoder(&buf) + + // Process Tool Config + toolWrapper := struct { + Kind string `yaml:"kind"` + Config tools.ToolConfig `yaml:",inline"` + }{ + Kind: "tools", + Config: toolCfg, + } + + if err := encoder.Encode(toolWrapper); err != nil { + return nil, fmt.Errorf("error encoding tool config: %w", err) + } + + // Process Source Config + var toolMap map[string]interface{} + b, err := yaml.Marshal(toolCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling tool config: %w", err) + } + if err := yaml.Unmarshal(b, &toolMap); err != nil { + return nil, fmt.Errorf("error unmarshaling tool config map: %w", err) + } + + if sourceName, ok := toolMap["source"].(string); ok && sourceName != "" { + sourceCfg, ok := cfg.SourceConfigs[sourceName] + if !ok { + return nil, fmt.Errorf("error finding source config: %s", sourceName) + } + + sourceWrapper := struct { + Kind string `yaml:"kind"` + Config sources.SourceConfig `yaml:",inline"` + }{ + Kind: "sources", + Config: sourceCfg, + } + + if err := encoder.Encode(sourceWrapper); err != nil { + return nil, fmt.Errorf("error encoding source config: %w", err) + } + } + + return buf.Bytes(), nil +} diff --git a/internal/cli/skills/generator_test.go b/internal/cli/skills/generator_test.go new file mode 100644 index 0000000000..bd3a462180 --- /dev/null +++ b/internal/cli/skills/generator_test.go @@ -0,0 +1,347 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skills + +import ( + "context" + "strings" + "testing" + + "github.com/googleapis/genai-toolbox/internal/server" + "github.com/googleapis/genai-toolbox/internal/sources" + "github.com/googleapis/genai-toolbox/internal/tools" + "github.com/googleapis/genai-toolbox/internal/util/parameters" + "go.opentelemetry.io/otel/trace" +) + +type MockToolConfig struct { + Name string `yaml:"name"` + Type string `yaml:"type"` + Source string `yaml:"source"` + Other string `yaml:"other"` + Parameters parameters.Parameters `yaml:"parameters"` +} + +func (m MockToolConfig) ToolConfigType() string { + return m.Type +} + +func (m MockToolConfig) Initialize(map[string]sources.Source) (tools.Tool, error) { + return nil, nil +} + +type MockSourceConfig struct { + Name string `yaml:"name"` + Type string `yaml:"type"` + ConnectionString string `yaml:"connection_string"` +} + +func (m MockSourceConfig) SourceConfigType() string { + return m.Type +} + +func (m MockSourceConfig) Initialize(context.Context, trace.Tracer) (sources.Source, error) { + return nil, nil +} + +func TestFormatParameters(t *testing.T) { + tests := []struct { + name string + params []parameters.ParameterManifest + wantContains []string + wantErr bool + }{ + { + name: "empty parameters", + params: []parameters.ParameterManifest{}, + wantContains: []string{""}, + }, + { + name: "single required string parameter", + params: []parameters.ParameterManifest{ + { + Name: "param1", + Description: "A test parameter", + Type: "string", + Required: true, + }, + }, + wantContains: []string{ + "## 
Parameters", + "```json", + `"type": "object"`, + `"properties": {`, + `"param1": {`, + `"type": "string"`, + `"description": "A test parameter"`, + `"required": [`, + `"param1"`, + }, + }, + { + name: "mixed parameters with defaults", + params: []parameters.ParameterManifest{ + { + Name: "param1", + Description: "Param 1", + Type: "string", + Required: true, + }, + { + Name: "param2", + Description: "Param 2", + Type: "integer", + Default: 42, + Required: false, + }, + }, + wantContains: []string{ + `"param1": {`, + `"param2": {`, + `"default": 42`, + `"required": [`, + `"param1"`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := formatParameters(tt.params) + if (err != nil) != tt.wantErr { + t.Errorf("formatParameters() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + + if len(tt.params) == 0 { + if got != "" { + t.Errorf("formatParameters() = %v, want empty string", got) + } + return + } + + for _, want := range tt.wantContains { + if !strings.Contains(got, want) { + t.Errorf("formatParameters() result missing expected string: %s\nGot:\n%s", want, got) + } + } + }) + } +} + +func TestGenerateSkillMarkdown(t *testing.T) { + toolsMap := map[string]tools.Tool{ + "tool1": server.MockTool{ + Description: "First tool", + Params: []parameters.Parameter{ + parameters.NewStringParameter("p1", "d1"), + }, + }, + } + + got, err := generateSkillMarkdown("MySkill", "My Description", toolsMap) + if err != nil { + t.Fatalf("generateSkillMarkdown() error = %v", err) + } + + expectedSubstrings := []string{ + "name: MySkill", + "description: My Description", + "## Usage", + "All scripts can be executed using Node.js", + "**Bash:**", + "`node scripts/.js '{\"\": \"\"}'`", + "**PowerShell:**", + "`node scripts/.js '{\"\": \"\"}'`", + "## Scripts", + "### tool1", + "First tool", + "## Parameters", + } + + for _, s := range expectedSubstrings { + if !strings.Contains(got, s) { + 
t.Errorf("generateSkillMarkdown() missing substring %q", s) + } + } +} + +func TestGenerateScriptContent(t *testing.T) { + tests := []struct { + name string + toolName string + toolsFileName string + wantContains []string + }{ + { + name: "basic script", + toolName: "test-tool", + toolsFileName: "", + wantContains: []string{ + `const toolName = "test-tool";`, + `const toolsFileName = "";`, + `const toolboxArgs = [...configArgs, "invoke", toolName, ...args];`, + }, + }, + { + name: "script with tools file", + toolName: "complex-tool", + toolsFileName: "tools.yaml", + wantContains: []string{ + `const toolName = "complex-tool";`, + `const toolsFileName = "tools.yaml";`, + `configArgs.push("--tools-file", path.join(__dirname, "..", "assets", toolsFileName));`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := generateScriptContent(tt.toolName, tt.toolsFileName) + if err != nil { + t.Fatalf("generateScriptContent() error = %v", err) + } + + for _, s := range tt.wantContains { + if !strings.Contains(got, s) { + t.Errorf("generateScriptContent() missing substring %q\nGot:\n%s", s, got) + } + } + }) + } +} + +func TestGenerateToolConfigYAML(t *testing.T) { + cfg := server.ServerConfig{ + ToolConfigs: server.ToolConfigs{ + "tool1": MockToolConfig{ + Name: "tool1", + Type: "custom-tool", + Source: "src1", + Other: "foo", + }, + "toolNoSource": MockToolConfig{ + Name: "toolNoSource", + Type: "http", + }, + "toolWithParams": MockToolConfig{ + Name: "toolWithParams", + Type: "custom-tool", + Parameters: []parameters.Parameter{ + parameters.NewStringParameter("param1", "desc1"), + }, + }, + "toolWithMissingSource": MockToolConfig{ + Name: "toolWithMissingSource", + Type: "custom-tool", + Source: "missing-src", + }, + }, + SourceConfigs: server.SourceConfigs{ + "src1": MockSourceConfig{ + Name: "src1", + Type: "postgres", + ConnectionString: "conn1", + }, + }, + } + + tests := []struct { + name string + toolName string + wantContains 
[]string + wantErr bool + wantNil bool + }{ + { + name: "tool with source", + toolName: "tool1", + wantContains: []string{ + "kind: tools", + "name: tool1", + "type: custom-tool", + "source: src1", + "other: foo", + "---", + "kind: sources", + "name: src1", + "type: postgres", + "connection_string: conn1", + }, + }, + { + name: "tool without source", + toolName: "toolNoSource", + wantContains: []string{ + "kind: tools", + "name: toolNoSource", + "type: http", + }, + }, + { + name: "tool with parameters", + toolName: "toolWithParams", + wantContains: []string{ + "kind: tools", + "name: toolWithParams", + "type: custom-tool", + "parameters:", + "- name: param1", + "type: string", + "description: desc1", + }, + }, + { + name: "non-existent tool", + toolName: "missing-tool", + wantErr: true, + }, + { + name: "tool with missing source config", + toolName: "toolWithMissingSource", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotBytes, err := generateToolConfigYAML(cfg, tt.toolName) + if (err != nil) != tt.wantErr { + t.Errorf("generateToolConfigYAML() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + + if tt.wantNil { + if gotBytes != nil { + t.Errorf("generateToolConfigYAML() expected nil, got %s", string(gotBytes)) + } + return + } + + got := string(gotBytes) + for _, want := range tt.wantContains { + if !strings.Contains(got, want) { + t.Errorf("generateToolConfigYAML() result missing expected string: %q\nGot:\n%s", want, got) + } + } + }) + } +} diff --git a/internal/prebuiltconfigs/tools/looker.yaml b/internal/prebuiltconfigs/tools/looker.yaml index 442cd11106..c6bbd51c56 100644 --- a/internal/prebuiltconfigs/tools/looker.yaml +++ b/internal/prebuiltconfigs/tools/looker.yaml @@ -959,6 +959,21 @@ tools: Output: A confirmation message upon successful file deletion. 
+ validate_project: + kind: looker-validate-project + source: looker-source + description: | + This tool checks a LookML project for syntax errors. + + Prerequisite: The Looker session must be in Development Mode. Use `dev_mode: true` first. + + Parameters: + - project_id (required): The unique ID of the LookML project. + + Output: + A list of error details including the file path and line number, and also a list of models + that are not currently valid due to LookML errors. + get_connections: kind: looker-get-connections source: looker-source @@ -1072,6 +1087,7 @@ toolsets: - create_project_file - update_project_file - delete_project_file + - validate_project - get_connections - get_connection_schemas - get_connection_databases diff --git a/internal/server/common_test.go b/internal/server/common_test.go index 54109ac467..8944cfba20 100644 --- a/internal/server/common_test.go +++ b/internal/server/common_test.go @@ -24,7 +24,6 @@ import ( "testing" "github.com/go-chi/chi/v5" - "github.com/googleapis/genai-toolbox/internal/embeddingmodels" "github.com/googleapis/genai-toolbox/internal/log" "github.com/googleapis/genai-toolbox/internal/prompts" "github.com/googleapis/genai-toolbox/internal/server/resources" @@ -41,140 +40,6 @@ var ( _ prompts.Prompt = MockPrompt{} ) -// MockTool is used to mock tools in tests -type MockTool struct { - Name string - Description string - Params []parameters.Parameter - manifest tools.Manifest - unauthorized bool - requiresClientAuthrorization bool -} - -func (t MockTool) Invoke(context.Context, tools.SourceProvider, parameters.ParamValues, tools.AccessToken) (any, error) { - mock := []any{t.Name} - return mock, nil -} - -func (t MockTool) ToConfig() tools.ToolConfig { - return nil -} - -// claims is a map of user info decoded from an auth token -func (t MockTool) ParseParams(data map[string]any, claimsMap map[string]map[string]any) (parameters.ParamValues, error) { - return parameters.ParseParams(t.Params, data, claimsMap) -} - -func 
(t MockTool) EmbedParams(ctx context.Context, paramValues parameters.ParamValues, embeddingModelsMap map[string]embeddingmodels.EmbeddingModel) (parameters.ParamValues, error) { - return parameters.EmbedParams(ctx, t.Params, paramValues, embeddingModelsMap, nil) -} - -func (t MockTool) Manifest() tools.Manifest { - pMs := make([]parameters.ParameterManifest, 0, len(t.Params)) - for _, p := range t.Params { - pMs = append(pMs, p.Manifest()) - } - return tools.Manifest{Description: t.Description, Parameters: pMs} -} - -func (t MockTool) Authorized(verifiedAuthServices []string) bool { - // defaulted to true - return !t.unauthorized -} - -func (t MockTool) RequiresClientAuthorization(tools.SourceProvider) (bool, error) { - // defaulted to false - return t.requiresClientAuthrorization, nil -} - -func (t MockTool) GetParameters() parameters.Parameters { - return t.Params -} - -func (t MockTool) McpManifest() tools.McpManifest { - properties := make(map[string]parameters.ParameterMcpManifest) - required := make([]string, 0) - authParams := make(map[string][]string) - - for _, p := range t.Params { - name := p.GetName() - paramManifest, authParamList := p.McpManifest() - properties[name] = paramManifest - required = append(required, name) - - if len(authParamList) > 0 { - authParams[name] = authParamList - } - } - - toolsSchema := parameters.McpToolsSchema{ - Type: "object", - Properties: properties, - Required: required, - } - - mcpManifest := tools.McpManifest{ - Name: t.Name, - Description: t.Description, - InputSchema: toolsSchema, - } - - if len(authParams) > 0 { - mcpManifest.Metadata = map[string]any{ - "toolbox/authParams": authParams, - } - } - - return mcpManifest -} - -func (t MockTool) GetAuthTokenHeaderName(tools.SourceProvider) (string, error) { - return "Authorization", nil -} - -// MockPrompt is used to mock prompts in tests -type MockPrompt struct { - Name string - Description string - Args prompts.Arguments -} - -func (p MockPrompt) SubstituteParams(vals 
parameters.ParamValues) (any, error) { - return []prompts.Message{ - { - Role: "user", - Content: fmt.Sprintf("substituted %s", p.Name), - }, - }, nil -} - -func (p MockPrompt) ParseArgs(data map[string]any, claimsMap map[string]map[string]any) (parameters.ParamValues, error) { - var params parameters.Parameters - for _, arg := range p.Args { - params = append(params, arg.Parameter) - } - return parameters.ParseParams(params, data, claimsMap) -} - -func (p MockPrompt) Manifest() prompts.Manifest { - var argManifests []parameters.ParameterManifest - for _, arg := range p.Args { - argManifests = append(argManifests, arg.Manifest()) - } - return prompts.Manifest{ - Description: p.Description, - Arguments: argManifests, - } -} - -func (p MockPrompt) McpManifest() prompts.McpManifest { - return prompts.GetMcpManifest(p.Name, p.Description, p.Args) -} - -func (p MockPrompt) ToConfig() prompts.PromptConfig { - return nil -} - var tool1 = MockTool{ Name: "no_params", Params: []parameters.Parameter{}, diff --git a/internal/server/mocks.go b/internal/server/mocks.go new file mode 100644 index 0000000000..60aa4f6212 --- /dev/null +++ b/internal/server/mocks.go @@ -0,0 +1,159 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "fmt" + + "github.com/googleapis/genai-toolbox/internal/embeddingmodels" + "github.com/googleapis/genai-toolbox/internal/prompts" + "github.com/googleapis/genai-toolbox/internal/tools" + "github.com/googleapis/genai-toolbox/internal/util/parameters" +) + +// MockTool is used to mock tools in tests +type MockTool struct { + Name string + Description string + Params []parameters.Parameter + manifest tools.Manifest + unauthorized bool + requiresClientAuthrorization bool +} + +func (t MockTool) Invoke(context.Context, tools.SourceProvider, parameters.ParamValues, tools.AccessToken) (any, error) { + mock := []any{t.Name} + return mock, nil +} + +func (t MockTool) ToConfig() tools.ToolConfig { + return nil +} + +// claims is a map of user info decoded from an auth token +func (t MockTool) ParseParams(data map[string]any, claimsMap map[string]map[string]any) (parameters.ParamValues, error) { + return parameters.ParseParams(t.Params, data, claimsMap) +} + +func (t MockTool) EmbedParams(ctx context.Context, paramValues parameters.ParamValues, embeddingModelsMap map[string]embeddingmodels.EmbeddingModel) (parameters.ParamValues, error) { + return parameters.EmbedParams(ctx, t.Params, paramValues, embeddingModelsMap, nil) +} + +func (t MockTool) Manifest() tools.Manifest { + pMs := make([]parameters.ParameterManifest, 0, len(t.Params)) + for _, p := range t.Params { + pMs = append(pMs, p.Manifest()) + } + return tools.Manifest{Description: t.Description, Parameters: pMs} +} + +func (t MockTool) Authorized(verifiedAuthServices []string) bool { + // defaulted to true + return !t.unauthorized +} + +func (t MockTool) RequiresClientAuthorization(tools.SourceProvider) (bool, error) { + // defaulted to false + return t.requiresClientAuthrorization, nil +} + +func (t MockTool) GetParameters() parameters.Parameters { + return t.Params +} + +func (t MockTool) McpManifest() tools.McpManifest { + properties := 
make(map[string]parameters.ParameterMcpManifest) + required := make([]string, 0) + authParams := make(map[string][]string) + + for _, p := range t.Params { + name := p.GetName() + paramManifest, authParamList := p.McpManifest() + properties[name] = paramManifest + required = append(required, name) + + if len(authParamList) > 0 { + authParams[name] = authParamList + } + } + + toolsSchema := parameters.McpToolsSchema{ + Type: "object", + Properties: properties, + Required: required, + } + + mcpManifest := tools.McpManifest{ + Name: t.Name, + Description: t.Description, + InputSchema: toolsSchema, + } + + if len(authParams) > 0 { + mcpManifest.Metadata = map[string]any{ + "toolbox/authParams": authParams, + } + } + + return mcpManifest +} + +func (t MockTool) GetAuthTokenHeaderName(tools.SourceProvider) (string, error) { + return "Authorization", nil +} + +// MockPrompt is used to mock prompts in tests +type MockPrompt struct { + Name string + Description string + Args prompts.Arguments +} + +func (p MockPrompt) SubstituteParams(vals parameters.ParamValues) (any, error) { + return []prompts.Message{ + { + Role: "user", + Content: fmt.Sprintf("substituted %s", p.Name), + }, + }, nil +} + +func (p MockPrompt) ParseArgs(data map[string]any, claimsMap map[string]map[string]any) (parameters.ParamValues, error) { + var params parameters.Parameters + for _, arg := range p.Args { + params = append(params, arg.Parameter) + } + return parameters.ParseParams(params, data, claimsMap) +} + +func (p MockPrompt) Manifest() prompts.Manifest { + var argManifests []parameters.ParameterManifest + for _, arg := range p.Args { + argManifests = append(argManifests, arg.Manifest()) + } + return prompts.Manifest{ + Description: p.Description, + Arguments: argManifests, + } +} + +func (p MockPrompt) McpManifest() prompts.McpManifest { + return prompts.GetMcpManifest(p.Name, p.Description, p.Args) +} + +func (p MockPrompt) ToConfig() prompts.PromptConfig { + return nil +} diff --git 
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package lookervalidateproject implements the "looker-validate-project"
// tool, which validates a Looker LookML project by id via the Looker SDK's
// ValidateProject endpoint.
package lookervalidateproject

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/embeddingmodels"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/util"
	"github.com/googleapis/genai-toolbox/internal/util/parameters"

	"github.com/looker-open-source/sdk-codegen/go/rtl"
	v4 "github.com/looker-open-source/sdk-codegen/go/sdk/v4"
)

// resourceType is the tool-type string used both in YAML configs
// ("type: looker-validate-project") and in the global tool registry.
const resourceType string = "looker-validate-project"

// init registers this tool type with the global registry at package load.
// A duplicate registration is a programming error, so it panics.
func init() {
	if !tools.Register(resourceType, newConfig) {
		panic(fmt.Sprintf("tool type %q already registered", resourceType))
	}
}

// newConfig decodes one YAML tool definition into a Config.
// The tool's name comes from the surrounding config key, not the YAML body.
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// compatibleSource is the minimal surface this tool requires from its
// configured source: client-auth settings, Looker API settings, and an
// SDK handle (optionally bound to a caller-supplied access token).
type compatibleSource interface {
	UseClientAuthorization() bool
	GetAuthTokenHeaderName() string
	LookerApiSettings() *rtl.ApiSettings
	GetLookerSDK(string) (*v4.LookerSDK, error)
}

// Config is the YAML-declared configuration for a looker-validate-project
// tool instance.
type Config struct {
	Name         string                 `yaml:"name" validate:"required"`
	Type         string                 `yaml:"type" validate:"required"`
	Source       string                 `yaml:"source" validate:"required"`
	Description  string                 `yaml:"description" validate:"required"`
	AuthRequired []string               `yaml:"authRequired"`
	Annotations  *tools.ToolAnnotations `yaml:"annotations,omitempty"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigType returns the registered tool-type string.
func (cfg Config) ToolConfigType() string {
	return resourceType
}

// Initialize builds the runnable Tool from this config. The tool exposes a
// single required string parameter, "project_id". When no annotations are
// configured, a read-only hint is applied by default (validation does not
// mutate the project).
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	projectIdParameter := parameters.NewStringParameter("project_id", "The id of the project to validate")
	params := parameters.Parameters{projectIdParameter}

	annotations := cfg.Annotations
	if annotations == nil {
		readOnlyHint := true
		annotations = &tools.ToolAnnotations{
			ReadOnlyHint: &readOnlyHint,
		}
	}

	mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, params, annotations)

	// finish tool setup
	return Tool{
		Config:     cfg,
		Parameters: params,
		manifest: tools.Manifest{
			Description:  cfg.Description,
			Parameters:   params.Manifest(),
			AuthRequired: cfg.AuthRequired,
		},
		mcpManifest: mcpManifest,
	}, nil
}

// validate interface
var _ tools.Tool = Tool{}

// Tool is the runnable looker-validate-project tool. It embeds its Config
// and caches the manifests computed during Initialize.
type Tool struct {
	Config
	Parameters  parameters.Parameters `yaml:"parameters"`
	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// ToConfig returns the config this tool was built from.
func (t Tool) ToConfig() tools.ToolConfig {
	return t.Config
}

// Invoke resolves the configured source, obtains a Looker SDK bound to the
// caller's access token (empty when client auth is not in use), and calls
// ValidateProject for the requested project id. The SDK response is
// returned as-is.
func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
	source, err := tools.GetCompatibleSource[compatibleSource](resourceMgr, t.Source, t.Name, t.Type)
	if err != nil {
		return nil, err
	}

	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}

	sdk, err := source.GetLookerSDK(string(accessToken))
	if err != nil {
		return nil, fmt.Errorf("error getting sdk: %w", err)
	}

	mapParams := params.AsMap()
	// project_id is declared as a string parameter; guard against a
	// non-string value reaching us anyway.
	projectId, ok := mapParams["project_id"].(string)
	if !ok {
		return nil, fmt.Errorf("'project_id' must be a string, got %T", mapParams["project_id"])
	}

	// The empty second argument leaves the SDK's optional fields parameter
	// unset — NOTE(review): confirm against v4.LookerSDK.ValidateProject.
	resp, err := sdk.ValidateProject(projectId, "", source.LookerApiSettings())
	if err != nil {
		return nil, fmt.Errorf("error making validate_project request: %w", err)
	}

	// NOTE(review): message uses a printf-style verb ("%v") — confirm this
	// logger formats variadic args rather than treating them as key/values.
	logger.DebugContext(ctx, "Got response of %v\n", resp)

	return resp, nil
}

// EmbedParams delegates embedding of parameter values to the shared helper.
func (t Tool) EmbedParams(ctx context.Context, paramValues parameters.ParamValues, embeddingModelsMap map[string]embeddingmodels.EmbeddingModel) (parameters.ParamValues, error) {
	return parameters.EmbedParams(ctx, t.Parameters, paramValues, embeddingModelsMap, nil)
}

// Manifest returns the manifest computed during Initialize.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the MCP manifest computed during Initialize.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized reports whether the verified auth services satisfy this tool's
// authRequired list.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

// RequiresClientAuthorization reports whether the underlying source expects
// the client to supply its own authorization token.
func (t Tool) RequiresClientAuthorization(resourceMgr tools.SourceProvider) (bool, error) {
	source, err := tools.GetCompatibleSource[compatibleSource](resourceMgr, t.Source, t.Name, t.Type)
	if err != nil {
		return false, err
	}
	return source.UseClientAuthorization(), nil
}

// GetAuthTokenHeaderName returns the header name the source reads the
// client auth token from.
func (t Tool) GetAuthTokenHeaderName(resourceMgr tools.SourceProvider) (string, error) {
	source, err := tools.GetCompatibleSource[compatibleSource](resourceMgr, t.Source, t.Name, t.Type)
	if err != nil {
		return "", err
	}
	return source.GetAuthTokenHeaderName(), nil
}

// GetParameters returns the tool's declared parameters.
func (t Tool) GetParameters() parameters.Parameters {
	return t.Parameters
}
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lookervalidateproject_test

import (
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	lkr "github.com/googleapis/genai-toolbox/internal/tools/looker/lookervalidateproject"
)

// TestParseFromYamlLookerValidateProject verifies that a well-formed YAML
// tool definition unmarshals into the expected lkr.Config.
func TestParseFromYamlLookerValidateProject(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			kind: tools
			name: example_tool
			type: looker-validate-project
			source: my-instance
			description: some description
			`,
			want: server.ToolConfigs{
				"example_tool": lkr.Config{
					Name:         "example_tool",
					Type:         "looker-validate-project",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			// Parse contents
			_, _, _, got, _, _, err := server.UnmarshalResourceConfig(ctx, testutils.FormatYaml(tc.in))
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}

}

// TestFailParseFromYamlLookerValidateProject verifies that an unknown field
// ("method") in the tool definition is rejected with a descriptive error.
func TestFailParseFromYamlLookerValidateProject(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "Invalid method",
			in: `
			kind: tools
			name: example_tool
			type: looker-validate-project
			source: my-instance
			method: GOT
			description: some description
			`,
			err: "error unmarshaling tools: unable to parse tool \"example_tool\" as type \"looker-validate-project\": [3:1] unknown field \"method\"\n   1 | authRequired: []\n   2 | description: some description\n>  3 | method: GOT\n       ^\n   4 | name: example_tool\n   5 | source: my-instance\n   6 | type: looker-validate-project",
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			// Parse contents
			_, _, _, _, _, _, err := server.UnmarshalResourceConfig(ctx, testutils.FormatYaml(tc.in))
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			// Substring match: the full error includes position markers that
			// may vary in surrounding whitespace.
			if !strings.Contains(errStr, tc.err) {
				t.Fatalf("unexpected error string: got %q, want substring %q", errStr, tc.err)
			}
		})
	}

}
end to end functionality.", + "authRequired": []any{}, + "parameters": []any{ + map[string]any{ + "authSources": []any{}, + "description": "The id of the project to validate", + "name": "project_id", + "required": true, + "type": "string", + }, + }, + }, + }, + ) tests.RunToolGetTestByName(t, "generate_embed_url", map[string]any{ "generate_embed_url": map[string]any{ @@ -1665,6 +1687,9 @@ func TestLooker(t *testing.T) { wantResult = "deleted" tests.RunToolInvokeParametersTest(t, "delete_project_file", []byte(`{"project_id": "the_look", "file_path": "foo.view.lkml"}`), wantResult) + wantResult = "\"errors\":[]" + tests.RunToolInvokeParametersTest(t, "validate_project", []byte(`{"project_id": "the_look"}`), wantResult) + wantResult = "production" tests.RunToolInvokeParametersTest(t, "dev_mode", []byte(`{"devMode": false}`), wantResult)