Compare commits


9 Commits

Author SHA1 Message Date
duwenxin
4ff819386a rebase 2025-07-23 16:40:15 -04:00
duwenxin
6119d401f2 rebase 2025-07-23 16:33:27 -04:00
duwenxin
967cd98cb0 fix param test 2025-07-23 16:32:20 -04:00
duwenxin
2a94a33d63 rebase 2025-07-23 16:32:20 -04:00
duwenxin
f93693c92d rebase 2025-07-23 16:32:20 -04:00
Dennis Geurts
9903df0715 feat: Mongodb atlas - more tools (#788)
Add 6 MongoDB Tools:

- mongodb-delete-one
- mongodb-delete-many
- mongodb-insert-one
- mongodb-insert-many
- mongodb-update-one
- mongodb-update-many

---------

Co-authored-by: Venkatesh Shanbhag <91714892+theshanbhag@users.noreply.github.com>
Co-authored-by: Wenxin Du <117315983+duwenxin99@users.noreply.github.com>
Co-authored-by: Mend Renovate <bot@renovateapp.com>
Co-authored-by: Yuan <45984206+Yuan325@users.noreply.github.com>
Co-authored-by: duwenxin <duwenxin@google.com>
2025-07-23 16:32:20 -04:00
duwenxin99
ed6d6b8e4a rebase 2025-07-23 16:32:07 -04:00
Dennis Geurts
b261be23a1 feat: Add support for MongoDB find and findone tools (#696)
Co-authored-by: Wenxin Du <117315983+duwenxin99@users.noreply.github.com>
Co-authored-by: Mend Renovate <bot@renovateapp.com>
Co-authored-by: Yuan <45984206+Yuan325@users.noreply.github.com>
Co-authored-by: Venkatesh Shanbhag <91714892+theshanbhag@users.noreply.github.com>
Co-authored-by: duwenxin <duwenxin@google.com>
2025-07-23 16:32:07 -04:00
Venkatesh Shanbhag
61b08a345e mongodb source 2025-07-23 16:30:45 -04:00
103 changed files with 771 additions and 5791 deletions

View File

@@ -153,26 +153,6 @@ steps:
"BigQuery" \
bigquery \
bigquery
- id: "dataplex"
name: golang:1
waitFor: ["compile-test-binary"]
entrypoint: /bin/bash
env:
- "GOPATH=/gopath"
- "DATAPLEX_PROJECT=$PROJECT_ID"
- "SERVICE_ACCOUNT_EMAIL=$SERVICE_ACCOUNT_EMAIL"
secretEnv: ["CLIENT_ID"]
volumes:
- name: "go"
path: "/gopath"
args:
- -c
- |
.ci/test_with_coverage.sh \
"Dataplex" \
dataplex \
dataplex
- id: "postgres"
name: golang:1
@@ -445,7 +425,7 @@ steps:
"Valkey" \
valkey \
valkey
- id: "firestore"
name: golang:1
waitFor: ["compile-test-binary"]
@@ -486,8 +466,26 @@ steps:
"Looker" \
looker \
looker
- id: "mongodb"
name: golang:1
waitFor: ["compile-test-binary"]
entrypoint: /bin/bash
env:
- "GOPATH=/gopath"
- "SERVICE_ACCOUNT_EMAIL=$SERVICE_ACCOUNT_EMAIL"
- "MONGODB_DATABASE=$_MONGODB_DATABASE"
secretEnv: ["CLIENT_ID", "MONGODB_URI"]
volumes:
- name: "go"
path: "/gopath"
args:
- -c
- |
.ci/test_with_coverage.sh \
"MongoDB" \
mongodb \
mongodb
- id: "alloydbwaitforoperation"
name: golang:1
@@ -566,6 +564,8 @@ availableSecrets:
env: LOOKER_CLIENT_ID
- versionName: projects/107716898620/secrets/looker_client_secret/versions/latest
env: LOOKER_CLIENT_SECRET
- versionName: projects/$PROJECT_ID/secrets/mongodb_uri/versions/latest
env: MONGODB_URI
options:
@@ -598,4 +598,5 @@ substitutions:
_DGRAPHURL: "https://play.dgraph.io"
_COUCHBASE_BUCKET: "couchbase-bucket"
_COUCHBASE_SCOPE: "couchbase-scope"
_LOOKER_VERIFY_SSL: "true"
_LOOKER_VERIFY_SSL: "true"
_MONGODB_DATABASE: "test"

View File

@@ -18,7 +18,6 @@ releaseType: simple
versionFile: "cmd/version.txt"
extraFiles: [
"README.md",
"docs/en/getting-started/colab_quickstart.ipynb",
"docs/en/getting-started/introduction/_index.md",
"docs/en/getting-started/local_quickstart.md",
"docs/en/getting-started/local_quickstart_js.md",
@@ -26,17 +25,13 @@ extraFiles: [
"docs/en/getting-started/mcp_quickstart/_index.md",
"docs/en/samples/bigquery/local_quickstart.md",
"docs/en/samples/bigquery/mcp_quickstart/_index.md",
"docs/en/getting-started/colab_quickstart.ipynb",
"docs/en/samples/bigquery/colab_quickstart_bigquery.ipynb",
"docs/en/samples/looker/looker_gemini.md",
"docs/en/samples/looker/looker_mcp_inspector.md",
"docs/en/how-to/connect-ide/alloydb_pg_mcp.md",
"docs/en/how-to/connect-ide/alloydb_pg_admin_mcp.md",
"docs/en/how-to/connect-ide/bigquery_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_pg_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_mssql_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_mysql_mcp.md",
"docs/en/how-to/connect-ide/firestore_mcp.md",
"docs/en/how-to/connect-ide/looker_mcp.md",
"docs/en/how-to/connect-ide/postgres_mcp.md",
"docs/en/how-to/connect-ide/spanner_mcp.md",
"docs/en/how-to/connect-ide/alloydb_pg_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_mysql_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_pg_mcp.md",
"docs/en/how-to/connect-ide/postgres_mcp.md",
"docs/en/how-to/connect-ide/cloud_sql_mssql_mcp.md",
]

2
.gitignore vendored
View File

@@ -20,4 +20,4 @@ node_modules
# executable
genai-toolbox
toolbox
toolbox

View File

@@ -1,43 +1,5 @@
# Changelog
## [0.10.0](https://github.com/googleapis/genai-toolbox/compare/v0.9.0...v0.10.0) (2025-07-25)
### Features
* Add `Map` parameters support ([#928](https://github.com/googleapis/genai-toolbox/issues/928)) ([4468bc9](https://github.com/googleapis/genai-toolbox/commit/4468bc920bbf27dce4ab160197587b7c12fcd20f))
* Add Dataplex source and tool ([#847](https://github.com/googleapis/genai-toolbox/issues/847)) ([30c16a5](https://github.com/googleapis/genai-toolbox/commit/30c16a559e8d49a9a717935269e69b97ec25519a))
* Add Looker source and tool ([#923](https://github.com/googleapis/genai-toolbox/issues/923)) ([c67e01b](https://github.com/googleapis/genai-toolbox/commit/c67e01bcf998e7b884be30ebb1fd277c89ed6ffc))
* Add support for null optional parameter ([#802](https://github.com/googleapis/genai-toolbox/issues/802)) ([a817b12](https://github.com/googleapis/genai-toolbox/commit/a817b120ca5e09ce80eb8d7544ebbe81fc28b082)), closes [#736](https://github.com/googleapis/genai-toolbox/issues/736)
* **prebuilt/alloydb-admin-config:** Add alloydb control plane as a prebuilt config ([#937](https://github.com/googleapis/genai-toolbox/issues/937)) ([0b28b72](https://github.com/googleapis/genai-toolbox/commit/0b28b72aa0ca2cdc87afbddbeb7f4dbb9688593d))
* **prebuilt/mysql,prebuilt/mssql:** Add generic mysql and mssql prebuilt tools ([#983](https://github.com/googleapis/genai-toolbox/issues/983)) ([c600c30](https://github.com/googleapis/genai-toolbox/commit/c600c30374443b6106c1f10b60cd334fd202789b))
* **server/mcp:** Support MCP version 2025-06-18 ([#898](https://github.com/googleapis/genai-toolbox/issues/898)) ([313d3ca](https://github.com/googleapis/genai-toolbox/commit/313d3ca0d084a3a6e7ac9a21a862aa31bf3edadd))
* **sources/mssql:** Add support for encrypt connection parameter ([#874](https://github.com/googleapis/genai-toolbox/issues/874)) ([14a868f](https://github.com/googleapis/genai-toolbox/commit/14a868f2a0780b94c2ca104419b2ff098778303b))
* **sources/firestore:** Add Firestore as Source ([#786](https://github.com/googleapis/genai-toolbox/issues/786)) ([2bb790e](https://github.com/googleapis/genai-toolbox/commit/2bb790e4f8194b677fe0ba40122d409d0e3e687e))
* **sources/mongodb:** Add MongoDB Source ([#969](https://github.com/googleapis/genai-toolbox/issues/969)) ([74dbd61](https://github.com/googleapis/genai-toolbox/commit/74dbd6124daab6192dd880dbd1d15f36861abf74))
* **tools/alloydb-wait-for-operation:** Add wait for operation tool with exponential backoff ([#920](https://github.com/googleapis/genai-toolbox/issues/920)) ([3f6ec29](https://github.com/googleapis/genai-toolbox/commit/3f6ec2944ede18ee02b10157cc048145bdaec87a))
* **tools/mongodb-aggregate:** Add MongoDB `aggregate` Tools ([#977](https://github.com/googleapis/genai-toolbox/issues/977)) ([bd399bb](https://github.com/googleapis/genai-toolbox/commit/bd399bb0fb7134469345ed9a1111ea4209440867))
* **tools/mongodb-delete:** Add MongoDB `delete` Tools ([#974](https://github.com/googleapis/genai-toolbox/issues/974)) ([78e9752](https://github.com/googleapis/genai-toolbox/commit/78e9752f620e065246f3e7b9d37062e492247c8a))
* **tools/mongodb-find:** Add MongoDB `find` Tools ([#970](https://github.com/googleapis/genai-toolbox/issues/970)) ([a747475](https://github.com/googleapis/genai-toolbox/commit/a7474752d8d7ea7af1e80a3c4533d2fd4154d897))
* **tools/mongodb-insert:** Add MongoDB `insert` Tools ([#975](https://github.com/googleapis/genai-toolbox/issues/975)) ([4c63f0c](https://github.com/googleapis/genai-toolbox/commit/4c63f0c1e402817a0c8fec611635e99290308d0e))
* **tools/mongodb-update:** Add MongoDB `update` Tools ([#972](https://github.com/googleapis/genai-toolbox/issues/972)) ([dfde52c](https://github.com/googleapis/genai-toolbox/commit/dfde52ca9a8e25e2f3944f52b4c2e307072b6c37))
* **tools/neo4j-execute-cypher:** Add neo4j-execute-cypher for Neo4j sources ([#946](https://github.com/googleapis/genai-toolbox/issues/946)) ([81d0505](https://github.com/googleapis/genai-toolbox/commit/81d05053b2e08338fd6eabe4849c309064f76b6b))
* **tools/neo4j-schema:** Add neo4j-schema tool ([#978](https://github.com/googleapis/genai-toolbox/issues/978)) ([be7db3d](https://github.com/googleapis/genai-toolbox/commit/be7db3dff263625ce64fdb726e81164996b7a708))
* **tools/wait:** Create wait for tool ([#885](https://github.com/googleapis/genai-toolbox/issues/885)) ([ed5ef4c](https://github.com/googleapis/genai-toolbox/commit/ed5ef4caea10ba1dbc49c0fc0a0d2b91cf341d3b))
### Bug Fixes
* Fix document preview pipeline for forked PRs ([#950](https://github.com/googleapis/genai-toolbox/issues/950)) ([481cc60](https://github.com/googleapis/genai-toolbox/commit/481cc608bae807d9e92497bc8863066916f7ef21))
* **prebuilt/firestore:** Mark database field as required in the firestore prebuilt tools ([#959](https://github.com/googleapis/genai-toolbox/issues/959)) ([15417d4](https://github.com/googleapis/genai-toolbox/commit/15417d4e0c7b173e81edbbeb672e53884d186104))
* **prebuilt/cloud-sql-mssql:** Correct source reference for execute_sql tool in cloud-sql-mssql.yaml prebuilt config ([#938](https://github.com/googleapis/genai-toolbox/issues/938)) ([d16728e](https://github.com/googleapis/genai-toolbox/commit/d16728e5c603eab37700876a6ddacbf709fd5823))
* **prebuilt/cloud-sql-mysql:** Update list_table tool ([#924](https://github.com/googleapis/genai-toolbox/issues/924)) ([2083ba5](https://github.com/googleapis/genai-toolbox/commit/2083ba50483951e9ee6101bb832aa68823cd96a5))
* Replace 'float' with 'number' in McpManifest ([#985](https://github.com/googleapis/genai-toolbox/issues/985)) ([59e23e1](https://github.com/googleapis/genai-toolbox/commit/59e23e17250a516e3931996114f32ac6526a4f8e))
* **server/api:** Add logger to context in tool invoke handler ([#891](https://github.com/googleapis/genai-toolbox/issues/891)) ([8ce311f](https://github.com/googleapis/genai-toolbox/commit/8ce311f256481e8f11ecb4aa505b95a562f394ef))
* **sources/looker:** Add agent tag to Looker API calls. ([#966](https://github.com/googleapis/genai-toolbox/issues/966)) ([f55dd6f](https://github.com/googleapis/genai-toolbox/commit/f55dd6fcd099f23bd89df62b268c4a53d16f3bac))
* **tools/bigquery-execute-sql:** Ensure invoke always returns a non-null value ([#925](https://github.com/googleapis/genai-toolbox/issues/925)) ([9a55b80](https://github.com/googleapis/genai-toolbox/commit/9a55b804821a6ccfcd157bcfaee7e599c4a5cb63))
* **tools/mysqlsql:** Unmarshal json data from database during invoke ([#979](https://github.com/googleapis/genai-toolbox/issues/979)) ([ccc3498](https://github.com/googleapis/genai-toolbox/commit/ccc3498cf0a4c43eb909e3850b9e6f582cd48f2a)), closes [#840](https://github.com/googleapis/genai-toolbox/issues/840)
## [0.9.0](https://github.com/googleapis/genai-toolbox/compare/v0.8.0...v0.9.0) (2025-07-11)

View File

@@ -114,7 +114,7 @@ To install Toolbox as a binary:
<!-- {x-release-please-start-version} -->
```sh
# see releases page for other versions
export VERSION=0.10.0
export VERSION=0.9.0
curl -O https://storage.googleapis.com/genai-toolbox/v$VERSION/linux/amd64/toolbox
chmod +x toolbox
```
@@ -127,23 +127,12 @@ You can also install Toolbox as a container:
```sh
# see releases page for other versions
export VERSION=0.10.0
export VERSION=0.9.0
docker pull us-central1-docker.pkg.dev/database-toolbox/toolbox/toolbox:$VERSION
```
</details>
<details>
<summary>Homebrew</summary>
To install Toolbox using Homebrew on macOS or Linux:
```sh
brew install mcp-toolbox
```
</details>
<details>
<summary>Compile from source</summary>
@@ -151,7 +140,7 @@ To install from source, ensure you have the latest version of
[Go installed](https://go.dev/doc/install), and then run the following command:
```sh
go install github.com/googleapis/genai-toolbox@v0.10.0
go install github.com/googleapis/genai-toolbox@v0.9.0
```
<!-- {x-release-please-end} -->
@@ -165,18 +154,8 @@ execute `toolbox` to start the server:
```sh
./toolbox --tools-file "tools.yaml"
```
> [!NOTE]
> Toolbox enables dynamic reloading by default. To disable, use the
> `--disable-reload` flag.
#### Homebrew Users
If you installed Toolbox using Homebrew, the `toolbox` binary is available in your system path. You can start the server with the same command:
```sh
toolbox --tools-file "tools.yaml"
```
> Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
You can use `toolbox help` for a full list of flags! To stop the server, send a
terminate signal (`ctrl+c` on most platforms).
@@ -530,9 +509,9 @@ For more detailed instructions on using the Toolbox Core SDK, see the
// Convert the tool using the tbgenkit package
// Use this tool with Genkit Go
genkitTool, err := tbgenkit.ToGenkitTool(tool, g)
if err != nil {
log.Fatalf("Failed to convert tool: %v\n", err)
}
if err != nil {
log.Fatalf("Failed to convert tool: %v\n", err)
}
}
```

View File

@@ -1,20 +0,0 @@
load("//tools/build_defs/go:go_library.bzl", "go_library")
load("//tools/build_defs/go:go_test.bzl", "go_test")
go_library(
name = "cmd",
srcs = [
"options.go",
"root.go",
],
embedsrcs = ["version.txt"],
)
go_test(
name = "cmd_test",
srcs = [
"options_test.go",
"root_test.go",
],
library = ":cmd",
)

View File

@@ -51,7 +51,6 @@ import (
_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigquerysql"
_ "github.com/googleapis/genai-toolbox/internal/tools/bigtable"
_ "github.com/googleapis/genai-toolbox/internal/tools/couchbase"
_ "github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexsearchentries"
_ "github.com/googleapis/genai-toolbox/internal/tools/dgraph"
_ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestoredeletedocuments"
_ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestoregetdocuments"
@@ -85,7 +84,6 @@ import (
_ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlsql"
_ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jcypher"
_ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jexecutecypher"
_ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema"
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgresexecutesql"
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgressql"
_ "github.com/googleapis/genai-toolbox/internal/tools/redis"
@@ -105,7 +103,6 @@ import (
_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"
_ "github.com/googleapis/genai-toolbox/internal/sources/couchbase"
_ "github.com/googleapis/genai-toolbox/internal/sources/dataplex"
_ "github.com/googleapis/genai-toolbox/internal/sources/dgraph"
_ "github.com/googleapis/genai-toolbox/internal/sources/firestore"
_ "github.com/googleapis/genai-toolbox/internal/sources/http"
@@ -216,7 +213,7 @@ func NewCommand(opts ...Option) *Command {
flags.BoolVar(&cmd.cfg.TelemetryGCP, "telemetry-gcp", false, "Enable exporting directly to Google Cloud Monitoring.")
flags.StringVar(&cmd.cfg.TelemetryOTLP, "telemetry-otlp", "", "Enable exporting using OpenTelemetry Protocol (OTLP) to the specified endpoint (e.g. 'http://127.0.0.1:4318')")
flags.StringVar(&cmd.cfg.TelemetryServiceName, "telemetry-service-name", "toolbox", "Sets the value of the service.name resource attribute for telemetry data.")
flags.StringVar(&cmd.prebuiltConfig, "prebuilt", "", "Use a prebuilt tool configuration by source type. Cannot be used with --tools-file. Allowed: 'alloydb-postgres-admin', 'alloydb-postgres', 'bigquery', 'cloud-sql-mysql', 'cloud-sql-postgres', 'cloud-sql-mssql', 'dataplex', 'firestore', 'mssql', 'mysql', 'postgres', 'spanner', 'spanner-postgres'.")
flags.StringVar(&cmd.prebuiltConfig, "prebuilt", "", "Use a prebuilt tool configuration by source type. Cannot be used with --tools-file. Allowed: 'alloydb-postgres-admin', 'alloydb-postgres', 'bigquery', 'cloud-sql-mysql', 'cloud-sql-postgres', 'cloud-sql-mssql', 'firestore', 'postgres', 'spanner', 'spanner-postgres'.")
flags.BoolVar(&cmd.cfg.Stdio, "stdio", false, "Listens via MCP STDIO instead of acting as a remote HTTP server.")
flags.BoolVar(&cmd.cfg.DisableReload, "disable-reload", false, "Disables dynamic reloading of tools file.")

View File

@@ -1167,10 +1167,7 @@ func TestPrebuiltTools(t *testing.T) {
cloudsqlpg_config, _ := prebuiltconfigs.Get("cloud-sql-postgres")
cloudsqlmysql_config, _ := prebuiltconfigs.Get("cloud-sql-mysql")
cloudsqlmssql_config, _ := prebuiltconfigs.Get("cloud-sql-mssql")
dataplex_config, _ := prebuiltconfigs.Get("dataplex")
firestoreconfig, _ := prebuiltconfigs.Get("firestore")
mysql_config, _ := prebuiltconfigs.Get("mysql")
mssql_config, _ := prebuiltconfigs.Get("mssql")
looker_config, _ := prebuiltconfigs.Get("looker")
postgresconfig, _ := prebuiltconfigs.Get("postgres")
spanner_config, _ := prebuiltconfigs.Get("spanner")
@@ -1244,16 +1241,6 @@ func TestPrebuiltTools(t *testing.T) {
},
},
},
{
name: "dataplex prebuilt tools",
in: dataplex_config,
wantToolset: server.ToolsetConfigs{
"dataplex-tools": tools.ToolsetConfig{
Name: "dataplex-tools",
ToolNames: []string{"dataplex_search_entries"},
},
},
},
{
name: "firestore prebuilt tools",
in: firestoreconfig,
@@ -1264,26 +1251,6 @@ func TestPrebuiltTools(t *testing.T) {
},
},
},
{
name: "mysql prebuilt tools",
in: mysql_config,
wantToolset: server.ToolsetConfigs{
"mysql-database-tools": tools.ToolsetConfig{
Name: "mysql-database-tools",
ToolNames: []string{"execute_sql", "list_tables"},
},
},
},
{
name: "mssql prebuilt tools",
in: mssql_config,
wantToolset: server.ToolsetConfigs{
"mssql-database-tools": tools.ToolsetConfig{
Name: "mssql-database-tools",
ToolNames: []string{"execute_sql", "list_tables"},
},
},
},
{
name: "looker prebuilt tools",
in: looker_config,

View File

@@ -1 +1 @@
0.10.0
0.9.0

View File

@@ -234,7 +234,7 @@
},
"outputs": [],
"source": [
"version = \"0.10.0\" # x-release-please-version\n",
"version = \"0.9.0\" # x-release-please-version\n",
"! curl -O https://storage.googleapis.com/genai-toolbox/v{version}/linux/amd64/toolbox\n",
"\n",
"# Make the binary executable\n",

View File

@@ -86,7 +86,7 @@ To install Toolbox as a binary:
```sh
# see releases page for other versions
export VERSION=0.10.0
export VERSION=0.9.0
curl -O https://storage.googleapis.com/genai-toolbox/v$VERSION/linux/amd64/toolbox
chmod +x toolbox
```
@@ -97,17 +97,10 @@ You can also install Toolbox as a container:
```sh
# see releases page for other versions
export VERSION=0.10.0
export VERSION=0.9.0
docker pull us-central1-docker.pkg.dev/database-toolbox/toolbox/toolbox:$VERSION
```
{{% /tab %}}
{{% tab header="Homebrew" lang="en" %}}
To install Toolbox using Homebrew on macOS or Linux:
```sh
brew install mcp-toolbox
```
{{% /tab %}}
{{% tab header="Compile from source" lang="en" %}}
@@ -115,7 +108,7 @@ To install from source, ensure you have the latest version of
[Go installed](https://go.dev/doc/install), and then run the following command:
```sh
go install github.com/googleapis/genai-toolbox@v0.10.0
go install github.com/googleapis/genai-toolbox@v0.9.0
```
{{% /tab %}}
@@ -130,20 +123,10 @@ execute `toolbox` to start the server:
```sh
./toolbox --tools-file "tools.yaml"
```
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the
`--disable-reload` flag.
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
#### Homebrew Users
If you installed Toolbox using Homebrew, the `toolbox` binary is available in your system path. You can start the server with the same command:
```sh
toolbox --tools-file "tools.yaml"
```
You can use `toolbox help` for a full list of flags! To stop the server, send a
terminate signal (`ctrl+c` on most platforms).
@@ -156,7 +139,6 @@ Once your server is up and running, you can load the tools into your
application. See below the list of Client SDKs for using various frameworks:
#### Python
{{< tabpane text=true persist=header >}}
{{% tab header="Core" lang="en" %}}
@@ -169,7 +151,7 @@ from toolbox_core import ToolboxClient
# update the url to point to your server
async with ToolboxClient("http://127.0.0.1:5000") as client:
async with ToolboxClient("<http://127.0.0.1:5000>") as client:
# these tools can be passed to your application!
tools = await client.load_toolset("toolset_name")
@@ -190,7 +172,7 @@ from toolbox_langchain import ToolboxClient
# update the url to point to your server
async with ToolboxClient("http://127.0.0.1:5000") as client:
async with ToolboxClient("<http://127.0.0.1:5000>") as client:
# these tools can be passed to your application!
tools = client.load_toolset()
@@ -211,7 +193,7 @@ from toolbox_llamaindex import ToolboxClient
# update the url to point to your server
async with ToolboxClient("http://127.0.0.1:5000") as client:
async with ToolboxClient("<http://127.0.0.1:5000>") as client:
# these tools can be passed to your application
@@ -583,6 +565,4 @@ func main() {
For more detailed instructions on using the Toolbox Go SDK, see the
[project's README](https://github.com/googleapis/mcp-toolbox-sdk-go/blob/main/core/README.md).
For end-to-end samples on using the Toolbox Go SDK with orchestration
frameworks, see the [project's
samples](https://github.com/googleapis/mcp-toolbox-sdk-go/tree/main/core/samples)
For end-to-end samples on using the Toolbox Go SDK with orchestration frameworks, see the [project's samples](https://github.com/googleapis/mcp-toolbox-sdk-go/tree/main/core/samples)

View File

@@ -19,9 +19,7 @@ This guide assumes you have already done the following:
### Cloud Setup (Optional)
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using
`vertexai=True` or a Google GenAI model), follow these one-time setup steps for
local development:
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using `vertexai=True` or a Google GenAI model), follow these one-time setup steps for local development:
1. [Install the Google Cloud CLI](https://cloud.google.com/sdk/docs/install)
1. [Set up Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment)
@@ -156,6 +154,7 @@ postgres` and a password next time.
\q
```
## Step 2: Install and configure Toolbox
In this section, we will download Toolbox, configure our tools in a
@@ -171,7 +170,7 @@ In this section, we will download Toolbox, configure our tools in a
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -272,10 +271,8 @@ In this section, we will download Toolbox, configure our tools in a
```bash
./toolbox --tools-file "tools.yaml"
```
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the
`--disable-reload` flag.
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
## Step 3: Connect your agent to Toolbox

View File

@@ -15,8 +15,7 @@ This guide assumes you have already done the following:
### Cloud Setup (Optional)
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using
Gemini or PaLM models), follow these one-time setup steps:
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using Gemini or PaLM models), follow these one-time setup steps:
1. [Install the Google Cloud CLI]
1. [Set up Application Default Credentials (ADC)]
@@ -30,8 +29,8 @@ Gemini or PaLM models), follow these one-time setup steps:
[Go (v1.24.2 or higher)]: https://go.dev/doc/install
[install-postgres]: https://www.postgresql.org/download/
[Install the Google Cloud CLI]: https://cloud.google.com/sdk/docs/install
[Set up Application Default Credentials (ADC)]:
https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment
[Set up Application Default Credentials (ADC)]: https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment
## Step 1: Set up your database
@@ -167,7 +166,7 @@ In this section, we will download Toolbox, configure our tools in a
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -268,10 +267,8 @@ In this section, we will download Toolbox, configure our tools in a
```bash
./toolbox --tools-file "tools.yaml"
```
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the
`--disable-reload` flag.
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
## Step 3: Connect your agent to Toolbox
@@ -285,15 +282,13 @@ from Toolbox.
go mod init main
```
1. In a new terminal, install the
[SDK](https://pkg.go.dev/github.com/googleapis/mcp-toolbox-sdk-go).
1. In a new terminal, install the [SDK](https://pkg.go.dev/github.com/googleapis/mcp-toolbox-sdk-go).
```bash
go get github.com/googleapis/mcp-toolbox-sdk-go
```
1. Create a new file named `hotelagent.go` and copy the following code to create
an agent:
1. Create a new file named `hotelagent.go` and copy the following code to create an agent:
{{< tabpane persist=header >}}
{{< tab header="LangChain Go" lang="go" >}}
@@ -922,6 +917,5 @@ func main() {
```
{{< notice info >}}
For more information, visit the [Go SDK
repo](https://github.com/googleapis/mcp-toolbox-sdk-go).
{{</ notice >}}
For more information, visit the [Go SDK repo](https://github.com/googleapis/mcp-toolbox-sdk-go).
{{</ notice >}}

View File

@@ -15,8 +15,7 @@ This guide assumes you have already done the following:
### Cloud Setup (Optional)
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using
Gemini or PaLM models), follow these one-time setup steps:
If you plan to use **Google Cloud's Vertex AI** with your agent (e.g., using Gemini or PaLM models), follow these one-time setup steps:
1. [Install the Google Cloud CLI]
1. [Set up Application Default Credentials (ADC)]
@@ -30,8 +29,8 @@ Gemini or PaLM models), follow these one-time setup steps:
[Node.js (v18 or higher)]: https://nodejs.org/
[install-postgres]: https://www.postgresql.org/download/
[Install the Google Cloud CLI]: https://cloud.google.com/sdk/docs/install
[Set up Application Default Credentials (ADC)]:
https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment
[Set up Application Default Credentials (ADC)]: https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment
## Step 1: Set up your database
@@ -167,7 +166,7 @@ In this section, we will download Toolbox, configure our tools in a
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -268,7 +267,6 @@ In this section, we will download Toolbox, configure our tools in a
```bash
./toolbox --tools-file "tools.yaml"
```
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
@@ -340,6 +338,7 @@ async function runApplication() {
model: "gemini-2.0-flash",
});
const client = new ToolboxClient("http://127.0.0.1:5000");
const toolboxTools = await client.loadToolset("my-toolset");
@@ -364,6 +363,7 @@ async function runApplication() {
},
};
for (const query of queries) {
const agentOutput = await agent.invoke(
{
@@ -575,4 +575,4 @@ main();
{{< notice info >}}
For more information, visit the [JS SDK repo](https://github.com/googleapis/mcp-toolbox-sdk-js).
{{</ notice >}}
{{</ notice >}}

View File

@@ -105,7 +105,7 @@ In this section, we will download Toolbox, configure our tools in a
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -218,8 +218,7 @@ In this section, we will download Toolbox, configure our tools in a
1. Type `y` when it asks to install the inspector package.
1. It should show the following when the MCP Inspector is up and running (please
take note of `<YOUR_SESSION_TOKEN>`):
1. It should show the following when the MCP Inspector is up and running (please take note of `<YOUR_SESSION_TOKEN>`):
```bash
Starting MCP inspector...
@@ -237,8 +236,7 @@ In this section, we will download Toolbox, configure our tools in a
1. For `URL`, type in `http://127.0.0.1:5000/mcp`.
1. For `Configuration` -> `Proxy Session Token`, make sure
`<YOUR_SESSION_TOKEN>` is present.
1. For `Configuration` -> `Proxy Session Token`, make sure `<YOUR_SESSION_TOKEN>` is present.
1. Click Connect.
@@ -248,4 +246,4 @@ In this section, we will download Toolbox, configure our tools in a
![inspector_tools](./inspector_tools.png)
1. Test out your tools here!
1. Test out your tools here!

View File

@@ -7,56 +7,3 @@ description: >
aliases:
- /how-to/connect_tools_using_mcp
---
## `--prebuilt` Flag
The `--prebuilt` flag allows you to use predefined tool configurations for common database types without creating a custom `tools.yaml` file.
### Usage
```bash
./toolbox --prebuilt <source-type> [other-flags]
```
### Supported Source Types
The following prebuilt configurations are available:
- `alloydb-postgres` - AlloyDB PostgreSQL with execute_sql and list_tables tools
- `bigquery` - BigQuery with execute_sql, get_dataset_info, get_table_info, list_dataset_ids, and list_table_ids tools
- `cloud-sql-mysql` - Cloud SQL MySQL with execute_sql and list_tables tools
- `cloud-sql-postgres` - Cloud SQL PostgreSQL with execute_sql and list_tables tools
- `cloud-sql-mssql` - Cloud SQL SQL Server with execute_sql and list_tables tools
- `postgres` - PostgreSQL with execute_sql and list_tables tools
- `spanner` - Spanner (GoogleSQL) with execute_sql, execute_sql_dql, and list_tables tools
- `spanner-postgres` - Spanner (PostgreSQL) with execute_sql, execute_sql_dql, and list_tables tools
### Examples
#### PostgreSQL with STDIO transport
```bash
./toolbox --prebuilt postgres --stdio
```
This is commonly used in MCP client configurations:
#### BigQuery remote HTTP transport
```bash
./toolbox --prebuilt bigquery [--port 8080]
```
### Environment Variables
When using `--prebuilt`, you still need to provide database connection details through environment variables. The specific variables depend on the source type; see the documentation per database for the complete list.
For PostgreSQL-based sources (a usage sketch follows this list):
- `POSTGRES_HOST`
- `POSTGRES_PORT`
- `POSTGRES_DATABASE`
- `POSTGRES_USER`
- `POSTGRES_PASSWORD`
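A minimal end-to-end sketch combining the variables above with the usage shown earlier (all values are placeholders):

```bash
# Provide connection details via environment variables (placeholder values),
# then start Toolbox with the prebuilt PostgreSQL configuration over STDIO.
export POSTGRES_HOST="127.0.0.1"
export POSTGRES_PORT="5432"
export POSTGRES_DATABASE="mydb"
export POSTGRES_USER="postgres"
export POSTGRES_PASSWORD="my-password"

./toolbox --prebuilt postgres --stdio
```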
## Notes
The `--prebuilt` flag was added in version 0.6.0.

View File

@@ -1,342 +0,0 @@
---
title: "AlloyDB Admin API using MCP"
type: docs
weight: 2
description: >
Create your AlloyDB database with MCP Toolbox.
---
This guide covers how to use [MCP Toolbox for Databases][toolbox] to create
AlloyDB clusters and instances from your IDE, enabling an end-to-end (E2E) journey.
- [Cursor][cursor]
- [Windsurf][windsurf] (Codium)
- [Visual Studio Code][vscode] (Copilot)
- [Cline][cline] (VS Code extension)
- [Claude desktop][claudedesktop]
- [Claude code][claudecode]
- [Gemini CLI][geminicli]
- [Gemini Code Assist][geminicodeassist]
[toolbox]: https://github.com/googleapis/genai-toolbox
[cursor]: #configure-your-mcp-client
[windsurf]: #configure-your-mcp-client
[vscode]: #configure-your-mcp-client
[cline]: #configure-your-mcp-client
[claudedesktop]: #configure-your-mcp-client
[claudecode]: #configure-your-mcp-client
[geminicli]: #configure-your-mcp-client
[geminicodeassist]: #configure-your-mcp-client
## Before you begin
1. In the Google Cloud console, on the [project selector
page](https://console.cloud.google.com/projectselector2/home/dashboard),
select or create a Google Cloud project.
1. [Make sure that billing is enabled for your Google Cloud
project](https://cloud.google.com/billing/docs/how-to/verify-billing-enabled#confirm_billing_is_enabled_on_a_project).
## Install MCP Toolbox
1. Download the latest version of Toolbox as a binary. Select the [correct
binary](https://github.com/googleapis/genai-toolbox/releases) corresponding
to your OS and CPU architecture. You are required to use Toolbox version
v0.10.0+:
<!-- {x-release-please-start-version} -->
{{< tabpane persist=header >}}
{{< tab header="linux/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/linux/amd64/toolbox
{{< /tab >}}
{{< tab header="darwin/arm64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/arm64/toolbox
{{< /tab >}}
{{< tab header="darwin/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/amd64/toolbox
{{< /tab >}}
{{< tab header="windows/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolbox.exe
{{< /tab >}}
{{< /tabpane >}}
<!-- {x-release-please-end} -->
1. Make the binary executable:
```bash
chmod +x toolbox
```
1. Verify the installation:
```bash
./toolbox --version
```
## Configure your MCP Client
{{< tabpane text=true >}}
{{% tab header="Claude code" lang="en" %}}
1. Install [Claude
Code](https://docs.anthropic.com/en/docs/agents-and-tools/claude-code/overview).
1. Create a `.mcp.json` file in your project root if it doesn't exist.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
1. Restart Claude code to apply the new configuration.
{{% /tab %}}
{{% tab header="Claude desktop" lang="en" %}}
1. Open [Claude desktop](https://claude.ai/download) and navigate to Settings.
1. Under the Developer tab, tap Edit Config to open the configuration file.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
1. Restart Claude desktop.
1. From the new chat screen, you should see a hammer (MCP) icon appear with the
new MCP server available.
{{% /tab %}}
{{% tab header="Cline" lang="en" %}}
1. Open the [Cline](https://github.com/cline/cline) extension in VS Code and tap
the **MCP Servers** icon.
1. Tap Configure MCP Servers to open the configuration file.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
1. You should see a green active status after the server is successfully
connected.
{{% /tab %}}
{{% tab header="Cursor" lang="en" %}}
1. Create a `.cursor` directory in your project root if it doesn't exist.
1. Create a `.cursor/mcp.json` file if it doesn't exist and open it.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
1. Open [Cursor](https://www.cursor.com/) and navigate to **Settings > Cursor
   Settings > MCP**. You should see a green active status after the server is
   successfully connected.
{{% /tab %}}
{{% tab header="Visual Studio Code (Copilot)" lang="en" %}}
1. Open [VS Code](https://code.visualstudio.com/docs/copilot/overview) and
create a `.vscode` directory in your project root if it doesn't exist.
1. Create a `.vscode/mcp.json` file if it doesn't exist and open it.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Windsurf" lang="en" %}}
1. Open [Windsurf](https://docs.codeium.com/windsurf) and navigate to the
Cascade assistant.
1. Tap on the hammer (MCP) icon, then Configure to open the configuration file.
1. Generate an access token to be used as the API_KEY using `gcloud auth
   print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Gemini CLI" lang="en" %}}
1. Install the [Gemini
CLI](https://github.com/google-gemini/gemini-cli?tab=readme-ov-file#quickstart).
1. In your working directory, create a folder named `.gemini`. Within it, create
a `settings.json` file.
1. Generate an access token to be used as the API_KEY using `gcloud auth print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Gemini Code Assist" lang="en" %}}
1. Install the [Gemini Code
Assist](https://marketplace.visualstudio.com/items?itemName=Google.geminicodeassist)
extension in Visual Studio Code.
1. Enable Agent Mode in Gemini Code Assist chat.
1. In your working directory, create a folder named `.gemini`. Within it, create
a `settings.json` file.
1. Generate an access token to be used as the API_KEY using `gcloud auth print-access-token`.
> **Note:** The token's lifetime is 1 hour.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"alloydb-admin": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt", "alloydb-postgres-admin", "--stdio"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
{{% /tab %}}
{{< /tabpane >}}
## Use Tools
Your AI tool is now connected to AlloyDB using MCP. Try asking your AI assistant
to create a database, cluster or instance.
The following tools are available to the LLM:
1. **alloydb-create-cluster**: creates an AlloyDB cluster
1. **alloydb-create-instance**: creates an AlloyDB instance (PRIMARY, READ_POOL, or SECONDARY)
1. **alloydb-get-operation**: polls the operations API until the operation is done.
{{< notice note >}}
Prebuilt tools are pre-1.0, so expect some tool changes between versions. LLMs
will adapt to the tools available, so this shouldn't affect most users.
{{< /notice >}}
## Connect to your Data
After setting up an AlloyDB cluster and instance, you can [connect your IDE to
the
database](https://cloud.google.com/alloydb/docs/pre-built-tools-with-mcp-toolbox).

View File

@@ -1,333 +0,0 @@
---
title: "Firestore using MCP"
type: docs
weight: 2
description: >
Connect your IDE to Firestore using Toolbox.
---
[Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) is
an open protocol for connecting Large Language Models (LLMs) to data sources
like Firestore. This guide covers how to use [MCP Toolbox for Databases][toolbox]
to expose your developer assistant tools to a Firestore instance:
* [Cursor][cursor]
* [Windsurf][windsurf] (Codium)
* [Visual Studio Code][vscode] (Copilot)
* [Cline][cline] (VS Code extension)
* [Claude desktop][claudedesktop]
* [Claude code][claudecode]
* [Gemini CLI][geminicli]
* [Gemini Code Assist][geminicodeassist]
[toolbox]: https://github.com/googleapis/genai-toolbox
[cursor]: #configure-your-mcp-client
[windsurf]: #configure-your-mcp-client
[vscode]: #configure-your-mcp-client
[cline]: #configure-your-mcp-client
[claudedesktop]: #configure-your-mcp-client
[claudecode]: #configure-your-mcp-client
[geminicli]: #configure-your-mcp-client
[geminicodeassist]: #configure-your-mcp-client
## Set up Firestore
1. Create or select a Google Cloud project.
* [Create a new
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
* [Select an existing
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
1. [Enable the Firestore
API](https://console.cloud.google.com/apis/library/firestore.googleapis.com)
for your project.
1. [Create a Firestore
database](https://cloud.google.com/firestore/docs/create-database-web-mobile-client-library)
if you haven't already.
1. Set up authentication for your local environment.
* [Install gcloud CLI](https://cloud.google.com/sdk/docs/install)
* Run `gcloud auth application-default login` to authenticate
## Install MCP Toolbox
1. Download the latest version of Toolbox as a binary. Select the [correct
binary](https://github.com/googleapis/genai-toolbox/releases) corresponding
to your OS and CPU architecture. You are required to use Toolbox version
v0.10.0+:
<!-- {x-release-please-start-version} -->
{{< tabpane persist=header >}}
{{< tab header="linux/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/linux/amd64/toolbox
{{< /tab >}}
{{< tab header="darwin/arm64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/arm64/toolbox
{{< /tab >}}
{{< tab header="darwin/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/amd64/toolbox
{{< /tab >}}
{{< tab header="windows/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolbox
{{< /tab >}}
{{< /tabpane >}}
<!-- {x-release-please-end} -->
1. Make the binary executable:
```bash
chmod +x toolbox
```
1. Verify the installation:
```bash
./toolbox --version
```
## Configure your MCP Client
{{< tabpane text=true >}}
{{% tab header="Claude code" lang="en" %}}
1. Install [Claude
Code](https://docs.anthropic.com/en/docs/agents-and-tools/claude-code/overview).
1. Create a `.mcp.json` file in your project root if it doesn't exist.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
1. Restart Claude code to apply the new configuration.
{{% /tab %}}
{{% tab header="Claude desktop" lang="en" %}}
1. Open [Claude desktop](https://claude.ai/download) and navigate to Settings.
1. Under the Developer tab, tap Edit Config to open the configuration file.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
1. Restart Claude desktop.
1. From the new chat screen, you should see a hammer (MCP) icon appear with the
new MCP server available.
{{% /tab %}}
{{% tab header="Cline" lang="en" %}}
1. Open the [Cline](https://github.com/cline/cline) extension in VS Code and tap
the **MCP Servers** icon.
1. Tap Configure MCP Servers to open the configuration file.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
1. You should see a green active status after the server is successfully
connected.
{{% /tab %}}
{{% tab header="Cursor" lang="en" %}}
1. Create a `.cursor` directory in your project root if it doesn't exist.
1. Create a `.cursor/mcp.json` file if it doesn't exist and open it.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
1. Open [Cursor](https://www.cursor.com/) and navigate to **Settings > Cursor
   Settings > MCP**. You should see a green active status after the server is
   successfully connected.
{{% /tab %}}
{{% tab header="Visual Studio Code (Copilot)" lang="en" %}}
1. Open [VS Code](https://code.visualstudio.com/docs/copilot/overview) and
create a `.vscode` directory in your project root if it doesn't exist.
1. Create a `.vscode/mcp.json` file if it doesn't exist and open it.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Windsurf" lang="en" %}}
1. Open [Windsurf](https://docs.codeium.com/windsurf) and navigate to the
Cascade assistant.
1. Tap on the hammer (MCP) icon, then Configure to open the configuration file.
1. Add the following configuration, replace the environment variables with your
values, and save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Gemini CLI" lang="en" %}}
1. Install the [Gemini
CLI](https://github.com/google-gemini/gemini-cli?tab=readme-ov-file#quickstart).
1. In your working directory, create a folder named `.gemini`. Within it, create
a `settings.json` file.
1. Add the following configuration, replace the environment variables with your
values, and then save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
{{% /tab %}}
{{% tab header="Gemini Code Assist" lang="en" %}}
1. Install the [Gemini Code
Assist](https://marketplace.visualstudio.com/items?itemName=Google.geminicodeassist)
extension in Visual Studio Code.
1. Enable Agent Mode in Gemini Code Assist chat.
1. In your working directory, create a folder named `.gemini`. Within it, create
a `settings.json` file.
1. Add the following configuration, replace the environment variables with your
values, and then save:
```json
{
"mcpServers": {
"firestore": {
"command": "./PATH/TO/toolbox",
"args": ["--prebuilt","firestore","--stdio"],
"env": {
"FIRESTORE_PROJECT": "your-project-id",
"FIRESTORE_DATABASE": "(default)"
}
}
}
}
```
{{% /tab %}}
{{< /tabpane >}}
## Use Tools
Your AI tool is now connected to Firestore using MCP. Try asking your AI
assistant to list collections, get documents, query collections, or manage
security rules.
The following tools are available to the LLM:
1. **firestore-get-documents**: Gets multiple documents from Firestore by their
paths
1. **firestore-list-collections**: List Firestore collections for a given parent
path
1. **firestore-delete-documents**: Delete multiple documents from Firestore
1. **firestore-query-collection**: Query documents from a collection with
filtering, ordering, and limit options
1. **firestore-get-rules**: Retrieves the active Firestore security rules for
the current project
1. **firestore-validate-rules**: Validates Firestore security rules syntax and
errors
{{< notice note >}}
Prebuilt tools are pre-1.0, so expect some tool changes between versions. LLMs
will adapt to the tools available, so this shouldn't affect most users.
{{< /notice >}}

View File

@@ -46,19 +46,19 @@ to expose your developer assistant tools to a Looker instance:
<!-- {x-release-please-start-version} -->
{{< tabpane persist=header >}}
{{< tab header="linux/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/linux/amd64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.10.0/linux/amd64/toolbox>
{{< /tab >}}
{{< tab header="darwin/arm64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/arm64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/arm64/toolbox>
{{< /tab >}}
{{< tab header="darwin/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/amd64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/amd64/toolbox>
{{< /tab >}}
{{< tab header="windows/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolbox.exe
curl -O <https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolbox.exe>
{{< /tab >}}
{{< /tabpane >}}
<!-- {x-release-please-end} -->
@@ -90,8 +90,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",
@@ -117,8 +121,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",
@@ -147,8 +155,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",
@@ -175,8 +187,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",
@@ -205,8 +221,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",
@@ -232,8 +252,12 @@ curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolb
{
"mcpServers": {
"looker-toolbox": {
"command": "./PATH/TO/toolbox",
"args": ["--stdio", "--prebuilt", "looker"],
"command": "/PATH/TO/toolbox",
"args": [
"--stdio",
"--prebuilt",
"looker"
],
"env": {
"LOOKER_BASE_URL": "https://looker.example.com",
"LOOKER_CLIENT_ID": "",

View File

@@ -52,19 +52,19 @@ Omni](https://cloud.google.com/alloydb/omni/current/docs/overview).
<!-- {x-release-please-start-version} -->
{{< tabpane persist=header >}}
{{< tab header="linux/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/linux/amd64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.9.0/linux/amd64/toolbox>
{{< /tab >}}
{{< tab header="darwin/arm64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/arm64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.9.0/darwin/arm64/toolbox>
{{< /tab >}}
{{< tab header="darwin/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/darwin/amd64/toolbox
curl -O <https://storage.googleapis.com/genai-toolbox/v0.9.0/darwin/amd64/toolbox>
{{< /tab >}}
{{< tab header="windows/amd64" lang="bash" >}}
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/windows/amd64/toolbox.exe
curl -O <https://storage.googleapis.com/genai-toolbox/v0.9.0/windows/amd64/toolbox>
{{< /tab >}}
{{< /tabpane >}}
<!-- {x-release-please-end} -->

View File

@@ -28,9 +28,8 @@ Toolbox currently supports the following versions of MCP specification:
The auth implementation in Toolbox is not supported in MCP's auth specification.
This includes:
* [Authenticated Parameters](../resources/tools/_index.md#authenticated-parameters)
* [Authorized Invocations](../resources/tools/_index.md#authorized-invocations)
* [Authenticated Parameters](../resources/tools/_index.md#authenticated-parameters)
* [Authorized Invocations](../resources/tools/_index.md#authorized-invocations)
## Connecting to Toolbox with an MCP client
@@ -63,8 +62,7 @@ remote HTTP server. Logs will be set to the `warn` level by default. `debug` and
`info` logs are not supported with stdio.
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the
`--disable-reload` flag.
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
### Connecting via HTTP

View File

@@ -1,93 +0,0 @@
---
title: "Dataplex"
type: docs
weight: 1
description: >
Dataplex Universal Catalog is a unified, intelligent governance solution for data and AI assets in Google Cloud. Dataplex Universal Catalog powers AI, analytics, and business intelligence at scale.
---
# Dataplex Source
[Dataplex][dataplex-docs] Universal Catalog is a unified, intelligent governance
solution for data and AI assets in Google Cloud. Dataplex Universal Catalog
powers AI, analytics, and business intelligence at scale.
At the heart of these governance capabilities is a catalog that contains a
centralized inventory of the data assets in your organization. Dataplex
Universal Catalog holds business, technical, and runtime metadata for all of
your data. It helps you discover relationships and semantics in the metadata by
applying artificial intelligence and machine learning.
[dataplex-docs]: https://cloud.google.com/dataplex/docs
## Example
```yaml
sources:
my-dataplex-source:
kind: "dataplex"
project: "my-project-id"
```
## Sample System Prompt
You can use the following system prompt as "Custom Instructions" in your client
application.
```
Whenever you receive a response from the dataplex_search_entries tool, decide what to do by following these steps:
1. If there are multiple search results found
1.1. Present the list of search results
1.2. Format the output as a nested ordered list, for example:
Given
```
{
results: [
    {
      name: "projects/test-project/locations/us/entryGroups/@bigquery-aws-us-east-1/entries/users",
      entrySource: {
        displayName: "Users",
        description: "Table contains list of users.",
        location: "aws-us-east-1",
        system: "BigQuery"
      }
    },
    {
      name: "projects/another_project/locations/us-central1/entryGroups/@bigquery/entries/top_customers",
      entrySource: {
        displayName: "Top customers",
        description: "Table contains list of best customers.",
        location: "us-central1",
        system: "BigQuery"
      }
    },
]
}
```
Return output formatted as markdown nested list:
```
* Users:
- projectId: test_project
- location: aws-us-east-1
- description: Table contains list of users.
* Top customers:
- projectId: another_project
- location: us-central1
- description: Table contains list of best customers.
```
1.3. Ask the user to select one of the presented search results
2. If there is only one search result found
2.1. Present the search result immediately.
3. If there are no search results found
3.1. Explain that no search results were found
3.2. Suggest providing a more specific search query.
Do not try to search within search results on your own.
```
## Reference
| **field** | **type** | **required** | **description** |
|-----------|:--------:|:------------:|----------------------------------------------------------------------------------|
| kind | string | true | Must be "dataplex". |
| project   |  string  |     true     | ID of the GCP project used for quota and billing purposes (e.g. "my-project-id"). |
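
Putting the source together with the search tool documented later in this changeset, a complete configuration sketch might look like this (the tool name `search_entries` is a placeholder):

```yaml
sources:
  my-dataplex-source:
    kind: "dataplex"
    project: "my-project-id"

tools:
  search_entries:
    kind: dataplex-search-entries
    source: my-dataplex-source
    description: Use this tool to get all the entries based on the provided query.
```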

View File

@@ -33,13 +33,8 @@ with [Firestore][firestore-docs].
In addition to [setting the ADC for your server][set-adc], you need to ensure
the IAM identity has been given the correct IAM permissions for accessing
Firestore. Common roles include:
- `roles/datastore.user` - Read and write access to Firestore
- `roles/datastore.viewer` - Read-only access to Firestore
- `roles/firebaserules.admin` - Full management of Firebase Security Rules for
Firestore. This role is required for operations that involve creating,
updating, or managing Firestore security rules (see [Firebase Security Rules
roles][firebaserules-roles])
See [Firestore access control][firestore-iam] for more information on
applying IAM permissions and roles to an identity.
@@ -48,8 +43,6 @@ applying IAM permissions and roles to an identity.
[adc]: https://cloud.google.com/docs/authentication#adc
[set-adc]: https://cloud.google.com/docs/authentication/provide-credentials-adc
[firestore-iam]: https://cloud.google.com/firestore/docs/security/iam
[firebaserules-roles]:
https://cloud.google.com/iam/docs/roles-permissions/firebaserules
### Database Selection

View File

@@ -21,8 +21,7 @@ in the cloud, on GCP, or on premises.
This source only uses API authentication. You will need to
[create an API user][looker-user] to login to Looker.
[looker-user]: https://cloud.google.com/looker/docs/api-auth#authentication_with_an_sdk
## Example
@@ -37,10 +36,9 @@ sources:
timeout: 600s
```
The Looker base url will look like "https://looker.example.com"; don't include
a trailing "/". In some cases, especially if your Looker is deployed
on-premises, you may need to add the API port number, like
"https://looker.example.com:19999".
Verify ssl should almost always be "true" (all lower case) unless you are using
a self-signed ssl certificate for the Looker server. Anything other than "true"

View File

@@ -9,10 +9,9 @@ description: >
## About
[MongoDB][mongodb-docs] is a leading NoSQL database that not only caters to
your operational data needs but also supports vector search.
[mongodb-docs]: https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/
## Example
@@ -25,10 +24,11 @@ sources:
```
## Reference
| **field** | **type** | **required** | **description** |
|-----------|:--------:|:------------:|-------------------------------------------------------------------|
| kind | string | true | Must be "mongodb". |
| uri       |  string  |     true     | Connection string used to connect to MongoDB.                       |
| database  |  string  |     true     | Name of the MongoDB database to connect to (e.g. "sample_mflix").   |
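
For reference, a minimal source entry might look like this sketch, assuming the connection string is supplied through an environment variable as in the other source examples:

```yaml
sources:
  my-mongo-source:
    kind: mongodb
    uri: ${MONGODB_URI}   # e.g. "mongodb+srv://user:password@cluster0.example.mongodb.net"
    database: sample_mflix
```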

View File

@@ -43,7 +43,6 @@ sources:
database: my_db
user: ${USER_NAME}
password: ${PASSWORD}
# encrypt: strict
```
{{< notice tip >}}
@@ -53,12 +52,11 @@ instead of hardcoding your secrets into the configuration file.
## Reference
| **field** | **type** | **required** | **description** |
|-----------|:--------:|:------------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be "mssql". |
| host | string | true | IP address to connect to (e.g. "127.0.0.1"). |
| port | string | true | Port to connect to (e.g. "1433"). |
| database | string | true | Name of the SQL Server database to connect to (e.g. "my_db"). |
| user | string | true | Name of the SQL Server user to connect as (e.g. "my-user"). |
| password | string | true | Password of the SQL Server user (e.g. "my-password"). |
| encrypt | string | false | Encryption level for data transmitted between the client and server (e.g., "strict"). If not specified, defaults to the [github.com/microsoft/go-mssqldb](https://github.com/microsoft/go-mssqldb?tab=readme-ov-file#common-parameters) package's default encrypt value. |
| **field** | **type** | **required** | **description** |
|-----------|:--------:|:------------:|------------------------------------------------------------------------|
| kind | string | true | Must be "mssql". |
| host | string | true | IP address to connect to (e.g. "127.0.0.1"). |
| port | string | true | Port to connect to (e.g. "1433"). |
| database | string | true | Name of the SQL Server database to connect to (e.g. "my_db"). |
| user | string | true | Name of the SQL Server user to connect as (e.g. "my-user"). |
| password | string | true | Password of the SQL Server user (e.g. "my-password"). |

View File

@@ -114,24 +114,20 @@ in the list using the items field:
| items | parameter object | true | Specify a Parameter object for the type of the values in the array. |
{{< notice note >}}
Items in array should not have a `default` or `required` value. If provided, it
will be ignored.
{{< /notice >}}
### Map Parameters
The map type is a collection of key-value pairs. It can be configured in two
ways:
- Generic Map: By default, it accepts values of any primitive type (string,
  integer, float, boolean), allowing for mixed data.
- Typed Map: By setting the valueType field, you can enforce that all values
within the map must be of the same specified type.
#### Generic Map (Mixed Value Types)
This is the default behavior when valueType is omitted. It's useful for passing
a flexible group of settings.
```yaml
parameters:
@@ -183,7 +179,7 @@ user's ID token.
| **field** | **type** | **required** | **description** |
|-----------|:--------:|:------------:|-----------------------------------------------------------------------------------------|
| name      | string   | true         | Name of the [authServices](../authservices) used to verify the OIDC auth token.          |
| field | string | true | Claim field decoded from the OIDC token used to auto-populate this parameter. |
### Template Parameters

View File

@@ -16,7 +16,7 @@ It's compatible with the following sources:
- [bigquery](../sources/bigquery.md)
`bigquery-get-dataset-info` takes a `dataset` parameter to specify the dataset
on the given source. It also optionally accepts a `project` parameter to
define the Google Cloud project ID. If the `project` parameter is not provided,
the tool defaults to using the project defined in the source configuration.
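
To make the parameter wiring concrete, here is a minimal configuration sketch; the source name and the optional `project` override are illustrative placeholders:

```yaml
tools:
  get_dataset_info:
    kind: bigquery-get-dataset-info
    source: my-bigquery-source
    description: Use this tool to get metadata about a BigQuery dataset.
    # project: my-other-project   # optional override of the source's project
```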

View File

@@ -16,8 +16,8 @@ It's compatible with the following sources:
- [bigquery](../sources/bigquery.md)
`bigquery-get-table-info` takes `dataset` and `table` parameters to specify
the target table. It also optionally accepts a `project` parameter to define
the Google Cloud project ID. If the `project` parameter is not provided, the
tool defaults to using the project defined in the source configuration.
## Example
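
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  get_table_info:
    kind: bigquery-get-table-info
    source: my-bigquery-source
    description: Use this tool to get metadata about a BigQuery table.
```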

View File

@@ -15,8 +15,8 @@ It's compatible with the following sources:
- [bigquery](../sources/bigquery.md)
`bigquery-list-dataset-ids` optionally accepts a `project` parameter to define
the Google Cloud project ID. If the `project` parameter is not provided, the
tool defaults to using the project defined in the source configuration.
## Example
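
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  list_dataset_ids:
    kind: bigquery-list-dataset-ids
    source: my-bigquery-source
    description: Use this tool to list dataset IDs in the project.
```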

View File

@@ -16,8 +16,8 @@ It's compatible with the following sources:
- [bigquery](../sources/bigquery.md)
`bigquery-list-table-ids` takes a required `dataset` parameter to specify the dataset
from which to list table IDs. It also optionally accepts a `project` parameter to
define the Google Cloud project ID. If the `project` parameter is not provided, the
tool defaults to using the project defined in the source configuration.
## Example
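
A minimal configuration might look like this sketch, assuming the tool kind is `bigquery-list-table-ids` as the paragraph above describes (the source name is a placeholder):

```yaml
tools:
  list_table_ids:
    kind: bigquery-list-table-ids
    source: my-bigquery-source
    description: Use this tool to list table IDs in a dataset.
```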

View File

@@ -20,14 +20,10 @@ instance. It's compatible with any of the following sources:
Bigtable supports SQL queries. The integration with Toolbox supports the
`googlesql` dialect; the specified SQL statement is executed as a [data
manipulation language (DML)][bigtable-googlesql] statement, and specified
parameters will be inserted according to their name: e.g. `@name`.
{{<notice note>}}
Bigtable's GoogleSQL support for DML statements might be limited to certain
query types. For detailed information on supported DML statements and use
cases, refer to the [Bigtable GoogleSQL use
cases](https://cloud.google.com/bigtable/docs/googlesql-overview#use-cases).
{{</notice>}}
[bigtable-googlesql]: https://cloud.google.com/bigtable/docs/googlesql-overview
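
Here is a sketch of a parameterized DML tool, assuming a `bigtable-sql` tool kind with `statement`/`parameters` fields as used by the other SQL tools in these docs; the table and column names are placeholders:

```yaml
tools:
  update_user_name:
    kind: bigtable-sql
    source: my-bigtable-instance
    description: Updates a user's name by user id.
    statement: |
      UPDATE my_table SET name = @name WHERE id = @id
    parameters:
      - name: name
        type: string
        description: The new name for the user.
      - name: id
        type: string
        description: The id of the row to update.
```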

View File

@@ -1,7 +0,0 @@
---
title: "Dataplex"
type: docs
weight: 1
description: >
Tools that work with Dataplex Sources.
---

View File

@@ -1,75 +0,0 @@
---
title: "dataplex-search-entries"
type: docs
weight: 1
description: >
A "dataplex-search-entries" tool allows to search for entries based on the provided query.
aliases:
- /resources/tools/dataplex-search-entries
---
## About
A `dataplex-search-entries` tool returns all entries in the Dataplex Catalog
(e.g. tables, views, models) that match the given user query.
It's compatible with the following sources:
- [dataplex](../sources/dataplex.md)
`dataplex-search-entries` takes a required `query` parameter, by which entries
are filtered and returned to the user, and a required `name` parameter, which
is constructed from the source's project if the user does not provide it
explicitly and has the format `projects/{project}/locations/global`. It also
optionally accepts the following parameters:
- `pageSize` - Number of results in the search page. Defaults to `5`.
- `pageToken` - Page token received from a previous locations.searchEntries
call.
- `orderBy` - Specifies the ordering of results. Supported values are: relevance
(default), last_modified_timestamp, last_modified_timestamp asc
- `semanticSearch` - Specifies whether the search should understand the meaning
and intent behind the query, rather than just matching keywords. Defaults to
`true`.
- `scope` - The scope under which the search should be operating. Since this
parameter is not exposed to the toolbox user, it defaults to the organization
where the project provided in name is located.
## Requirements
### IAM Permissions
Dataplex uses [Identity and Access Management (IAM)][iam-overview] to control
user and group access to Dataplex resources. Toolbox will use your
[Application Default Credentials (ADC)][adc] to authorize and authenticate when
interacting with [Dataplex][dataplex-docs].
In addition to [setting the ADC for your server][set-adc], you need to ensure
the IAM identity has been given the correct IAM permissions for the tasks you
intend to perform. See [Dataplex Universal Catalog IAM permissions][iam-permissions]
and [Dataplex Universal Catalog IAM roles][iam-roles] for more information on
applying IAM permissions and roles to an identity.
[iam-overview]: https://cloud.google.com/dataplex/docs/iam-and-access-control
[adc]: https://cloud.google.com/docs/authentication#adc
[set-adc]: https://cloud.google.com/docs/authentication/provide-credentials-adc
[iam-permissions]: https://cloud.google.com/dataplex/docs/iam-permissions
[iam-roles]: https://cloud.google.com/dataplex/docs/iam-roles
[dataplex-docs]: https://cloud.google.com/dataplex
## Example
```yaml
tools:
dataplex-search-entries:
kind: dataplex-search-entries
source: my-dataplex-source
description: Use this tool to get all the entries based on the provided query.
```
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:------------------------------------------:|:------------:|--------------------------------------------------------------------------------------------------|
| kind | string | true | Must be "dataplex-search-entries". |
| source | string | true | Name of the source the tool should execute on. |
| description | string | true | Description of the tool that is passed to the LLM. |

View File

@@ -10,15 +10,14 @@ aliases:
## About
A `firestore-delete-documents` tool deletes multiple documents from Firestore by
their paths.
It's compatible with the following sources:
- [firestore](../sources/firestore.md)
`firestore-delete-documents` takes one input parameter `documentPaths` which is
an array of document paths to delete. The tool uses Firestore's BulkWriter for
efficient batch deletion and returns the success status for each document.
## Example
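
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  delete_documents:
    kind: firestore-delete-documents
    source: my-firestore-source
    description: Deletes the documents at the given paths.
```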
@@ -32,8 +31,8 @@ tools:
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:--------------:|:------------:|----------------------------------------------------------|
| kind | string | true | Must be "firestore-delete-documents". |
| source | string | true | Name of the Firestore source to delete documents from. |
| description | string | true | Description of the tool that is passed to the LLM. |

View File

@@ -10,15 +10,14 @@ aliases:
## About
A `firestore-get-documents` tool retrieves multiple documents from Firestore by
their paths.
It's compatible with the following sources:
- [firestore](../sources/firestore.md)
`firestore-get-documents` takes one input parameter `documentPaths` which is an
array of document paths, and returns the documents' data along with metadata
such as existence status, creation time, update time, and read time.
## Example
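
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  get_documents:
    kind: firestore-get-documents
    source: my-firestore-source
    description: Retrieves documents by their full paths.
```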
@@ -32,8 +31,8 @@ tools:
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:--------------:|:------------:|------------------------------------------------------------|
| kind | string | true | Must be "firestore-get-documents". |
| source | string | true | Name of the Firestore source to retrieve documents from. |
| description | string | true | Description of the tool that is passed to the LLM. |

View File

@@ -10,15 +10,13 @@ aliases:
## About
A `firestore-get-rules` tool retrieves the active [Firestore security
rules](https://firebase.google.com/docs/firestore/security/get-started) for the
current project.
It's compatible with the following sources:
- [firestore](../sources/firestore.md)
`firestore-get-rules` takes no input parameters and returns the security rules
content along with metadata such as the ruleset name and timestamps.
## Example
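
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  get_rules:
    kind: firestore-get-rules
    source: my-firestore-source
    description: Retrieves the active Firestore security rules.
```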
@@ -32,8 +30,8 @@ tools:
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:-------------:|:------------:|-------------------------------------------------------|
| kind | string | true | Must be "firestore-get-rules". |
| source | string | true | Name of the Firestore source to retrieve rules from. |
| description | string | true | Description of the tool that is passed to the LLM. |

View File

@@ -10,11 +10,7 @@ aliases:
## About
A `firestore-list-collections` tool lists
[collections](https://firebase.google.com/docs/firestore/data-model#collections)
in Firestore, either at the root level or as
[subcollections](https://firebase.google.com/docs/firestore/data-model#subcollections)
of a specific document.
It's compatible with the following sources:
- [firestore](../sources/firestore.md)
@@ -35,8 +31,8 @@ tools:
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:----------------:|:------------:|--------------------------------------------------------|
| kind | string | true | Must be "firestore-list-collections". |
| source | string | true | Name of the Firestore source to list collections from. |
| description | string | true | Description of the tool that is passed to the LLM. |

View File

@@ -1,17 +1,6 @@
---
title: "firestore-query-collection"
type: docs
weight: 1
description: >
  A "firestore-query-collection" tool allows querying collections in Firestore.
aliases:
- /resources/tools/firestore-query-collection
---
## About
The `firestore-query-collection` tool allows you to query Firestore collections
with filters, ordering, and limit capabilities.
## Configuration
@@ -21,8 +10,9 @@ To use this tool, you need to configure it in your YAML configuration file:
sources:
my-firestore:
kind: firestore
project: my-gcp-project
database: "(default)"
tools:
query_collection:
@@ -33,18 +23,17 @@ tools:
## Parameters
| **parameters**   |   **type**   | **required** | **default** | **description**                                                         |
|------------------|:------------:|:------------:|:-----------:|-------------------------------------------------------------------------|
| `collectionPath` |    string    |     true     |      -      | The path to the Firestore collection to query                           |
| `filters`        |    array     |    false     |      -      | Array of filter objects (as JSON strings) to apply to the query         |
| `orderBy`        |    string    |    false     |      -      | JSON string specifying field and direction to order results             |
| `limit`          |   integer    |    false     |     100     | Maximum number of documents to return                                   |
| `analyzeQuery`   |   boolean    |    false     |    false    | If true, returns query explain metrics including execution statistics   |
### Filter Format
Each filter in the `filters` array should be a JSON string with the following
structure:
```json
{
@@ -55,7 +44,6 @@ structure:
```
Supported operators:
- `<` - Less than
- `<=` - Less than or equal to
- `>` - Greater than
@@ -68,12 +56,10 @@ Supported operators:
- `not-in` - Field value is not in the specified array
Value types supported:
- String: `"value": "text"`
- Number: `"value": 123` or `"value": 45.67`
- Boolean: `"value": true` or `"value": false`
- Array: `"value": ["item1", "item2"]` (for `in`, `not-in`, `array-contains-any`
  operators)
### OrderBy Format
@@ -87,7 +73,6 @@ The `orderBy` parameter should be a JSON string with the following structure:
```
Direction values:
- `ASCENDING`
- `DESCENDING`
@@ -169,8 +154,7 @@ The tool returns an array of documents, where each document includes:
### Response with Query Analysis (analyzeQuery = true)
When `analyzeQuery` is set to true, the tool returns a single object containing
documents and explain metrics:
```json
{
@@ -207,7 +191,6 @@ documents and explain metrics:
## Error Handling
The tool will return errors for:
- Invalid collection path
- Malformed filter JSON
- Unsupported operators

View File

@@ -1,18 +1,12 @@
---
title: "firestore-validate-rules"
type: docs
weight: 6
date: 2025-01-07
description: >
  A "firestore-validate-rules" tool validates Firestore security rules syntax and semantic correctness without deploying them. It provides detailed error reporting with source positions and code snippets.
aliases:
- /resources/tools/firestore-validate-rules
---
## Overview
The `firestore-validate-rules` tool validates Firestore security rules syntax
and semantic correctness without deploying them. It provides detailed error
reporting with source positions and code snippets.
## Configuration
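
A minimal configuration might look like this sketch (the source name is a placeholder):

```yaml
tools:
  validate_rules:
    kind: firestore-validate-rules
    source: my-firestore-source
    description: Validates Firestore security rules without deploying them.
```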
@@ -30,9 +24,9 @@ This tool requires authentication if the source requires authentication.
## Parameters
| **parameters** | **type** | **required** | **description**                              |
|----------------|:--------:|:------------:|----------------------------------------------|
| source         |  string  |     true     | The Firestore Rules source code to validate  |
## Response
@@ -40,20 +34,20 @@ The tool returns a `ValidationResult` object containing:
```json
{
  "valid": boolean,            // Whether the rules are valid
  "issueCount": number,        // Number of issues found
  "formattedIssues": string,   // Human-readable formatted issues
  "rawIssues": [               // Array of raw issue objects
    {
      "sourcePosition": {
        "fileName": string,
        "line": number,
        "column": number,
        "currentOffset": number,
        "endOffset": number
      },
      "description": string,
      "severity": string       // e.g., "ERROR", "WARNING"
    }
  ]
}
@@ -104,7 +98,6 @@ The tool returns a `ValidationResult` object containing:
## Error Handling
The tool will return errors for:
- Missing or empty `source` parameter
- API errors when calling the Firebase Rules service
- Network connectivity issues
@@ -118,7 +111,5 @@ The tool will return errors for:
## Related Tools
- [firestore-get-rules]({{< ref "firestore-get-rules" >}}): Retrieve current
  active rules
- [firestore-query-collection]({{< ref "firestore-query-collection" >}}): Test
  rules by querying collections

View File

@@ -19,7 +19,6 @@ It's compatible with the following sources:
- [looker](../sources/looker.md)
`looker-query` takes eight parameters:
1. the `model`
2. the `explore`
3. the `fields` list

View File

@@ -19,7 +19,6 @@ It's compatible with the following sources:
- [looker](../sources/looker.md)
`looker-query-sql` takes eight parameters:
1. the `model`
2. the `explore`
3. the `fields` list

View File

@@ -1,7 +0,0 @@
---
title: "MongoDB"
type: docs
weight: 1
description: >
Tools that work with the MongoDB Source.
---

View File

@@ -1,81 +0,0 @@
---
title: "mongodb-aggregate"
type: docs
weight: 1
description: >
A "mongodb-aggregate" tool executes a multi-stage aggregation pipeline against a MongoDB collection.
aliases:
- /resources/tools/mongodb-aggregate
---
## About
The `mongodb-aggregate` tool is the most powerful query tool for MongoDB,
allowing you to process data through a multi-stage pipeline. Each stage
transforms the documents as they pass through, enabling complex operations like
grouping, filtering, reshaping documents, and performing calculations.
The core of this tool is the `pipelinePayload`, which must be a string
containing a **JSON array of pipeline stage documents**. The tool returns a JSON
array of documents produced by the final stage of the pipeline.
A `readOnly` flag can be set to `true` as a safety measure to ensure the
pipeline does not contain any write stages (like `$out` or `$merge`).
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
## Example
Here is an example that calculates the average price and total count of products
for each category, but only for products with an "active" status.
```yaml
tools:
get_category_stats:
kind: mongodb-aggregate
source: my-mongo-source
description: Calculates average price and count of products, grouped by category.
database: ecommerce
collection: products
readOnly: true
pipelinePayload: |
[
{
"$match": {
"status": {{json .status_filter}}
}
},
{
"$group": {
"_id": "$category",
"average_price": { "$avg": "$price" },
"item_count": { "$sum": 1 }
}
},
{
"$sort": {
"average_price": -1
}
}
]
pipelineParams:
- name: status_filter
type: string
description: The product status to filter by (e.g., "active").
```
## Reference
| **field** | **type** | **required** | **description** |
|:----------------|:---------|:-------------|:---------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-aggregate`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection to run the aggregation on. |
| pipelinePayload | string | true | A JSON array of aggregation stage documents, provided as a string. Uses `{{json .param_name}}` for templating. |
| pipelineParams | list | true | A list of parameter objects that define the variables used in the `pipelinePayload`. |
| canonical | bool | false | Determines if the pipeline string is parsed using MongoDB's Canonical or Relaxed Extended JSON format. |
| readOnly | bool | false | If `true`, the tool will fail if the pipeline contains write stages (`$out` or `$merge`). Defaults to `false`. |

View File

@@ -1,57 +0,0 @@
---
title: "mongodb-delete-many"
type: docs
weight: 1
description: >
A "mongodb-delete-many" tool deletes all documents from a MongoDB collection that match a filter.
aliases:
- /resources/tools/mongodb-delete-many
---
## About
The `mongodb-delete-many` tool performs a **bulk destructive operation**,
deleting **ALL** documents from a collection that match a specified filter.
The tool returns the total count of documents that were deleted. If the filter
does not match any documents (i.e., the deleted count is 0), the tool will
return an error.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here is an example that performs a cleanup task by deleting all products from
the `inventory` collection that belong to a discontinued brand.
```yaml
tools:
retire_brand_products:
kind: mongodb-delete-many
source: my-mongo-source
description: Deletes all products from a specified discontinued brand.
database: ecommerce
collection: inventory
filterPayload: |
{ "brand_name": {{json .brand_to_delete}} }
filterParams:
- name: brand_to_delete
type: string
description: The name of the discontinued brand whose products should be deleted.
```
## Reference
| **field** | **type** | **required** | **description** |
|:--------------|:---------|:-------------|:--------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-delete-many`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection from which to delete documents. |
| filterPayload | string | true | The MongoDB query filter document to select the documents for deletion. Uses `{{json .param_name}}` for templating. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |

View File

@@ -1,61 +0,0 @@
---
title: "mongodb-delete-one"
type: docs
weight: 1
description: >
A "mongodb-delete-one" tool deletes a single document from a MongoDB collection.
aliases:
- /resources/tools/mongodb-delete-one
---
## About
The `mongodb-delete-one` tool performs a destructive operation, deleting the
**first single document** that matches a specified filter from a MongoDB
collection.
If the filter matches multiple documents, only the first one found by the
database will be deleted. This tool is useful for removing specific entries,
such as a user account or a single item from an inventory based on a unique ID.
The tool returns the number of documents deleted, which will be either `1` if a
document was found and deleted, or `0` if no matching document was found.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here is an example that deletes a specific user account from the `users`
collection by matching their unique email address. This is a permanent action.
```yaml
tools:
delete_user_account:
kind: mongodb-delete-one
source: my-mongo-source
description: Permanently deletes a user account by their email address.
database: user_data
collection: users
filterPayload: |
{ "email": {{json .email_address}} }
filterParams:
- name: email_address
type: string
description: The email of the user account to delete.
```
## Reference
| **field** | **type** | **required** | **description** |
|:--------------|:---------|:-------------|:-------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-delete-one`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection from which to delete a document. |
| filterPayload | string | true | The MongoDB query filter document to select the document for deletion. Uses `{{json .param_name}}` for templating. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |

View File

@@ -1,68 +0,0 @@
---
title: "mongodb-find-one"
type: docs
weight: 1
description: >
A "mongodb-find-one" tool finds and retrieves a single document from a MongoDB collection.
aliases:
- /resources/tools/mongodb-find-one
---
## About
A `mongodb-find-one` tool is used to retrieve the **first single document** that
matches a specified filter from a MongoDB collection. If multiple documents
match the filter, you can use `sort` options to control which document is
returned. Otherwise, the selection is not guaranteed.
The tool returns a single JSON object representing the document, wrapped in a
JSON array.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here's a common use case: finding a specific user by their unique email address
and returning their profile information, while excluding sensitive fields like
the password hash.
```yaml
tools:
get_user_profile:
kind: mongodb-find-one
source: my-mongo-source
description: Retrieves a user's profile by their email address.
database: user_data
collection: profiles
filterPayload: |
{ "email": {{json .email}} }
filterParams:
- name: email
type: string
description: The email address of the user to find.
projectPayload: |
{
"password_hash": 0,
"login_history": 0
}
```
## Reference
| **field** | **type** | **required** | **description** |
|:---------------|:---------|:-------------|:---------------------------------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-find-one`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database to query. |
| collection | string | true | The name of the MongoDB collection to query. |
| filterPayload | string | true | The MongoDB query filter document to select the document. Uses `{{json .param_name}}` for templating. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |
| projectPayload | string | false | An optional MongoDB projection document to specify which fields to include (1) or exclude (0) in the result. |
| projectParams | list | false | A list of parameter objects for the `projectPayload`. |
| sortPayload | string | false | An optional MongoDB sort document. Useful for selecting which document to return if the filter matches multiple (e.g., get the most recent). |
| sortParams | list | false | A list of parameter objects for the `sortPayload`. |

View File

@@ -1,76 +0,0 @@
---
title: "mongodb-find"
type: docs
weight: 1
description: >
A "mongodb-find" tool finds and retrieves documents from a MongoDB collection.
aliases:
- /resources/tools/mongodb-find
---
## About
A `mongodb-find` tool is used to query a MongoDB collection and retrieve
documents that match a specified filter. It's a flexible tool that allows you to
shape the output by selecting specific fields (**projection**), ordering the
results (**sorting**), and restricting the number of documents returned
(**limiting**).
The tool returns a JSON array of the documents found.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
## Example
Here's an example that finds up to 10 users from the `customers` collection who
live in a specific city. The results are sorted by their last name, and only
their first name, last name, and email are returned.
```yaml
tools:
find_local_customers:
kind: mongodb-find
source: my-mongo-source
description: Finds customers by city, sorted by last name.
database: crm
collection: customers
limit: 10
filterPayload: |
{ "address.city": {{json .city}} }
filterParams:
- name: city
type: string
description: The city to search for customers in.
projectPayload: |
{
"first_name": 1,
"last_name": 1,
"email": 1,
"_id": 0
}
sortPayload: |
{ "last_name": {{json .sort_order}} }
sortParams:
- name: sort_order
type: integer
description: The sort order (1 for ascending, -1 for descending).
```
## Reference
| **field** | **type** | **required** | **description** |
|:---------------|:---------|:-------------|:----------------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-find`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database to query. |
| collection | string | true | The name of the MongoDB collection to query. |
| filterPayload | string | true | The MongoDB query filter document to select which documents to return. Uses `{{json .param_name}}` for templating. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |
| projectPayload | string | false | An optional MongoDB projection document to specify which fields to include (1) or exclude (0) in the results. |
| projectParams | list | false | A list of parameter objects for the `projectPayload`. |
| sortPayload | string | false | An optional MongoDB sort document to define the order of the returned documents. Use 1 for ascending and -1 for descending. |
| sortParams | list | false | A list of parameter objects for the `sortPayload`. |
| limit | integer | false | An optional integer specifying the maximum number of documents to return. |

View File

@@ -1,58 +0,0 @@
---
title: "mongodb-insert-many"
type: docs
weight: 1
description: >
A "mongodb-insert-many" tool inserts multiple new documents into a MongoDB collection.
aliases:
- /resources/tools/mongodb-insert-many
---
## About
The `mongodb-insert-many` tool inserts **multiple new documents** into a
specified MongoDB collection in a single bulk operation. This is highly
efficient for adding large amounts of data at once.
This tool takes one required parameter named `data`. This `data` parameter must
be a string containing a **JSON array of document objects**. Upon successful
insertion, the tool returns a JSON array containing the unique `_id` of **each**
new document that was created.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here is an example configuration for a tool that logs multiple events at once.
```yaml
tools:
log_batch_events:
kind: mongodb-insert-many
source: my-mongo-source
description: Inserts a batch of event logs into the database.
database: logging
collection: events
canonical: true
```
An LLM would call this tool by providing an array of documents as a JSON string
in the `data` parameter, like this:
`tool_code: log_batch_events(data='[{"event": "login", "user": "user1"}, {"event": "click", "user": "user2"}, {"event": "logout", "user": "user1"}]')`
---
## Reference
| **field** | **type** | **required** | **description** |
|:------------|:---------|:-------------|:---------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-insert-many`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection into which the documents will be inserted. |
| canonical | bool | true | Determines if the data string is parsed using MongoDB's Canonical or Relaxed Extended JSON format. |

View File

@@ -1,53 +0,0 @@
---
title: "mongodb-insert-one"
type: docs
weight: 1
description: >
A "mongodb-insert-one" tool inserts a single new document into a MongoDB collection.
aliases:
- /resources/tools/mongodb-insert-one
---
## About
The `mongodb-insert-one` tool inserts a **single new document** into a specified
MongoDB collection.
This tool takes one required parameter named `data`, which must be a string
containing the JSON object you want to insert. Upon successful insertion, the
tool returns the unique `_id` of the newly created document.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
## Example
Here is an example configuration for a tool that adds a new user to a `users`
collection.
```yaml
tools:
create_new_user:
kind: mongodb-insert-one
source: my-mongo-source
description: Creates a new user record in the database.
database: user_data
collection: users
canonical: false
```
An LLM would call this tool by providing the document as a JSON string in the
`data` parameter, like this:
`tool_code: create_new_user(data='{"email": "new.user@example.com", "name": "Jane Doe", "status": "active"}')`
## Reference
| **field** | **type** | **required** | **description** |
|:------------|:---------|:-------------|:---------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-insert-one`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection into which the document will be inserted. |
| canonical | bool | true | Determines if the data string is parsed using MongoDB's Canonical or Relaxed Extended JSON format. |

View File

@@ -1,72 +0,0 @@
---
title: "mongodb-update-many"
type: docs
weight: 1
description: >
A "mongodb-update-many" tool updates all documents in a MongoDB collection that match a filter.
aliases:
- /resources/tools/mongodb-update-many
---
## About
A `mongodb-update-many` tool updates **all** documents within a specified
MongoDB collection that match a given filter. It locates the documents using a
`filterPayload` and applies the modifications defined in an `updatePayload`.
The tool returns an array of three integers: `[ModifiedCount, UpsertedCount,
MatchedCount]`.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here's an example configuration. This tool applies a discount to all items
within a specific category and also marks them as being on sale.
```yaml
tools:
apply_category_discount:
kind: mongodb-update-many
source: my-mongo-source
description: Use this tool to apply a discount to all items in a given category.
database: products
collection: inventory
filterPayload: |
{ "category": {{json .category_name}} }
filterParams:
- name: category_name
type: string
description: The category of items to update.
updatePayload: |
{
"$mul": { "price": {{json .discount_multiplier}} },
"$set": { "on_sale": true }
}
updateParams:
- name: discount_multiplier
type: number
description: The multiplier to apply to the price (e.g., 0.8 for a 20% discount).
canonical: false
upsert: false
```
## Reference
| **field** | **type** | **required** | **description** |
|:--------------|:---------|:-------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-update-many`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection in which to update documents. |
| filterPayload | string | true | The MongoDB query filter document to select the documents for updating. It's written as a Go template, using `{{json .param_name}}` to insert parameters. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |
| updatePayload | string   | true         | The MongoDB update document. It's written as a Go template, using `{{json .param_name}}` to insert parameters.                                                                                                               |
| updateParams | list | true | A list of parameter objects that define the variables used in the `updatePayload`. |
| canonical | bool | true | Determines if the `filterPayload` and `updatePayload` strings are parsed using MongoDB's Canonical or Relaxed Extended JSON format. **Canonical** is stricter about type representation, while **Relaxed** is more lenient. |
| upsert | bool | false | If `true`, a new document is created if no document matches the `filterPayload`. Defaults to `false`. |

View File

@@ -1,72 +0,0 @@
---
title: "mongodb-update-one"
type: docs
weight: 1
description: >
A "mongodb-update-one" tool updates a single document in a MongoDB collection.
aliases:
- /resources/tools/mongodb-update-one
---
## About
A `mongodb-update-one` tool updates a single document within a specified MongoDB
collection. It locates the document to be updated using a `filterPayload` and
applies modifications defined in an `updatePayload`. If the filter matches
multiple documents, only the first one found will be updated.
This tool is compatible with the following source kind:
* [`mongodb`](../../sources/mongodb.md)
---
## Example
Here's an example of a `mongodb-update-one` tool configuration. This tool
updates the `stock` and `status` fields of a document in the `inventory`
collection where the `item` field matches a provided value. If no matching
document is found, the `upsert: true` option will create a new one.
```yaml
tools:
update_inventory_item:
kind: mongodb-update-one
source: my-mongo-source
description: Use this tool to update an item's stock and status in the inventory.
database: products
collection: inventory
filterPayload: |
{ "item": {{json .item_name}} }
filterParams:
- name: item_name
type: string
description: The name of the item to update.
updatePayload: |
{ "$set": { "stock": {{json .new_stock}}, "status": {{json .new_status}} } }
updateParams:
- name: new_stock
type: integer
description: The new stock quantity.
- name: new_status
type: string
description: The new status of the item (e.g., "In Stock", "Backordered").
canonical: false
upsert: true
```
## Reference
| **field** | **type** | **required** | **description** |
|:--------------|:---------|:-------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| kind | string | true | Must be `mongodb-update-one`. |
| source | string | true | The name of the `mongodb` source to use. |
| description | string | true | A description of the tool that is passed to the LLM. |
| database | string | true | The name of the MongoDB database containing the collection. |
| collection | string | true | The name of the MongoDB collection to update a document in. |
| filterPayload | string | true | The MongoDB query filter document to select the document for updating. It's written as a Go template, using `{{json .param_name}}` to insert parameters. |
| filterParams | list | true | A list of parameter objects that define the variables used in the `filterPayload`. |
| updatePayload | string | true | The MongoDB update document, which specifies the modifications. This often uses update operators like `$set`. It's written as a Go template, using `{{json .param_name}}` to insert parameters. |
| updateParams | list | true | A list of parameter objects that define the variables used in the `updatePayload`. |
| canonical | bool | true | Determines if the `updatePayload` string is parsed using MongoDB's Canonical or Relaxed Extended JSON format. **Canonical** is stricter about type representation (e.g., `{"$numberInt": "42"}`), while **Relaxed** is more lenient (e.g., `42`). |
| upsert | bool | false | If `true`, a new document is created if no document matches the `filterPayload`. Defaults to `false`. |

View File

@@ -11,24 +11,15 @@ aliases:
## About
A `neo4j-execute-cypher` tool executes an arbitrary Cypher query provided as a
string parameter against a Neo4j database. It's designed to be a flexible tool
for interacting with the database when a pre-defined query is not sufficient.
This tool is compatible with any of the following sources:
- [neo4j](../sources/neo4j.md)
For security, the tool can be configured to be read-only. If the `readOnly` flag
is set to `true`, the tool will analyze the incoming Cypher query and reject any
write operations (like `CREATE`, `MERGE`, `DELETE`, etc.) before execution.
The Cypher query uses standard [Neo4j
Cypher](https://neo4j.com/docs/cypher-manual/current/queries/) syntax and
supports all Cypher features, including pattern matching, filtering, and
aggregation.
`neo4j-execute-cypher` takes one input parameter `cypher` and runs the Cypher
query against the `source`.
> **Note:** This tool is intended for developer assistant workflows with
> human-in-the-loop and shouldn't be used for production agents.
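
A minimal configuration might look like this sketch, using the fields from the reference below (the source name is a placeholder):

```yaml
tools:
  run_cypher:
    kind: neo4j-execute-cypher
    source: my-neo4j-source
    description: Executes an arbitrary Cypher query against the database.
    readOnly: true
```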
@@ -59,3 +50,4 @@ tools:
| source | string | true | Name of the source the Cypher query should execute on. |
| description | string | true | Description of the tool that is passed to the LLM. |
| readOnly | boolean | false | If set to `true`, the tool will reject any write operations in the Cypher query. Default is `false`. |

View File

@@ -1,42 +0,0 @@
---
title: "neo4j-schema"
type: "docs"
weight: 1
description: >
A "neo4j-schema" tool extracts a comprehensive schema from a Neo4j
database.
aliases:
- /resources/tools/neo4j-schema
---
## About
A `neo4j-schema` tool connects to a Neo4j database and extracts its complete schema information. It runs multiple queries concurrently to efficiently gather details about node labels, relationships, properties, constraints, and indexes.
The tool automatically detects if the APOC (Awesome Procedures on Cypher) library is available. If so, it uses APOC procedures like `apoc.meta.schema` for a highly detailed overview of the database structure; otherwise, it falls back to using native Cypher queries.
The extracted schema is **cached** to improve performance for subsequent requests. The output is a structured JSON object containing all the schema details, which can be invaluable for providing database context to an LLM. This tool is compatible with a `neo4j` source and takes no parameters.
## Example
```yaml
tools:
get_movie_db_schema:
kind: neo4j-schema
source: my-neo4j-movies-instance
description: |
Use this tool to get the full schema of the movie database.
This provides information on all available node labels (like Movie, Person),
relationships (like ACTED_IN), and the properties on each.
This tool takes no parameters.
# Optional configuration to cache the schema for 2 hours
cacheExpireMinutes: 120
```
## Reference
| **field** | **type** | **required** | **description** |
|---------------------|:----------:|:------------:|-------------------------------------------------------------------------------------------------|
| kind                | string     | true         | Must be `neo4j-schema`.                                                                           |
| source | string | true | Name of the source the schema should be extracted from. |
| description | string | true | Description of the tool that is passed to the LLM. |
| cacheExpireMinutes | integer | false | Cache expiration time in minutes. Defaults to 60. |

View File

@@ -28,8 +28,7 @@ inserted according to their name: e.g. `@name`.
> Parameters cannot be used as substitutes for identifiers, column names, table
> names, or other parts of the query.
[gsql-dml]: https://cloud.google.com/spanner/docs/reference/standard-sql/dml-syntax
### PostgreSQL

View File

@@ -1,7 +0,0 @@
---
title: "Utility tools"
type: docs
weight: 1
description: >
Tools that provide utility.
---

View File

@@ -6,14 +6,10 @@ description: >
Wait for a long-running AlloyDB operation to complete.
---
The `alloydb-wait-for-operation` tool is a utility tool that waits for a
long-running AlloyDB operation to complete. It does this by polling the AlloyDB
Admin API operation status endpoint until the operation is finished, using
exponential backoff.
{{< notice info >}}
This tool is intended for developer assistant workflows with human-in-the-loop
and shouldn't be used for production agents.
This tool is intended for developer assistant workflows with human-in-the-loop and shouldn't be used for production agents.
{{< /notice >}}
## Example
@@ -44,7 +40,7 @@ tools:
| ----------- | :------: | :----------: | ---------------------------------------------------------------------------------------------------------------- |
| kind | string | true | Must be "alloydb-wait-for-operation". |
| source | string | true | Name of the source the HTTP request should be sent to. |
| description | string | true | A description of the tool. |
| description | string | true | A description of the tool. |
| delay | duration | false | The initial delay between polling requests (e.g., `3s`). Defaults to 3 seconds. |
| maxDelay | duration | false | The maximum delay between polling requests (e.g., `4m`). Defaults to 4 minutes. |
| multiplier | float | false | The multiplier for the polling delay. The delay is multiplied by this value after each request. Defaults to 2.0. |
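As a sketch of how the polling fields combine (the tool and source names are illustrative):

```yaml
tools:
  wait_for_operation:
    kind: alloydb-wait-for-operation
    source: alloydb-api-source
    description: Waits for a long-running AlloyDB operation to complete.
    delay: 3s        # initial delay between polls
    maxDelay: 4m     # upper bound on the backoff
    multiplier: 2.0  # delay is multiplied by this after each request
```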

View File

@@ -10,16 +10,13 @@ aliases:
## About
A `wait` tool pauses execution for a specified duration. This can be useful in
workflows where a delay is needed between steps.
A `wait` tool pauses execution for a specified duration. This can be useful in workflows where a delay is needed between steps.
`wait` takes one input parameter `duration` which is a string representing the
time to wait (e.g., "10s", "2m", "1h").
`wait` takes one input parameter `duration` which is a string representing the time to wait (e.g., "10s", "2m", "1h").
{{< notice info >}}
This tool is intended for developer assistant workflows with human-in-the-loop
and shouldn't be used for production agents.
{{< /notice >}}
{{% notice info %}}
This tool is intended for developer assistant workflows with human-in-the-loop and shouldn't be used for production agents.
{{% /notice %}}
## Example
@@ -33,8 +30,8 @@ tools:
## Reference
| **field** | **type** | **required** | **description** |
|-------------|:--------------:|:------------:|-------------------------------------------------------|
| kind | string | true | Must be "wait". |
| description | string | true | Description of the tool that is passed to the LLM. |
| timeout | string | true | The default duration the tool can wait for. |
| **field** | **type** | **required** | **description** |
|-------------|:------------------------------------------:|:------------:|--------------------------------------------------------------------------------------------------|
| kind | string | true | Must be "wait". |
| description | string | true | Description of the tool that is passed to the LLM. |
| timeout | string | true | The default duration the tool can wait for. |
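A minimal configuration sketch (the tool name is illustrative):

```yaml
tools:
  pause:
    kind: wait
    description: Pause execution between workflow steps.
    timeout: 30s
```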

View File

@@ -220,7 +220,7 @@
},
"outputs": [],
"source": [
"version = \"0.10.0\" # x-release-please-version\n",
"version = \"0.9.0\" # x-release-please-version\n",
"! curl -O https://storage.googleapis.com/genai-toolbox/v{version}/linux/amd64/toolbox\n",
"\n",
"# Make the binary executable\n",

View File

@@ -179,7 +179,7 @@ to use BigQuery, and then run the Toolbox server.
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -292,10 +292,8 @@ to use BigQuery, and then run the Toolbox server.
```bash
./toolbox --tools-file "tools.yaml"
```
{{< notice note >}}
Toolbox enables dynamic reloading by default. To disable, use the
`--disable-reload` flag.
Toolbox enables dynamic reloading by default. To disable, use the `--disable-reload` flag.
{{< /notice >}}
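For example, to start the server with reloading disabled:

```bash
./toolbox --tools-file "tools.yaml" --disable-reload
```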
## Step 3: Connect your agent to Toolbox

View File

@@ -98,7 +98,7 @@ In this section, we will download Toolbox, configure our tools in a
<!-- {x-release-please-start-version} -->
```bash
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
curl -O https://storage.googleapis.com/genai-toolbox/v0.10.0/$OS/toolbox
curl -O https://storage.googleapis.com/genai-toolbox/v0.9.0/$OS/toolbox
```
<!-- {x-release-please-end} -->
@@ -208,8 +208,7 @@ In this section, we will download Toolbox, configure our tools in a
1. Type `y` when it asks to install the inspector package.
1. It should show the following when the MCP Inspector is up and running (please
take note of `<YOUR_SESSION_TOKEN>`):
1. It should show the following when the MCP Inspector is up and running (please take note of `<YOUR_SESSION_TOKEN>`):
```bash
Starting MCP inspector...
@@ -227,8 +226,7 @@ In this section, we will download Toolbox, configure our tools in a
1. For `URL`, type in `http://127.0.0.1:5000/mcp`.
1. For `Configuration` -> `Proxy Session Token`, make sure
`<YOUR_SESSION_TOKEN>` is present.
1. For `Configuration` -> `Proxy Session Token`, make sure `<YOUR_SESSION_TOKEN>` is present.
1. Click Connect.
@@ -238,4 +236,4 @@ In this section, we will download Toolbox, configure our tools in a
![inspector_tools](./inspector_tools.png)
1. Test out your tools here!
1. Test out your tools here!

View File

@@ -100,7 +100,6 @@ In this section, we will download Toolbox and run the Toolbox server.
- looker-toolbox__get_dimensions
- looker-toolbox__run_look
```
1. Start exploring your Looker instance with commands like
`Find an explore to see orders` or `show me my current
inventory broken down by item category`.

1
go.mod
View File

@@ -9,7 +9,6 @@ require (
cloud.google.com/go/bigquery v1.69.0
cloud.google.com/go/bigtable v1.38.0
cloud.google.com/go/cloudsqlconn v1.17.3
cloud.google.com/go/dataplex v1.26.0
cloud.google.com/go/firestore v1.18.0
cloud.google.com/go/spanner v1.83.0
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0

2
go.sum
View File

@@ -236,8 +236,6 @@ cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX
cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
cloud.google.com/go/dataplex v1.26.0 h1:nu8/KrLR5v62L1lApGNgm61Oq+xaa2bS9rgc1csjqE0=
cloud.google.com/go/dataplex v1.26.0/go.mod h1:12R9nlLUzxOscbb2HgoYnkGNibmv4sXEVMXxrdw2a90=
cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=

View File

@@ -29,11 +29,8 @@ func TestLoadPrebuiltToolYAMLs(t *testing.T) {
"cloud-sql-mssql",
"cloud-sql-mysql",
"cloud-sql-postgres",
"dataplex",
"firestore",
"looker",
"mssql",
"mysql",
"postgres",
"spanner-postgres",
"spanner",
@@ -75,10 +72,7 @@ func TestGetPrebuiltTool(t *testing.T) {
cloudsqlpg_config, _ := Get("cloud-sql-postgres")
cloudsqlmysql_config, _ := Get("cloud-sql-mysql")
cloudsqlmssql_config, _ := Get("cloud-sql-mssql")
dataplex_config, _ := Get("dataplex")
firestoreconfig, _ := Get("firestore")
mysql_config, _ := Get("mysql")
mssql_config, _ := Get("mssql")
postgresconfig, _ := Get("postgres")
spanner_config, _ := Get("spanner")
spannerpg_config, _ := Get("spanner-postgres")
@@ -100,18 +94,9 @@ func TestGetPrebuiltTool(t *testing.T) {
if len(cloudsqlmssql_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mssql prebuilt tools yaml")
}
if len(dataplex_config) <= 0 {
t.Fatalf("unexpected error: could not fetch dataplex prebuilt tools yaml")
}
if len(firestoreconfig) <= 0 {
t.Fatalf("unexpected error: could not fetch firestore prebuilt tools yaml")
}
if len(mysql_config) <= 0 {
t.Fatalf("unexpected error: could not fetch mysql prebuilt tools yaml")
}
if len(mssql_config) <= 0 {
t.Fatalf("unexpected error: could not fetch mssql prebuilt tools yaml")
}
if len(postgresconfig) <= 0 {
t.Fatalf("unexpected error: could not fetch postgres prebuilt tools yaml")
}

View File

@@ -1,15 +0,0 @@
sources:
dataplex-source:
kind: "dataplex"
project: ${DATAPLEX_PROJECT}
tools:
dataplex_search_entries:
kind: dataplex-search-entries
source: dataplex-source
description: |
Use this tool to search for entries in Dataplex Catalog that represent data assets (e.g. tables, views, models) based on the provided search query.
toolsets:
dataplex-tools:
- dataplex_search_entries

View File

@@ -1,269 +0,0 @@
sources:
mssql-source:
kind: mssql
host: ${MSSQL_HOST}
port: ${MSSQL_PORT}
database: ${MSSQL_DATABASE}
user: ${MSSQL_USER}
password: ${MSSQL_PASSWORD}
tools:
execute_sql:
kind: mssql-execute-sql
source: mssql-source
description: Use this tool to execute SQL.
list_tables:
kind: mssql-sql
source: mssql-source
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
WITH table_info AS (
SELECT
t.object_id AS table_oid,
s.name AS schema_name,
t.name AS table_name,
dp.name AS table_owner, -- Schema's owner principal name
CAST(ep.value AS NVARCHAR(MAX)) AS table_comment, -- Cast for JSON compatibility
CASE
WHEN EXISTS ( -- Check if the table has more than one partition for any of its indexes or heap
SELECT 1 FROM sys.partitions p
WHERE p.object_id = t.object_id AND p.partition_number > 1
) THEN 'PARTITIONED TABLE'
ELSE 'TABLE'
END AS object_type_detail
FROM
sys.tables t
INNER JOIN
sys.schemas s ON t.schema_id = s.schema_id
LEFT JOIN
sys.database_principals dp ON s.principal_id = dp.principal_id
LEFT JOIN
sys.extended_properties ep ON ep.major_id = t.object_id AND ep.minor_id = 0 AND ep.class = 1 AND ep.name = 'MS_Description'
WHERE
t.type = 'U' -- User tables
AND s.name NOT IN ('sys', 'INFORMATION_SCHEMA', 'guest', 'db_owner', 'db_accessadmin', 'db_backupoperator', 'db_datareader', 'db_datawriter', 'db_ddladmin', 'db_denydatareader', 'db_denydatawriter', 'db_securityadmin')
AND (@table_names IS NULL OR LTRIM(RTRIM(@table_names)) = '' OR t.name IN (SELECT LTRIM(RTRIM(value)) FROM STRING_SPLIT(@table_names, ',')))
),
columns_info AS (
SELECT
c.object_id AS table_oid,
c.name AS column_name,
CONCAT(
UPPER(TY.name), -- Base type name
CASE
WHEN TY.name IN ('char', 'varchar', 'nchar', 'nvarchar', 'binary', 'varbinary') THEN
CONCAT('(', IIF(c.max_length = -1, 'MAX', CAST(c.max_length / CASE WHEN TY.name IN ('nchar', 'nvarchar') THEN 2 ELSE 1 END AS VARCHAR(10))), ')')
WHEN TY.name IN ('decimal', 'numeric') THEN
CONCAT('(', c.precision, ',', c.scale, ')')
WHEN TY.name IN ('datetime2', 'datetimeoffset', 'time') THEN
CONCAT('(', c.scale, ')')
ELSE ''
END
) AS data_type,
c.column_id AS column_ordinal_position,
IIF(c.is_nullable = 0, CAST(1 AS BIT), CAST(0 AS BIT)) AS is_not_nullable,
dc.definition AS column_default,
CAST(epc.value AS NVARCHAR(MAX)) AS column_comment
FROM
sys.columns c
JOIN
table_info ti ON c.object_id = ti.table_oid
JOIN
sys.types TY ON c.user_type_id = TY.user_type_id AND TY.is_user_defined = 0 -- Ensure we get base types
LEFT JOIN
sys.default_constraints dc ON c.object_id = dc.parent_object_id AND c.column_id = dc.parent_column_id
LEFT JOIN
sys.extended_properties epc ON epc.major_id = c.object_id AND epc.minor_id = c.column_id AND epc.class = 1 AND epc.name = 'MS_Description'
),
constraints_info AS (
-- Primary Keys & Unique Constraints
SELECT
kc.parent_object_id AS table_oid,
kc.name AS constraint_name,
REPLACE(kc.type_desc, '_CONSTRAINT', '') AS constraint_type, -- 'PRIMARY_KEY', 'UNIQUE'
STUFF((SELECT ', ' + col.name
FROM sys.index_columns ic
JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id
WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id
ORDER BY ic.key_ordinal
FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS constraint_columns,
NULL AS foreign_key_referenced_table,
NULL AS foreign_key_referenced_columns,
CASE kc.type
WHEN 'PK' THEN 'PRIMARY KEY (' + STUFF((SELECT ', ' + col.name FROM sys.index_columns ic JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')'
WHEN 'UQ' THEN 'UNIQUE (' + STUFF((SELECT ', ' + col.name FROM sys.index_columns ic JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')'
END AS constraint_definition
FROM sys.key_constraints kc
JOIN table_info ti ON kc.parent_object_id = ti.table_oid
UNION ALL
-- Foreign Keys
SELECT
fk.parent_object_id AS table_oid,
fk.name AS constraint_name,
'FOREIGN KEY' AS constraint_type,
STUFF((SELECT ', ' + pc.name
FROM sys.foreign_key_columns fkc
JOIN sys.columns pc ON fkc.parent_object_id = pc.object_id AND fkc.parent_column_id = pc.column_id
WHERE fkc.constraint_object_id = fk.object_id
ORDER BY fkc.constraint_column_id
FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS constraint_columns,
SCHEMA_NAME(rt.schema_id) + '.' + OBJECT_NAME(fk.referenced_object_id) AS foreign_key_referenced_table,
STUFF((SELECT ', ' + rc.name
FROM sys.foreign_key_columns fkc
JOIN sys.columns rc ON fkc.referenced_object_id = rc.object_id AND fkc.referenced_column_id = rc.column_id
WHERE fkc.constraint_object_id = fk.object_id
ORDER BY fkc.constraint_column_id
FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS foreign_key_referenced_columns,
OBJECT_DEFINITION(fk.object_id) AS constraint_definition
FROM sys.foreign_keys fk
JOIN sys.tables rt ON fk.referenced_object_id = rt.object_id
JOIN table_info ti ON fk.parent_object_id = ti.table_oid
UNION ALL
-- Check Constraints
SELECT
cc.parent_object_id AS table_oid,
cc.name AS constraint_name,
'CHECK' AS constraint_type,
NULL AS constraint_columns, -- Definition includes column context
NULL AS foreign_key_referenced_table,
NULL AS foreign_key_referenced_columns,
cc.definition AS constraint_definition
FROM sys.check_constraints cc
JOIN table_info ti ON cc.parent_object_id = ti.table_oid
),
indexes_info AS (
SELECT
i.object_id AS table_oid,
i.name AS index_name,
i.type_desc AS index_method, -- CLUSTERED, NONCLUSTERED, XML, etc.
i.is_unique,
i.is_primary_key AS is_primary,
STUFF((SELECT ', ' + c.name
FROM sys.index_columns ic
JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 0
ORDER BY ic.key_ordinal
FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS index_columns,
(
'COLUMNS: (' + ISNULL(STUFF((SELECT ', ' + c.name + CASE WHEN ic.is_descending_key = 1 THEN ' DESC' ELSE '' END
FROM sys.index_columns ic
JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 0
ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, ''), 'N/A') + ')' +
ISNULL(CHAR(13)+CHAR(10) + 'INCLUDE: (' + STUFF((SELECT ', ' + c.name
FROM sys.index_columns ic
JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 1
ORDER BY ic.index_column_id FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')', '') +
ISNULL(CHAR(13)+CHAR(10) + 'FILTER: (' + i.filter_definition + ')', '')
) AS index_definition_details
FROM
sys.indexes i
JOIN
table_info ti ON i.object_id = ti.table_oid
WHERE i.type <> 0 -- Exclude Heaps
AND i.name IS NOT NULL -- Exclude unnamed heap indexes; named indexes (PKs are often named) are preferred.
),
triggers_info AS (
SELECT
tr.parent_id AS table_oid,
tr.name AS trigger_name,
OBJECT_DEFINITION(tr.object_id) AS trigger_definition,
CASE tr.is_disabled WHEN 0 THEN 'ENABLED' ELSE 'DISABLED' END AS trigger_enabled_state
FROM
sys.triggers tr
JOIN
table_info ti ON tr.parent_id = ti.table_oid
WHERE
tr.is_ms_shipped = 0
AND tr.parent_class_desc = 'OBJECT_OR_COLUMN' -- DML Triggers on tables/views
)
SELECT
ti.schema_name,
ti.table_name AS object_name,
(
SELECT
ti.schema_name AS schema_name,
ti.table_name AS object_name,
ti.object_type_detail AS object_type,
ti.table_owner AS owner,
ti.table_comment AS comment,
JSON_QUERY(ISNULL((
SELECT
ci.column_name,
ci.data_type,
ci.column_ordinal_position,
ci.is_not_nullable,
ci.column_default,
ci.column_comment
FROM columns_info ci
WHERE ci.table_oid = ti.table_oid
ORDER BY ci.column_ordinal_position
FOR JSON PATH
), '[]')) AS columns,
JSON_QUERY(ISNULL((
SELECT
cons.constraint_name,
cons.constraint_type,
cons.constraint_definition,
JSON_QUERY(
CASE
WHEN cons.constraint_columns IS NOT NULL AND LTRIM(RTRIM(cons.constraint_columns)) <> ''
THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(cons.constraint_columns, ',')) + ']'
ELSE '[]'
END
) AS constraint_columns,
cons.foreign_key_referenced_table,
JSON_QUERY(
CASE
WHEN cons.foreign_key_referenced_columns IS NOT NULL AND LTRIM(RTRIM(cons.foreign_key_referenced_columns)) <> ''
THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(cons.foreign_key_referenced_columns, ',')) + ']'
ELSE '[]'
END
) AS foreign_key_referenced_columns
FROM constraints_info cons
WHERE cons.table_oid = ti.table_oid
FOR JSON PATH
), '[]')) AS constraints,
JSON_QUERY(ISNULL((
SELECT
ii.index_name,
ii.index_definition_details AS index_definition,
ii.is_unique,
ii.is_primary,
ii.index_method,
JSON_QUERY(
CASE
WHEN ii.index_columns IS NOT NULL AND LTRIM(RTRIM(ii.index_columns)) <> ''
THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(ii.index_columns, ',')) + ']'
ELSE '[]'
END
) AS index_columns
FROM indexes_info ii
WHERE ii.table_oid = ti.table_oid
FOR JSON PATH
), '[]')) AS indexes,
JSON_QUERY(ISNULL((
SELECT
tri.trigger_name,
tri.trigger_definition,
tri.trigger_enabled_state
FROM triggers_info tri
WHERE tri.table_oid = ti.table_oid
FOR JSON PATH
), '[]')) AS triggers
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER -- Creates a single JSON object for this table's details
) AS object_details
FROM
table_info ti
ORDER BY
ti.schema_name, ti.table_name;
parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
toolsets:
mssql-database-tools:
- execute_sql
- list_tables

View File

@@ -1,172 +0,0 @@
sources:
mysql-source:
kind: mysql
host: ${MYSQL_HOST}
port: ${MYSQL_PORT}
database: ${MYSQL_DATABASE}
user: ${MYSQL_USER}
password: ${MYSQL_PASSWORD}
queryTimeout: 30s # Optional
tools:
execute_sql:
kind: mysql-execute-sql
source: mysql-source
description: Use this tool to execute SQL.
list_tables:
kind: mysql-sql
source: mysql-source
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
SELECT
T.TABLE_SCHEMA AS schema_name,
T.TABLE_NAME AS object_name,
CONVERT( JSON_OBJECT(
'schema_name', T.TABLE_SCHEMA,
'object_name', T.TABLE_NAME,
'object_type', 'TABLE',
'owner', (
SELECT
IFNULL(U.GRANTEE, 'N/A')
FROM
INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
WHERE
U.TABLE_SCHEMA = T.TABLE_SCHEMA
LIMIT 1
),
'comment', IFNULL(T.TABLE_COMMENT, ''),
'columns', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'column_name', C.COLUMN_NAME,
'data_type', C.COLUMN_TYPE,
'ordinal_position', C.ORDINAL_POSITION,
'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
'column_default', C.COLUMN_DEFAULT,
'column_comment', IFNULL(C.COLUMN_COMMENT, '')
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.COLUMNS C
WHERE
C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
ORDER BY C.ORDINAL_POSITION
),
'constraints', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'constraint_name', TC.CONSTRAINT_NAME,
'constraint_type',
CASE TC.CONSTRAINT_TYPE
WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
WHEN 'UNIQUE' THEN 'UNIQUE'
ELSE TC.CONSTRAINT_TYPE
END,
'constraint_definition', '',
'constraint_columns', (
SELECT
IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
FROM
INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
WHERE
KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
AND KCU.TABLE_NAME = TC.TABLE_NAME
ORDER BY KCU.ORDINAL_POSITION
),
'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
(SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
AND FKCU.TABLE_NAME = TC.TABLE_NAME
AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
ORDER BY FKCU.ORDINAL_POSITION),
NULL
)
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
LEFT JOIN
INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
AND TC.TABLE_NAME = RC.TABLE_NAME
WHERE
TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
),
'indexes', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'index_name', IndexData.INDEX_NAME,
'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
)
),
JSON_ARRAY()
)
FROM (
SELECT
S.TABLE_SCHEMA,
S.TABLE_NAME,
S.INDEX_NAME,
MIN(S.NON_UNIQUE) AS NON_UNIQUE, -- Aggregate NON_UNIQUE here to get unique status for the index
JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY -- Aggregate columns into an array for this index
FROM
INFORMATION_SCHEMA.STATISTICS S
WHERE
S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
GROUP BY
S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
) AS IndexData
ORDER BY IndexData.INDEX_NAME
),
'triggers', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'trigger_name', TR.TRIGGER_NAME,
'trigger_definition', TR.ACTION_STATEMENT
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.TRIGGERS TR
WHERE
TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
ORDER BY TR.TRIGGER_NAME
)
) USING utf8mb4) AS object_details
FROM
INFORMATION_SCHEMA.TABLES T
CROSS JOIN (SELECT @table_names := ?) AS variables
WHERE
T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
AND T.TABLE_TYPE = 'BASE TABLE'
ORDER BY
T.TABLE_SCHEMA, T.TABLE_NAME;
parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
default: ""
toolsets:
mysql-database-tools:
- execute_sql
- list_tables

View File

@@ -1,125 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplex
import (
"context"
"fmt"
dataplexapi "cloud.google.com/go/dataplex/apiv1"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
"github.com/googleapis/genai-toolbox/internal/util"
"go.opentelemetry.io/otel/trace"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
)
const SourceKind string = "dataplex"
// validate interface
var _ sources.SourceConfig = Config{}
func init() {
if !sources.Register(SourceKind, newConfig) {
panic(fmt.Sprintf("source kind %q already registered", SourceKind))
}
}
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
actual := Config{Name: name}
if err := decoder.DecodeContext(ctx, &actual); err != nil {
return nil, err
}
return actual, nil
}
type Config struct {
// Dataplex configs
Name string `yaml:"name" validate:"required"`
Kind string `yaml:"kind" validate:"required"`
Project string `yaml:"project" validate:"required"`
}
func (r Config) SourceConfigKind() string {
// Returns Dataplex source kind
return SourceKind
}
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
// Initializes a Dataplex source
client, err := initDataplexConnection(ctx, tracer, r.Name, r.Project)
if err != nil {
return nil, err
}
s := &Source{
Name: r.Name,
Kind: SourceKind,
Client: client,
Project: r.Project,
}
return s, nil
}
var _ sources.Source = &Source{}
type Source struct {
// Source struct with Dataplex client
Name string `yaml:"name"`
Kind string `yaml:"kind"`
Client *dataplexapi.CatalogClient
Project string `yaml:"project"`
Location string `yaml:"location"`
}
func (s *Source) SourceKind() string {
// Returns Dataplex source kind
return SourceKind
}
func (s *Source) ProjectID() string {
return s.Project
}
func (s *Source) CatalogClient() *dataplexapi.CatalogClient {
return s.Client
}
func initDataplexConnection(
ctx context.Context,
tracer trace.Tracer,
name string,
project string,
) (*dataplexapi.CatalogClient, error) {
ctx, span := sources.InitConnectionSpan(ctx, tracer, SourceKind, name)
defer span.End()
cred, err := google.FindDefaultCredentials(ctx)
if err != nil {
return nil, fmt.Errorf("failed to find default Google Cloud credentials: %w", err)
}
userAgent, err := util.UserAgentFromContext(ctx)
if err != nil {
return nil, err
}
client, err := dataplexapi.NewCatalogClient(ctx, option.WithUserAgent(userAgent), option.WithCredentials(cred))
if err != nil {
return nil, fmt.Errorf("failed to create Dataplex client for project %q: %w", project, err)
}
return client, nil
}

View File

@@ -1,111 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplex_test
import (
"testing"
yaml "github.com/goccy/go-yaml"
"github.com/google/go-cmp/cmp"
"github.com/googleapis/genai-toolbox/internal/server"
"github.com/googleapis/genai-toolbox/internal/sources/dataplex"
"github.com/googleapis/genai-toolbox/internal/testutils"
)
func TestParseFromYamlDataplex(t *testing.T) {
tcs := []struct {
desc string
in string
want server.SourceConfigs
}{
{
desc: "basic example",
in: `
sources:
my-instance:
kind: dataplex
project: my-project
`,
want: server.SourceConfigs{
"my-instance": dataplex.Config{
Name: "my-instance",
Kind: dataplex.SourceKind,
Project: "my-project",
},
},
},
}
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {
got := struct {
Sources server.SourceConfigs `yaml:"sources"`
}{}
// Parse contents
err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
if err != nil {
t.Fatalf("unable to unmarshal: %s", err)
}
if !cmp.Equal(tc.want, got.Sources) {
t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
}
})
}
}
func TestFailParseFromYaml(t *testing.T) {
tcs := []struct {
desc string
in string
err string
}{
{
desc: "extra field",
in: `
sources:
my-instance:
kind: dataplex
project: my-project
foo: bar
`,
err: "unable to parse source \"my-instance\" as \"dataplex\": [1:1] unknown field \"foo\"\n> 1 | foo: bar\n ^\n 2 | kind: dataplex\n 3 | project: my-project",
},
{
desc: "missing required field",
in: `
sources:
my-instance:
kind: dataplex
`,
err: "unable to parse source \"my-instance\" as \"dataplex\": Key: 'Config.Project' Error:Field validation for 'Project' failed on the 'required' tag",
},
}
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {
got := struct {
Sources server.SourceConfigs `yaml:"sources"`
}{}
// Parse contents
err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
if err == nil {
t.Fatalf("expect parsing to fail")
}
errStr := err.Error()
if errStr != tc.err {
t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
}
})
}
}

View File

@@ -67,11 +67,6 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
}
userAgent, err := util.UserAgentFromContext(ctx)
if err != nil {
return nil, err
}
duration, err := time.ParseDuration(r.Timeout)
if err != nil {
return nil, fmt.Errorf("unable to parse Timeout string as time.Duration: %s", err)
@@ -81,7 +76,6 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
logger.WarnContext(ctx, "Insecure HTTP is enabled for Looker source %s. TLS certificate verification is skipped.\n", r.Name)
}
cfg := rtl.ApiSettings{
AgentTag: userAgent,
BaseUrl: r.BaseURL,
ApiVersion: "4.0",
VerifySsl: (r.SslVerification == "true"),

View File

@@ -1,4 +1,4 @@
// Copyright 2025 Google LLC
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@ import (
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
"github.com/googleapis/genai-toolbox/internal/util"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.opentelemetry.io/otel/trace"
@@ -96,13 +95,8 @@ func initMongoDBClient(ctx context.Context, tracer trace.Tracer, name, uri strin
ctx, span := sources.InitConnectionSpan(ctx, tracer, SourceKind, name)
defer span.End()
userAgent, err := util.UserAgentFromContext(ctx)
if err != nil {
return nil, err
}
// Create a new MongoDB client
clientOpts := options.Client().ApplyURI(uri).SetAppName(userAgent)
clientOpts := options.Client().ApplyURI(uri)
client, err := mongo.Connect(ctx, clientOpts)
if err != nil {
return nil, fmt.Errorf("unable to create MongoDB client: %w", err)

View File

@@ -1,4 +1,4 @@
// Copyright 2025 Google LLC
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@@ -54,7 +54,6 @@ type Config struct {
User string `yaml:"user" validate:"required"`
Password string `yaml:"password" validate:"required"`
Database string `yaml:"database" validate:"required"`
Encrypt string `yaml:"encrypt"`
}
func (r Config) SourceConfigKind() string {
@@ -64,7 +63,7 @@ func (r Config) SourceConfigKind() string {
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
// Initializes a MSSQL source
db, err := initMssqlConnection(ctx, tracer, r.Name, r.Host, r.Port, r.User, r.Password, r.Database, r.Encrypt)
db, err := initMssqlConnection(ctx, tracer, r.Name, r.Host, r.Port, r.User, r.Password, r.Database)
if err != nil {
return nil, fmt.Errorf("unable to create db connection: %w", err)
}
@@ -102,14 +101,7 @@ func (s *Source) MSSQLDB() *sql.DB {
return s.Db
}
func initMssqlConnection(
ctx context.Context,
tracer trace.Tracer,
name, host, port, user, pass, dbname, encrypt string,
) (
*sql.DB,
error,
) {
func initMssqlConnection(ctx context.Context, tracer trace.Tracer, name, host, port, user, pass, dbname string) (*sql.DB, error) {
//nolint:all // Reassigned ctx
ctx, span := sources.InitConnectionSpan(ctx, tracer, SourceKind, name)
defer span.End()
@@ -117,10 +109,6 @@ func initMssqlConnection(
// Create dsn
query := url.Values{}
query.Add("database", dbname)
if encrypt != "" {
query.Add("encrypt", encrypt)
}
url := &url.URL{
Scheme: "sqlserver",
User: url.UserPassword(user, pass),

View File

@@ -54,32 +54,6 @@ func TestParseFromYamlMssql(t *testing.T) {
},
},
},
{
desc: "with encrypt field",
in: `
sources:
my-mssql-instance:
kind: mssql
host: 0.0.0.0
port: my-port
database: my_db
user: my_user
password: my_pass
encrypt: strict
`,
want: server.SourceConfigs{
"my-mssql-instance": mssql.Config{
Name: "my-mssql-instance",
Kind: mssql.SourceKind,
Host: "0.0.0.0",
Port: "my-port",
Database: "my_db",
User: "my_user",
Password: "my_pass",
Encrypt: "strict",
},
},
},
}
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {

View File

@@ -15,11 +15,8 @@
package tools
import (
"bytes"
"encoding/json"
"fmt"
"regexp"
"text/template"
)
var validName = regexp.MustCompile(`^[a-zA-Z0-9_-]*$`)
@@ -28,7 +25,6 @@ func IsValidName(s string) bool {
return validName.MatchString(s)
}
// ConvertAnySliceToTyped converts a []any to a typed slice ([]string, []int, []float64, etc.)
func ConvertAnySliceToTyped(s []any, itemType string) (any, error) {
var typedSlice any
switch itemType {
@@ -75,44 +71,3 @@ func ConvertAnySliceToTyped(s []any, itemType string) (any, error) {
}
return typedSlice, nil
}
// convertParamToJSON is a Go template helper function to convert a parameter to JSON formatted string.
func convertParamToJSON(param any) (string, error) {
jsonData, err := json.Marshal(param)
if err != nil {
return "", fmt.Errorf("failed to marshal param to JSON: %w", err)
}
return string(jsonData), nil
}
// PopulateTemplateWithJSON populate a Go template with a custom `json` array formatter
func PopulateTemplateWithJSON(templateName, templateString string, data map[string]any) (string, error) {
funcMap := template.FuncMap{
"json": convertParamToJSON,
}
tmpl, err := template.New(templateName).Funcs(funcMap).Parse(templateString)
if err != nil {
return "", fmt.Errorf("error parsing template '%s': %w", templateName, err)
}
var result bytes.Buffer
err = tmpl.Execute(&result, data)
if err != nil {
return "", fmt.Errorf("error executing template '%s': %w", templateName, err)
}
return result.String(), nil
}
// CheckDuplicateParameters verify there are no duplicate parameter names
func CheckDuplicateParameters(ps Parameters) error {
seenNames := make(map[string]bool)
for _, p := range ps {
pName := p.GetName()
if _, exists := seenNames[pName]; exists {
return fmt.Errorf("parameter name must be unique across all parameter fields. Duplicate parameter: %s", pName)
}
seenNames[pName] = true
}
return nil
}

View File

@@ -1,175 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplexsearchentries
import (
"context"
"fmt"
dataplexapi "cloud.google.com/go/dataplex/apiv1"
dataplexpb "cloud.google.com/go/dataplex/apiv1/dataplexpb"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
dataplexds "github.com/googleapis/genai-toolbox/internal/sources/dataplex"
"github.com/googleapis/genai-toolbox/internal/tools"
)
const kind string = "dataplex-search-entries"
func init() {
if !tools.Register(kind, newConfig) {
panic(fmt.Sprintf("tool kind %q already registered", kind))
}
}
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
actual := Config{Name: name}
if err := decoder.DecodeContext(ctx, &actual); err != nil {
return nil, err
}
return actual, nil
}
type compatibleSource interface {
CatalogClient() *dataplexapi.CatalogClient
ProjectID() string
}
// validate compatible sources are still compatible
var _ compatibleSource = &dataplexds.Source{}
var compatibleSources = [...]string{dataplexds.SourceKind}
type Config struct {
Name string `yaml:"name" validate:"required"`
Kind string `yaml:"kind" validate:"required"`
Source string `yaml:"source" validate:"required"`
Description string `yaml:"description"`
AuthRequired []string `yaml:"authRequired"`
}
// validate interface
var _ tools.ToolConfig = Config{}
func (cfg Config) ToolConfigKind() string {
return kind
}
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
// Initialize the search configuration with the provided sources
rawS, ok := srcs[cfg.Source]
if !ok {
return nil, fmt.Errorf("no source named %q configured", cfg.Source)
}
// verify the source is compatible
s, ok := rawS.(compatibleSource)
if !ok {
return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
}
query := tools.NewStringParameter("query", "The query against which entries in scope should be matched.")
name := tools.NewStringParameterWithDefault("name", fmt.Sprintf("projects/%s/locations/global", s.ProjectID()), "The project to which the request should be attributed in the following form: projects/{project}/locations/global")
pageSize := tools.NewIntParameterWithDefault("pageSize", 5, "Number of results in the search page.")
pageToken := tools.NewStringParameterWithDefault("pageToken", "", "Page token received from a previous locations.searchEntries call. Provide this to retrieve the subsequent page.")
orderBy := tools.NewStringParameterWithDefault("orderBy", "relevance", "Specifies the ordering of results. Supported values are: relevance, last_modified_timestamp, last_modified_timestamp asc")
semanticSearch := tools.NewBooleanParameterWithDefault("semanticSearch", true, "Whether to use semantic search for the query. If true, the query will be processed using semantic search capabilities.")
parameters := tools.Parameters{query, name, pageSize, pageToken, orderBy, semanticSearch}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: parameters.McpManifest(),
}
t := &SearchTool{
Name: cfg.Name,
Kind: kind,
Parameters: parameters,
AuthRequired: cfg.AuthRequired,
CatalogClient: s.CatalogClient(),
ProjectID: s.ProjectID(),
manifest: tools.Manifest{
Description: cfg.Description,
Parameters: parameters.Manifest(),
AuthRequired: cfg.AuthRequired,
},
mcpManifest: mcpManifest,
}
return t, nil
}
type SearchTool struct {
Name string
Kind string
Parameters tools.Parameters
AuthRequired []string
CatalogClient *dataplexapi.CatalogClient
ProjectID string
manifest tools.Manifest
mcpManifest tools.McpManifest
}
func (t *SearchTool) Authorized(verifiedAuthServices []string) bool {
return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}
func (t *SearchTool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
query, _ := paramsMap["query"].(string)
name, _ := paramsMap["name"].(string)
pageSize, _ := paramsMap["pageSize"].(int32)
pageToken, _ := paramsMap["pageToken"].(string)
orderBy, _ := paramsMap["orderBy"].(string)
semanticSearch, _ := paramsMap["semanticSearch"].(bool)
req := &dataplexpb.SearchEntriesRequest{
Query: query,
Name: name,
PageSize: pageSize,
PageToken: pageToken,
OrderBy: orderBy,
SemanticSearch: semanticSearch,
}
it := t.CatalogClient.SearchEntries(ctx, req)
if it == nil {
return nil, fmt.Errorf("failed to create search entries iterator for project %q", t.ProjectID)
}
var results []*dataplexpb.SearchEntriesResult
for {
entry, err := it.Next()
if err != nil {
break
}
results = append(results, entry)
}
return results, nil
}
func (t *SearchTool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
// Parse parameters from the provided data
return tools.ParseParams(t.Parameters, data, claims)
}
func (t *SearchTool) Manifest() tools.Manifest {
// Returns the tool manifest
return t.manifest
}
func (t *SearchTool) McpManifest() tools.McpManifest {
// Returns the tool MCP manifest
return t.mcpManifest
}

View File

@@ -1,73 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplexsearchentries_test
import (
"testing"
yaml "github.com/goccy/go-yaml"
"github.com/google/go-cmp/cmp"
"github.com/googleapis/genai-toolbox/internal/server"
"github.com/googleapis/genai-toolbox/internal/testutils"
"github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexsearchentries"
)
func TestParseFromYamlDataplexSearchEntries(t *testing.T) {
ctx, err := testutils.ContextWithNewLogger()
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
tcs := []struct {
desc string
in string
want server.ToolConfigs
}{
{
desc: "basic example",
in: `
tools:
example_tool:
kind: dataplex-search-entries
source: my-instance
description: some description
`,
want: server.ToolConfigs{
"example_tool": dataplexsearchentries.Config{
Name: "example_tool",
Kind: "dataplex-search-entries",
Source: "my-instance",
Description: "some description",
AuthRequired: []string{},
},
},
},
}
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {
got := struct {
Tools server.ToolConfigs `yaml:"tools"`
}{}
// Parse contents
err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
if err != nil {
t.Fatalf("unable to unmarshal: %s", err)
}
if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
t.Fatalf("incorrect parse: diff %v", diff)
}
})
}
}

View File

@@ -105,16 +105,6 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across queryParams, bodyParams, and headerParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
pathMcpManifest := cfg.PathParams.McpManifest()
queryMcpManifest := cfg.QueryParams.McpManifest()
bodyMcpManifest := cfg.BodyParams.McpManifest()
@@ -153,6 +143,15 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across queryParams, bodyParams, and headerParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
@@ -208,6 +207,15 @@ type Tool struct {
mcpManifest tools.McpManifest
}
// helper function to convert a parameter to JSON formatted string.
func convertParamToJSON(param any) (string, error) {
jsonData, err := json.Marshal(param)
if err != nil {
return "", fmt.Errorf("failed to marshal param to JSON: %w", err)
}
return string(jsonData), nil
}
// Helper function to generate the HTTP request body upon Tool invocation.
func getRequestBody(bodyParams tools.Parameters, requestBodyPayload string, paramsMap map[string]any) (string, error) {
bodyParamValues, err := tools.GetParams(bodyParams, paramsMap)
@@ -216,11 +224,20 @@ func getRequestBody(bodyParams tools.Parameters, requestBodyPayload string, para
}
bodyParamsMap := bodyParamValues.AsMap()
requestBodyStr, err := tools.PopulateTemplateWithJSON("HTTPToolRequestBody", requestBodyPayload, bodyParamsMap)
if err != nil {
return "", err
// Create a FuncMap to format array parameters
funcMap := template.FuncMap{
"json": convertParamToJSON,
}
return requestBodyStr, nil
templ, err := template.New("body").Funcs(funcMap).Parse(requestBodyPayload)
if err != nil {
return "", fmt.Errorf("error parsing request body: %s", err)
}
var result bytes.Buffer
err = templ.Execute(&result, bodyParamsMap)
if err != nil {
return "", fmt.Errorf("error replacing body payload: %s", err)
}
return result.String(), nil
}
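// As an illustration (hypothetical payload, not from this repository): a
// requestBodyPayload of `{ "ids": {{json .ids}} }` with an array body
// parameter ids = [1, 2, 3] renders to `{ "ids": [1,2,3] }`; the json
// helper marshals the slice so it lands in the body as a JSON array.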
// Helper function to generate the HTTP request URL upon Tool invocation.

View File

@@ -0,0 +1,75 @@
package common
import (
"bytes"
"encoding/json"
"fmt"
"text/template"
"github.com/googleapis/genai-toolbox/internal/tools"
)
// helper function to convert a parameter to JSON formatted string.
func ConvertParamToJSON(param any) (string, error) {
jsonData, err := json.Marshal(param)
if err != nil {
return "", fmt.Errorf("failed to marshal param to JSON: %w", err)
}
return string(jsonData), nil
}
func ParsePayloadTemplate(params tools.Parameters, payload string, paramsMap map[string]any) (string, error) {
// Create a map for payload parameters
cleanParamsMap := make(map[string]any)
for _, p := range params {
k := p.GetName()
v, ok := paramsMap[k]
if !ok {
return "", fmt.Errorf("missing parameter %s", k)
}
cleanParamsMap[k] = v
}
// Create a FuncMap to format array parameters
funcMap := template.FuncMap{
"json": ConvertParamToJSON,
}
templ, err := template.New("template").Funcs(funcMap).Parse(payload)
if err != nil {
return "", fmt.Errorf("error parsing: %s", err)
}
var result bytes.Buffer
err = templ.Execute(&result, cleanParamsMap)
if err != nil {
return "", fmt.Errorf("error replacing payload: %s", err)
}
return result.String(), nil
}
func GetUpdate(params tools.Parameters, payload string, paramsMap map[string]any) (string, error) {
// Create a map for update parameters
cleanParamsMap := make(map[string]any)
for _, p := range params {
k := p.GetName()
v, ok := paramsMap[k]
if !ok {
return "", fmt.Errorf("missing update parameter %s", k)
}
cleanParamsMap[k] = v
}
// Create a FuncMap to format array parameters
funcMap := template.FuncMap{
"json": ConvertParamToJSON,
}
templ, err := template.New("filter").Funcs(funcMap).Parse(payload)
if err != nil {
return "", fmt.Errorf("error parsing filter: %s", err)
}
var result bytes.Buffer
err = templ.Execute(&result, cleanParamsMap)
if err != nil {
return "", fmt.Errorf("error replacing filter payload: %s", err)
}
return result.String(), nil
}
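// A hypothetical usage sketch (payload and parameter names are illustrative):
//
//	payload := `{ "name": {{json .name}} }`
//	params := tools.Parameters{tools.NewStringParameter("name", "The name to match.")}
//	filter, err := common.ParsePayloadTemplate(params, payload, map[string]any{"name": "alice"})
//	// on success, filter == `{ "name": "alice" }`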

View File

@@ -21,6 +21,7 @@ import (
"github.com/goccy/go-yaml"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
@@ -81,18 +82,50 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.PipelineParams)
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.PipelineParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
pipelineMcpManifest := cfg.PipelineParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
pipelineMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range pipelineMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across pipelineParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -135,7 +168,7 @@ type Tool struct {
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
pipelineString, err := tools.PopulateTemplateWithJSON("MongoDBAggregatePipeline", t.PipelinePayload, paramsMap)
pipelineString, err := common.ParsePayloadTemplate(t.PipelineParams, t.PipelinePayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating pipeline: %s", err)
}

View File

@@ -15,11 +15,10 @@
package mongodbaggregate_test
import (
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/mongodbaggregate"
"strings"
"testing"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/mongodbaggregate"
yaml "github.com/goccy/go-yaml"
"github.com/google/go-cmp/cmp"
"github.com/googleapis/genai-toolbox/internal/server"

View File

@@ -21,6 +21,7 @@ import (
"github.com/goccy/go-yaml"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -80,24 +81,51 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams, projectParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -136,7 +164,7 @@ type Tool struct {
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBDeleteManyFilter", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}

View File

@@ -20,6 +20,7 @@ import (
"github.com/goccy/go-yaml"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -79,24 +80,51 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams, projectParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -135,7 +163,7 @@ type Tool struct {
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBDeleteOneFilter", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}

View File

@@ -14,13 +14,16 @@
package mongodbfind
import (
"bytes"
"context"
"encoding/json"
"fmt"
"slices"
"text/template"
"github.com/goccy/go-yaml"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -85,23 +88,62 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams, cfg.ProjectParams, cfg.SortParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
cfg.ProjectParams.Manifest(),
cfg.SortParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
projectMcpManifest := cfg.ProjectParams.McpManifest()
sortMcpManifest := cfg.SortParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
projectMcpManifest.Required,
sortMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
for name, p := range projectMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
for name, p := range sortMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams, projectParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -147,7 +189,7 @@ type Tool struct {
mcpManifest tools.McpManifest
}
func getOptions(sortParameters tools.Parameters, projectPayload string, limit int64, paramsMap map[string]any) (*options.FindOptions, error) {
func getOptions(sortParameters tools.Parameters, projectParams tools.Parameters, projectPayload string, limit int64, paramsMap map[string]any) (*options.FindOptions, error) {
opts := options.Find()
sort := bson.M{}
@@ -160,14 +202,29 @@ func getOptions(sortParameters tools.Parameters, projectPayload string, limit in
return opts, nil
}
result, err := tools.PopulateTemplateWithJSON("MongoDBFindProjectString", projectPayload, paramsMap)
project := bson.M{}
for _, p := range projectParams {
project[p.GetName()] = paramsMap[p.GetName()]
}
// Create a FuncMap to format array parameters
funcMap := template.FuncMap{
"json": common.ConvertParamToJSON,
}
templ, err := template.New("project").Funcs(funcMap).Parse(projectPayload)
if err != nil {
return nil, fmt.Errorf("error populating project payload: %s", err)
return nil, fmt.Errorf("error parsing project: %s", err)
}
var result bytes.Buffer
err = templ.Execute(&result, project)
if err != nil {
return nil, fmt.Errorf("error replacing projection payload: %s", err)
}
var projection any
err = bson.UnmarshalExtJSON([]byte(result), false, &projection)
err = bson.UnmarshalExtJSON(result.Bytes(), false, &projection)
if err != nil {
return nil, fmt.Errorf("error unmarshalling projection: %s", err)
}
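
The projection handling above renders the payload as a Go text/template with a json helper, then unmarshals the rendered output as extended JSON. A self-contained sketch of that mechanism using only the standard library; jsonFunc stands in for common.ConvertParamToJSON, whose implementation is not shown in this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"text/template"
)

func main() {
	// Stand-in for common.ConvertParamToJSON: marshal a parameter value
	// so it can be spliced into a JSON payload template.
	jsonFunc := func(v any) (string, error) {
		b, err := json.Marshal(v)
		return string(b), err
	}

	payload := `{ "name": {{ json .includeName }}, "tags": {{ json .tags }} }`
	params := map[string]any{"includeName": 1, "tags": []string{"a", "b"}}

	templ, err := template.New("project").
		Funcs(template.FuncMap{"json": jsonFunc}).
		Parse(payload)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if err := templ.Execute(&out, params); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // { "name": 1, "tags": ["a","b"] }
}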
@@ -184,13 +241,12 @@ func getOptions(sortParameters tools.Parameters, projectPayload string, limit in
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBFindFilterString", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}
opts, err := getOptions(t.SortParams, t.ProjectPayload, t.Limit, paramsMap)
opts, err := getOptions(t.SortParams, t.ProjectParams, t.ProjectPayload, t.Limit, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating options: %s", err)
}

View File

@@ -14,13 +14,16 @@
package mongodbfindone
import (
"bytes"
"context"
"encoding/json"
"fmt"
"slices"
"text/template"
"github.com/goccy/go-yaml"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -84,24 +87,62 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams, cfg.ProjectParams, cfg.SortParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
cfg.ProjectParams.Manifest(),
cfg.SortParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
projectMcpManifest := cfg.ProjectParams.McpManifest()
sortMcpManifest := cfg.SortParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
projectMcpManifest.Required,
sortMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
for name, p := range projectMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
for name, p := range sortMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams, projectParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -145,7 +186,7 @@ type Tool struct {
mcpManifest tools.McpManifest
}
func getOptions(sortParameters tools.Parameters, projectPayload string, paramsMap map[string]any) (*options.FindOneOptions, error) {
func getOptions(sortParameters tools.Parameters, projectParams tools.Parameters, projectPayload string, paramsMap map[string]any) (*options.FindOneOptions, error) {
opts := options.FindOne()
sort := bson.M{}
@@ -158,14 +199,28 @@ func getOptions(sortParameters tools.Parameters, projectPayload string, paramsMa
return opts, nil
}
result, err := tools.PopulateTemplateWithJSON("MongoDBFindOneProjectString", projectPayload, paramsMap)
project := bson.M{}
for _, p := range projectParams {
project[p.GetName()] = paramsMap[p.GetName()]
}
// Create a FuncMap to format array parameters
funcMap := template.FuncMap{
"json": common.ConvertParamToJSON,
}
templ, err := template.New("project").Funcs(funcMap).Parse(projectPayload)
if err != nil {
return nil, fmt.Errorf("error populating project payload: %s", err)
return nil, fmt.Errorf("error parsing project: %s", err)
}
var result bytes.Buffer
err = templ.Execute(&result, project)
if err != nil {
return nil, fmt.Errorf("error replacing project payload: %s", err)
}
var projection any
err = bson.Unmarshal([]byte(result), &projection)
err = bson.Unmarshal(result.Bytes(), &projection)
if err != nil {
return nil, fmt.Errorf("error unmarshalling projection: %s", err)
}
@@ -177,13 +232,12 @@ func getOptions(sortParameters tools.Parameters, projectPayload string, paramsMa
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBFindOneFilterString", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}
opts, err := getOptions(t.SortParams, t.ProjectPayload, paramsMap)
opts, err := getOptions(t.SortParams, t.ProjectParams, t.ProjectPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating options: %s", err)
}

View File

@@ -17,6 +17,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
@@ -77,22 +78,37 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
}
dataParam := tools.NewStringParameterWithRequired(paramDataKey, "the JSON payload to insert; should be a JSON array of documents", true)
parameters := tools.Parameters{dataParam}
allParameters := tools.Parameters{dataParam}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
parameters.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
payloadMcpManifest := dataParam.McpManifest()
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := map[string]tools.ParameterMcpManifest{
paramDataKey: payloadMcpManifest,
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: []string{paramDataKey},
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
return Tool{
Name: cfg.Name,
@@ -100,7 +116,7 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
AuthRequired: cfg.AuthRequired,
Collection: cfg.Collection,
Canonical: cfg.Canonical,
PayloadParams: allParameters,
PayloadParams: parameters,
database: s.Client.Database(cfg.Database),
manifest: tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
mcpManifest: mcpManifest,
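
The insert tools expose a single required string parameter that carries the documents as JSON. A hypothetical sketch of decoding such a payload and inserting it with the driver (decodeAndInsert is illustrative, not the tool's actual Invoke; it assumes relaxed extended JSON input and the usual context, encoding/json, fmt, bson, and mongo imports):

// decodeAndInsert parses a JSON array payload into BSON documents and
// inserts them, returning the generated IDs. Hypothetical sketch only.
func decodeAndInsert(ctx context.Context, coll *mongo.Collection, payload string) ([]any, error) {
	var raw []json.RawMessage
	if err := json.Unmarshal([]byte(payload), &raw); err != nil {
		return nil, fmt.Errorf("payload must be a JSON array of documents: %w", err)
	}
	docs := make([]any, 0, len(raw))
	for _, r := range raw {
		var d bson.M
		// canonical=false accepts relaxed extended JSON.
		if err := bson.UnmarshalExtJSON(r, false, &d); err != nil {
			return nil, fmt.Errorf("invalid document: %w", err)
		}
		docs = append(docs, d)
	}
	res, err := coll.InsertMany(ctx, docs)
	if err != nil {
		return nil, err
	}
	return res.InsertedIDs, nil
}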

View File

@@ -17,6 +17,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
@@ -77,21 +78,35 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
}
payloadParams := tools.NewStringParameterWithRequired(dataParamsKey, "the JSON payload to insert; should be a JSON object", true)
parameters := tools.Parameters{payloadParams}
allParameters := tools.Parameters{payloadParams}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
parameters.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
payloadMcpManifest := payloadParams.McpManifest()
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := map[string]tools.ParameterMcpManifest{
dataParamsKey: payloadMcpManifest,
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: []string{dataParamsKey},
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -101,7 +116,7 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
AuthRequired: cfg.AuthRequired,
Collection: cfg.Collection,
Canonical: cfg.Canonical,
PayloadParams: allParameters,
PayloadParams: parameters,
database: s.Client.Database(cfg.Database),
manifest: tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
mcpManifest: mcpManifest,

View File

@@ -22,6 +22,7 @@ import (
"github.com/googleapis/genai-toolbox/internal/sources"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -55,7 +56,7 @@ type Config struct {
FilterParams tools.Parameters `yaml:"filterParams" validate:"required"`
UpdatePayload string `yaml:"updatePayload" validate:"required"`
UpdateParams tools.Parameters `yaml:"updateParams" validate:"required"`
Canonical bool `yaml:"canonical" validate:"required"`
Canonical bool `yaml:"canonical" validate:"required"` // required so the user must choose explicitly
Upsert bool `yaml:"upsert"`
}
@@ -82,24 +83,55 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams, cfg.UpdateParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
cfg.UpdateParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
updateMcpManifest := cfg.UpdateParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
updateMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
for name, p := range updateMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams, projectParams, and sortParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -135,7 +167,7 @@ type Tool struct {
UpdatePayload string `yaml:"updatePayload" validate:"required"`
UpdateParams tools.Parameters `yaml:"updateParams" validate:"required"`
AllParams tools.Parameters `yaml:"allParams"`
Canonical bool `yaml:"canonical" validation:"required"`
Canonical bool `yaml:"canonical" validate:"required"` // required so the user must choose explicitly
Upsert bool `yaml:"upsert"`
database *mongo.Database
@@ -146,7 +178,7 @@ type Tool struct {
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBUpdateManyFilter", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}
@@ -157,7 +189,7 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error)
return nil, fmt.Errorf("unable to unmarshal filter string: %w", err)
}
updateString, err := tools.PopulateTemplateWithJSON("MongoDBUpdateMany", t.UpdatePayload, paramsMap)
updateString, err := common.GetUpdate(t.UpdateParams, t.UpdatePayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("unable to get update: %w", err)
}
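
Invoke then unmarshals both rendered payloads from extended JSON and calls the driver. A condensed, hypothetical sketch of that tail end of the flow (applyUpdateMany is illustrative; filterString and updateString are assumed to be the outputs of the template helpers above):

// applyUpdateMany turns rendered filter/update JSON strings into a driver
// call and reports the number of modified documents. Hypothetical sketch.
func applyUpdateMany(ctx context.Context, coll *mongo.Collection,
	filterString, updateString string, canonical, upsert bool) (int64, error) {
	var filter, update bson.M
	if err := bson.UnmarshalExtJSON([]byte(filterString), canonical, &filter); err != nil {
		return 0, fmt.Errorf("unable to unmarshal filter string: %w", err)
	}
	// The update document is expected to use operators such as $set.
	if err := bson.UnmarshalExtJSON([]byte(updateString), canonical, &update); err != nil {
		return 0, fmt.Errorf("unable to unmarshal update string: %w", err)
	}
	res, err := coll.UpdateMany(ctx, filter, update, options.Update().SetUpsert(upsert))
	if err != nil {
		return 0, err
	}
	return res.ModifiedCount, nil
}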

View File

@@ -22,6 +22,7 @@ import (
"github.com/googleapis/genai-toolbox/internal/sources"
mongosrc "github.com/googleapis/genai-toolbox/internal/sources/mongodb"
"github.com/googleapis/genai-toolbox/internal/tools"
"github.com/googleapis/genai-toolbox/internal/tools/mongodb/common"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -56,7 +57,7 @@ type Config struct {
UpdatePayload string `yaml:"updatePayload" validate:"required"`
UpdateParams tools.Parameters `yaml:"updateParams" validate:"required"`
Canonical bool `yaml:"canonical" validate:"required"`
Canonical bool `yaml:"canonical" validate:"required"` // required so the user must choose explicitly
Upsert bool `yaml:"upsert"`
}
@@ -83,24 +84,50 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
// Create a slice for all parameters
allParameters := slices.Concat(cfg.FilterParams, cfg.UpdateParams)
// Verify no duplicate parameter names
err := tools.CheckDuplicateParameters(allParameters)
if err != nil {
return nil, err
}
// Create Toolbox manifest
paramManifest := allParameters.Manifest()
// Create parameter MCP manifest
paramManifest := slices.Concat(
cfg.FilterParams.Manifest(),
cfg.UpdateParams.Manifest(),
)
if paramManifest == nil {
paramManifest = make([]tools.ParameterManifest, 0)
}
// Verify there are no duplicate parameter names
seenNames := make(map[string]bool)
for _, param := range paramManifest {
if _, exists := seenNames[param.Name]; exists {
return nil, fmt.Errorf("parameter name must be unique across filterParams and updateParams. Duplicate parameter: %s", param.Name)
}
seenNames[param.Name] = true
}
// Create MCP manifest
filterMcpManifest := cfg.FilterParams.McpManifest()
// Concatenate parameters for MCP `required` field
concatRequiredManifest := slices.Concat(
filterMcpManifest.Required,
)
if concatRequiredManifest == nil {
concatRequiredManifest = []string{}
}
// Concatenate parameters for MCP `properties` field
concatPropertiesManifest := make(map[string]tools.ParameterMcpManifest)
for name, p := range filterMcpManifest.Properties {
concatPropertiesManifest[name] = p
}
// Create a new McpToolsSchema with all parameters
paramMcpManifest := tools.McpToolsSchema{
Type: "object",
Properties: concatPropertiesManifest,
Required: concatRequiredManifest,
}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: allParameters.McpManifest(),
InputSchema: paramMcpManifest,
}
// finish tool setup
@@ -147,7 +174,7 @@ type Tool struct {
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
paramsMap := params.AsMap()
filterString, err := tools.PopulateTemplateWithJSON("MongoDBUpdateOneFilter", t.FilterPayload, paramsMap)
filterString, err := common.ParsePayloadTemplate(t.FilterParams, t.FilterPayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("error populating filter: %s", err)
}
@@ -158,7 +185,7 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error)
return nil, fmt.Errorf("unable to unmarshal filter string: %w", err)
}
updateString, err := tools.PopulateTemplateWithJSON("MongoDBUpdateOne", t.UpdatePayload, paramsMap)
updateString, err := common.GetUpdate(t.UpdateParams, t.UpdatePayload, paramsMap)
if err != nil {
return nil, fmt.Errorf("unable to get update: %w", err)
}

View File

@@ -17,7 +17,6 @@ package mysqlsql
import (
"context"
"database/sql"
"encoding/json"
"fmt"
yaml "github.com/goccy/go-yaml"
@@ -178,14 +177,6 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error)
// the mysql driver returns []uint8 for "TEXT", "VARCHAR", and "NVARCHAR";
// we'll need to cast it back to string
switch colTypes[i].DatabaseTypeName() {
case "JSON":
// unmarshal JSON data before storing to prevent double marshaling
var unmarshaledData any
err := json.Unmarshal(val.([]byte), &unmarshaledData)
if err != nil {
return nil, fmt.Errorf("unable to unmarshal json data %s", val)
}
vMap[name] = unmarshaledData
case "TEXT", "VARCHAR", "NVARCHAR":
vMap[name] = string(val.([]byte))
default:

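The removed JSON case existed to avoid double marshaling: without it, the raw bytes of a JSON column are re-encoded as an escaped string when the row map is serialized. A minimal standard-library illustration of the difference:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"a": 1}`) // what the driver hands back for a JSON column

	// Stored as a string, the value is re-encoded (double marshaled):
	asString, _ := json.Marshal(map[string]any{"col": string(raw)})
	fmt.Println(string(asString)) // {"col":"{\"a\": 1}"}

	// Unmarshaled first, it embeds as a real object:
	var v any
	if err := json.Unmarshal(raw, &v); err != nil {
		panic(err)
	}
	asObject, _ := json.Marshal(map[string]any{"col": v})
	fmt.Println(string(asObject)) // {"col":{"a":1}}
}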
View File

@@ -1,204 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package cache provides a simple, thread-safe, in-memory key-value store.
It features item expiration and an optional background process (janitor) that
periodically removes expired items.
*/
package cache
import (
"sync"
"time"
)
const (
// DefaultJanitorInterval is the default interval at which the janitor
// runs to clean up expired cache items.
DefaultJanitorInterval = 1 * time.Minute
// DefaultExpiration is the default time-to-live for a cache item, in
// minutes. Expiration is set on a per-item basis; callers (such as the
// neo4j-schema tool) use this constant as their default TTL.
DefaultExpiration = 60
)
// CacheItem represents a value stored in the cache, along with its expiration time.
type CacheItem struct {
Value any // The actual value being stored.
Expiration int64 // The time when the item expires, as a Unix nano timestamp. 0 means no expiration.
}
// isExpired checks if the cache item has passed its expiration time.
// It returns true if the item is expired, and false otherwise.
func (item CacheItem) isExpired() bool {
// If Expiration is 0, the item is considered to never expire.
if item.Expiration == 0 {
return false
}
return time.Now().UnixNano() > item.Expiration
}
// Cache is a thread-safe, in-memory key-value store with self-cleaning capabilities.
type Cache struct {
items map[string]CacheItem // The underlying map that stores the cache items.
mu sync.RWMutex // A read/write mutex to ensure thread safety for concurrent access.
stop chan struct{} // A channel used to signal the janitor goroutine to stop.
}
// NewCache creates and returns a new Cache instance.
// The janitor for cleaning up expired items is not started by default.
// Use the WithJanitor method to start the cleanup process.
//
// Example:
//
// c := cache.NewCache()
// c.Set("myKey", "myValue", 5*time.Minute)
func NewCache() *Cache {
return &Cache{
items: make(map[string]CacheItem),
}
}
// WithJanitor starts a background goroutine (janitor) that periodically cleans up
// expired items from the cache. If a janitor is already running, it will be
// stopped and a new one will be started with the specified interval.
//
// The interval parameter defines how often the janitor should run. If a non-positive
// interval is provided, it defaults to DefaultJanitorInterval (1 minute).
//
// It returns a pointer to the Cache to allow for method chaining.
//
// Example:
//
// // Create a cache that cleans itself every 10 minutes.
// c := cache.NewCache().WithJanitor(10 * time.Minute)
// defer c.Stop() // It's important to stop the janitor when the cache is no longer needed.
func (c *Cache) WithJanitor(interval time.Duration) *Cache {
c.mu.Lock()
defer c.mu.Unlock()
if c.stop != nil {
// If a janitor is already running, stop it before starting a new one.
close(c.stop)
}
c.stop = make(chan struct{})
// Use the default interval if an invalid one is provided.
if interval <= 0 {
interval = DefaultJanitorInterval
}
// Start the janitor in a new goroutine.
go c.janitor(interval, c.stop)
return c
}
// Get retrieves an item from the cache by its key.
// It returns the item's value and a boolean. The boolean is true if the key
// was found and the item has not expired. Otherwise, it is false.
//
// Example:
//
// v, found := c.Get("myKey")
// if found {
// fmt.Printf("Found value: %v\n", v)
// } else {
// fmt.Println("Key not found or expired.")
// }
func (c *Cache) Get(key string) (any, bool) {
c.mu.RLock()
defer c.mu.RUnlock()
item, found := c.items[key]
// Return false if the item is not found or if it is found but has expired.
if !found || item.isExpired() {
return nil, false
}
return item.Value, true
}
// Set adds an item to the cache, replacing any existing item with the same key.
//
// The `ttl` (time-to-live) parameter specifies how long the item should remain
// in the cache. If `ttl` is positive, the item will expire after that duration.
// If `ttl` is zero or negative, the item will never expire.
//
// Example:
//
// // Add a key that expires in 5 minutes.
// c.Set("sessionToken", "xyz123", 5*time.Minute)
//
// // Add a key that never expires.
// c.Set("appConfig", "configValue", 0)
func (c *Cache) Set(key string, value any, ttl time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
var expiration int64
// Calculate the expiration time only if ttl is positive.
if ttl > 0 {
expiration = time.Now().Add(ttl).UnixNano()
}
c.items[key] = CacheItem{
Value: value,
Expiration: expiration,
}
}
// Stop terminates the background janitor goroutine.
// It is safe to call Stop even if the janitor was never started or has already
// been stopped. This is useful for cleaning up resources.
func (c *Cache) Stop() {
c.mu.Lock()
defer c.mu.Unlock()
if c.stop != nil {
close(c.stop)
c.stop = nil
}
}
// janitor is the background cleanup worker. It runs in a separate goroutine.
// It uses a time.Ticker to periodically trigger the deletion of expired items.
func (c *Cache) janitor(interval time.Duration, stopCh chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Time to clean up expired items.
c.deleteExpired()
case <-stopCh:
// Stop signal received, exit the goroutine.
return
}
}
}
// deleteExpired scans the cache and removes all items that have expired.
// This function acquires a write lock on the cache to ensure safe mutation.
func (c *Cache) deleteExpired() {
c.mu.Lock()
defer c.mu.Unlock()
for k, v := range c.items {
if v.isExpired() {
delete(c.items, k)
}
}
}
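
The doc comments above already sketch individual calls; a compact end-to-end usage example, assuming the import path used by the neo4j-schema tool elsewhere in this diff (internal packages are only importable within the module, so this is a sketch):

package main

import (
	"fmt"
	"time"

	"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/cache"
)

func main() {
	c := cache.NewCache().WithJanitor(10 * time.Minute)
	defer c.Stop() // always stop the janitor when the cache is done

	c.Set("sessionToken", "xyz123", 5*time.Minute) // expires in 5 minutes
	c.Set("appConfig", map[string]int{"n": 1}, 0)  // never expires

	if v, found := c.Get("sessionToken"); found {
		fmt.Println("token:", v)
	}
}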

View File

@@ -1,170 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"sync"
"testing"
"time"
)
// TestCache_SetAndGet verifies the basic functionality of setting a value
// and immediately retrieving it.
func TestCache_SetAndGet(t *testing.T) {
cache := NewCache()
defer cache.Stop()
key := "testKey"
value := "testValue"
cache.Set(key, value, 1*time.Minute)
retrievedValue, found := cache.Get(key)
if !found {
t.Errorf("Expected to find key %q, but it was not found", key)
}
if retrievedValue != value {
t.Errorf("Expected value %q, but got %q", value, retrievedValue)
}
}
// TestCache_GetExpired tests that an item is not retrievable after it has expired.
func TestCache_GetExpired(t *testing.T) {
cache := NewCache()
defer cache.Stop()
key := "expiredKey"
value := "expiredValue"
// Set an item with a very short TTL.
cache.Set(key, value, 1*time.Millisecond)
time.Sleep(2 * time.Millisecond) // Wait for the item to expire.
// Attempt to get the expired item.
_, found := cache.Get(key)
if found {
t.Errorf("Expected key %q to be expired, but it was found", key)
}
}
// TestCache_SetNoExpiration ensures that an item with a TTL of 0 or less
// does not expire.
func TestCache_SetNoExpiration(t *testing.T) {
cache := NewCache()
defer cache.Stop()
key := "noExpireKey"
value := "noExpireValue"
cache.Set(key, value, 0) // Setting with 0 should mean no expiration.
time.Sleep(5 * time.Millisecond)
retrievedValue, found := cache.Get(key)
if !found {
t.Errorf("Expected to find key %q, but it was not found", key)
}
if retrievedValue != value {
t.Errorf("Expected value %q, but got %q", value, retrievedValue)
}
}
// TestCache_Janitor verifies that the janitor goroutine automatically removes
// expired items from the cache.
func TestCache_Janitor(t *testing.T) {
// Initialize cache with a very short janitor interval for quick testing.
cache := NewCache().WithJanitor(10 * time.Millisecond)
defer cache.Stop()
expiredKey := "expired"
activeKey := "active"
// Set one item that will expire and one that will not.
cache.Set(expiredKey, "value", 1*time.Millisecond)
cache.Set(activeKey, "value", 1*time.Hour)
// Wait longer than the janitor interval to ensure it has a chance to run.
time.Sleep(20 * time.Millisecond)
// Check that the expired key has been removed.
_, found := cache.Get(expiredKey)
if found {
t.Errorf("Expected janitor to clean up expired key %q, but it was found", expiredKey)
}
// Check that the active key is still present.
_, found = cache.Get(activeKey)
if !found {
t.Errorf("Expected active key %q to be present, but it was not found", activeKey)
}
}
// TestCache_Stop ensures that calling the Stop method does not cause a panic,
// regardless of whether the janitor is running or not. It also tests idempotency.
func TestCache_Stop(t *testing.T) {
t.Run("Stop without janitor", func(t *testing.T) {
cache := NewCache()
// Test that calling Stop multiple times on a cache without a janitor is safe.
cache.Stop()
cache.Stop()
})
t.Run("Stop with janitor", func(t *testing.T) {
cache := NewCache().WithJanitor(1 * time.Minute)
// Test that calling Stop multiple times on a cache with a janitor is safe.
cache.Stop()
cache.Stop()
})
}
// TestCache_Concurrent performs a stress test on the cache with concurrent
// reads and writes to check for race conditions.
func TestCache_Concurrent(t *testing.T) {
cache := NewCache().WithJanitor(100 * time.Millisecond)
defer cache.Stop()
var wg sync.WaitGroup
numGoroutines := 100
numOperations := 1000
// Start concurrent writer goroutines.
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(g int) {
defer wg.Done()
for j := 0; j < numOperations; j++ {
key := string(rune(g*numOperations + j))
value := g*numOperations + j
cache.Set(key, value, 10*time.Second)
}
}(i)
}
// Start concurrent reader goroutines.
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(g int) {
defer wg.Done()
for j := 0; j < numOperations; j++ {
key := string(rune(g*numOperations + j))
cache.Get(key) // We don't check the result, just that access is safe.
}
}(i)
}
// Wait for all goroutines to complete. If a race condition exists, the Go
// race detector (`go test -race`) will likely catch it.
wg.Wait()
}

View File

@@ -1,291 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package helpers provides utility functions for transforming and processing Neo4j
// schema data. It includes functions for converting raw query results from both
// APOC and native Cypher queries into a standardized, structured format.
package helpers
import (
"fmt"
"sort"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/types"
)
// ConvertToStringSlice converts a slice of any type to a slice of strings.
// It uses fmt.Sprintf to perform the conversion for each element.
// Example:
//
// input: []any{"user", 123, true}
// output: []string{"user", "123", "true"}
func ConvertToStringSlice(slice []any) []string {
result := make([]string, len(slice))
for i, v := range slice {
result[i] = fmt.Sprintf("%v", v)
}
return result
}
// GetStringValue safely converts any value to its string representation.
// If the input value is nil, it returns an empty string.
func GetStringValue(val any) string {
if val == nil {
return ""
}
return fmt.Sprintf("%v", val)
}
// MapToAPOCSchema converts a raw map from a Cypher query into a structured
// APOCSchemaResult. This is a workaround for database drivers that may return
// complex nested structures as `map[string]any` instead of unmarshalling
// directly into a struct. It achieves this by marshalling the map to YAML and
// then unmarshalling into the target struct.
func MapToAPOCSchema(schemaMap map[string]any) (*types.APOCSchemaResult, error) {
schemaBytes, err := yaml.Marshal(schemaMap)
if err != nil {
return nil, fmt.Errorf("failed to marshal schema map: %w", err)
}
var entities map[string]types.APOCEntity
if err = yaml.Unmarshal(schemaBytes, &entities); err != nil {
return nil, fmt.Errorf("failed to unmarshal schema map into entities: %w", err)
}
return &types.APOCSchemaResult{Value: entities}, nil
}
// ProcessAPOCSchema transforms the nested result from the `apoc.meta.schema()`
// procedure into flat lists of node labels and relationships, along with
// aggregated database statistics. It iterates through entities, processes nodes,
// and extracts outgoing relationship information nested within those nodes.
func ProcessAPOCSchema(apocSchema *types.APOCSchemaResult) ([]types.NodeLabel, []types.Relationship, *types.Statistics) {
var nodeLabels []types.NodeLabel
relMap := make(map[string]*types.Relationship)
stats := &types.Statistics{
NodesByLabel: make(map[string]int64),
RelationshipsByType: make(map[string]int64),
PropertiesByLabel: make(map[string]int64),
PropertiesByRelType: make(map[string]int64),
}
for name, entity := range apocSchema.Value {
// We only process top-level entities of type "node". Relationship info is
// derived from the "relationships" field within each node entity.
if entity.Type != "node" {
continue
}
nodeLabel := types.NodeLabel{
Name: name,
Count: entity.Count,
Properties: extractAPOCProperties(entity.Properties),
}
nodeLabels = append(nodeLabels, nodeLabel)
// Aggregate statistics for the node.
stats.NodesByLabel[name] = entity.Count
stats.TotalNodes += entity.Count
propCount := int64(len(nodeLabel.Properties))
stats.PropertiesByLabel[name] = propCount
stats.TotalProperties += propCount * entity.Count
// Extract relationship information from the node.
for relName, relInfo := range entity.Relationships {
// Only process outgoing relationships to avoid double-counting.
if relInfo.Direction != "out" {
continue
}
rel, exists := relMap[relName]
if !exists {
rel = &types.Relationship{
Type: relName,
Properties: extractAPOCProperties(relInfo.Properties),
}
if len(relInfo.Labels) > 0 {
rel.EndNode = relInfo.Labels[0]
}
rel.StartNode = name
relMap[relName] = rel
}
rel.Count += relInfo.Count
}
}
// Consolidate the relationships from the map into a slice and update stats.
relationships := make([]types.Relationship, 0, len(relMap))
for _, rel := range relMap {
relationships = append(relationships, *rel)
stats.RelationshipsByType[rel.Type] = rel.Count
stats.TotalRelationships += rel.Count
propCount := int64(len(rel.Properties))
stats.PropertiesByRelType[rel.Type] = propCount
stats.TotalProperties += propCount * rel.Count
}
sortAndClean(nodeLabels, relationships, stats)
// Set empty maps and lists to nil for cleaner output.
if len(nodeLabels) == 0 {
nodeLabels = nil
}
if len(relationships) == 0 {
relationships = nil
}
return nodeLabels, relationships, stats
}
// ProcessNonAPOCSchema serves as an alternative to ProcessAPOCSchema for environments
// where APOC procedures are not available. It converts schema data gathered from
// multiple separate, native Cypher queries (providing node counts, property maps, etc.)
// into the same standardized, structured format.
func ProcessNonAPOCSchema(
nodeCounts map[string]int64,
nodePropsMap map[string]map[string]map[string]bool,
relCounts map[string]int64,
relPropsMap map[string]map[string]map[string]bool,
relConnectivity map[string]types.RelConnectivityInfo,
) ([]types.NodeLabel, []types.Relationship, *types.Statistics) {
stats := &types.Statistics{
NodesByLabel: make(map[string]int64),
RelationshipsByType: make(map[string]int64),
PropertiesByLabel: make(map[string]int64),
PropertiesByRelType: make(map[string]int64),
}
// Process node information.
nodeLabels := make([]types.NodeLabel, 0, len(nodeCounts))
for label, count := range nodeCounts {
properties := make([]types.PropertyInfo, 0)
if props, ok := nodePropsMap[label]; ok {
for key, typeSet := range props {
typeList := make([]string, 0, len(typeSet))
for tp := range typeSet {
typeList = append(typeList, tp)
}
sort.Strings(typeList)
properties = append(properties, types.PropertyInfo{Name: key, Types: typeList})
}
}
sort.Slice(properties, func(i, j int) bool { return properties[i].Name < properties[j].Name })
nodeLabels = append(nodeLabels, types.NodeLabel{Name: label, Count: count, Properties: properties})
// Aggregate node statistics.
stats.NodesByLabel[label] = count
stats.TotalNodes += count
propCount := int64(len(properties))
stats.PropertiesByLabel[label] = propCount
stats.TotalProperties += propCount * count
}
// Process relationship information.
relationships := make([]types.Relationship, 0, len(relCounts))
for relType, count := range relCounts {
properties := make([]types.PropertyInfo, 0)
if props, ok := relPropsMap[relType]; ok {
for key, typeSet := range props {
typeList := make([]string, 0, len(typeSet))
for tp := range typeSet {
typeList = append(typeList, tp)
}
sort.Strings(typeList)
properties = append(properties, types.PropertyInfo{Name: key, Types: typeList})
}
}
sort.Slice(properties, func(i, j int) bool { return properties[i].Name < properties[j].Name })
conn := relConnectivity[relType]
relationships = append(relationships, types.Relationship{
Type: relType,
Count: count,
StartNode: conn.StartNode,
EndNode: conn.EndNode,
Properties: properties,
})
// Aggregate relationship statistics.
stats.RelationshipsByType[relType] = count
stats.TotalRelationships += count
propCount := int64(len(properties))
stats.PropertiesByRelType[relType] = propCount
stats.TotalProperties += propCount * count
}
sortAndClean(nodeLabels, relationships, stats)
// Set empty maps and lists to nil for cleaner output.
if len(nodeLabels) == 0 {
nodeLabels = nil
}
if len(relationships) == 0 {
relationships = nil
}
return nodeLabels, relationships, stats
}
// extractAPOCProperties is a helper that converts a map of APOC property
// information into a slice of standardized PropertyInfo structs. The resulting
// slice is sorted by property name for consistent ordering.
func extractAPOCProperties(props map[string]types.APOCProperty) []types.PropertyInfo {
properties := make([]types.PropertyInfo, 0, len(props))
for name, info := range props {
properties = append(properties, types.PropertyInfo{
Name: name,
Types: []string{info.Type},
Indexed: info.Indexed,
Unique: info.Unique,
Mandatory: info.Existence,
})
}
sort.Slice(properties, func(i, j int) bool {
return properties[i].Name < properties[j].Name
})
return properties
}
// sortAndClean performs final processing on the schema data. It sorts node and
// relationship slices for consistent output, primarily by count (descending) and
// secondarily by name/type. It also sets any empty maps in the statistics
// struct to nil, which can simplify downstream serialization (e.g., omitting
// empty fields in JSON).
func sortAndClean(nodeLabels []types.NodeLabel, relationships []types.Relationship, stats *types.Statistics) {
// Sort nodes by count (desc) then name (asc).
sort.Slice(nodeLabels, func(i, j int) bool {
if nodeLabels[i].Count != nodeLabels[j].Count {
return nodeLabels[i].Count > nodeLabels[j].Count
}
return nodeLabels[i].Name < nodeLabels[j].Name
})
// Sort relationships by count (desc) then type (asc).
sort.Slice(relationships, func(i, j int) bool {
if relationships[i].Count != relationships[j].Count {
return relationships[i].Count > relationships[j].Count
}
return relationships[i].Type < relationships[j].Type
})
// Nil out empty maps for cleaner output.
if len(stats.NodesByLabel) == 0 {
stats.NodesByLabel = nil
}
if len(stats.RelationshipsByType) == 0 {
stats.RelationshipsByType = nil
}
if len(stats.PropertiesByLabel) == 0 {
stats.PropertiesByLabel = nil
}
if len(stats.PropertiesByRelType) == 0 {
stats.PropertiesByRelType = nil
}
}
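
The marshal/unmarshal round-trip in MapToAPOCSchema is a general technique for coercing a loosely typed map[string]any into typed structs. A generic sketch of the same trick with the same yaml package (toStruct is hypothetical):

// toStruct coerces a loosely typed map into T by round-tripping through
// YAML, mirroring the technique MapToAPOCSchema uses above.
func toStruct[T any](m map[string]any) (T, error) {
	var out T
	b, err := yaml.Marshal(m)
	if err != nil {
		return out, err
	}
	if err := yaml.Unmarshal(b, &out); err != nil {
		return out, err
	}
	return out, nil
}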

View File

@@ -1,384 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helpers
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/types"
)
func TestHelperFunctions(t *testing.T) {
t.Run("ConvertToStringSlice", func(t *testing.T) {
tests := []struct {
name string
input []any
want []string
}{
{
name: "empty slice",
input: []any{},
want: []string{},
},
{
name: "string values",
input: []any{"a", "b", "c"},
want: []string{"a", "b", "c"},
},
{
name: "mixed types",
input: []any{"string", 123, true, 45.67},
want: []string{"string", "123", "true", "45.67"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ConvertToStringSlice(tt.input)
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("ConvertToStringSlice() mismatch (-want +got):\n%s", diff)
}
})
}
})
t.Run("GetStringValue", func(t *testing.T) {
tests := []struct {
name string
input any
want string
}{
{
name: "nil value",
input: nil,
want: "",
},
{
name: "string value",
input: "test",
want: "test",
},
{
name: "int value",
input: 42,
want: "42",
},
{
name: "bool value",
input: true,
want: "true",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := GetStringValue(tt.input)
if got != tt.want {
t.Errorf("GetStringValue() got %q, want %q", got, tt.want)
}
})
}
})
}
func TestMapToAPOCSchema(t *testing.T) {
tests := []struct {
name string
input map[string]any
want *types.APOCSchemaResult
wantErr bool
}{
{
name: "simple node schema",
input: map[string]any{
"Person": map[string]any{
"type": "node",
"count": int64(150),
"properties": map[string]any{
"name": map[string]any{
"type": "STRING",
"unique": false,
"indexed": true,
"existence": false,
},
},
},
},
want: &types.APOCSchemaResult{
Value: map[string]types.APOCEntity{
"Person": {
Type: "node",
Count: 150,
Properties: map[string]types.APOCProperty{
"name": {
Type: "STRING",
Unique: false,
Indexed: true,
Existence: false,
},
},
},
},
},
wantErr: false,
},
{
name: "empty input",
input: map[string]any{},
want: &types.APOCSchemaResult{Value: map[string]types.APOCEntity{}},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := MapToAPOCSchema(tt.input)
if (err != nil) != tt.wantErr {
t.Errorf("MapToAPOCSchema() error = %v, wantErr %v", err, tt.wantErr)
return
}
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("MapToAPOCSchema() mismatch (-want +got):\n%s", diff)
}
})
}
}
func TestProcessAPOCSchema(t *testing.T) {
tests := []struct {
name string
input *types.APOCSchemaResult
wantNodes []types.NodeLabel
wantRels []types.Relationship
wantStats *types.Statistics
statsAreEmpty bool
}{
{
name: "empty schema",
input: &types.APOCSchemaResult{
Value: map[string]types.APOCEntity{},
},
wantNodes: nil,
wantRels: nil,
statsAreEmpty: true,
},
{
name: "simple node only",
input: &types.APOCSchemaResult{
Value: map[string]types.APOCEntity{
"Person": {
Type: "node",
Count: 100,
Properties: map[string]types.APOCProperty{
"name": {Type: "STRING", Indexed: true},
"age": {Type: "INTEGER"},
},
},
},
},
wantNodes: []types.NodeLabel{
{
Name: "Person",
Count: 100,
Properties: []types.PropertyInfo{
{Name: "age", Types: []string{"INTEGER"}},
{Name: "name", Types: []string{"STRING"}, Indexed: true},
},
},
},
wantRels: nil,
wantStats: &types.Statistics{
NodesByLabel: map[string]int64{"Person": 100},
PropertiesByLabel: map[string]int64{"Person": 2},
TotalNodes: 100,
TotalProperties: 200,
},
},
{
name: "nodes and relationships",
input: &types.APOCSchemaResult{
Value: map[string]types.APOCEntity{
"Person": {
Type: "node",
Count: 100,
Properties: map[string]types.APOCProperty{
"name": {Type: "STRING", Unique: true, Indexed: true, Existence: true},
},
Relationships: map[string]types.APOCRelationshipInfo{
"KNOWS": {
Direction: "out",
Count: 50,
Labels: []string{"Person"},
Properties: map[string]types.APOCProperty{
"since": {Type: "INTEGER"},
},
},
},
},
"Post": {
Type: "node",
Count: 200,
Properties: map[string]types.APOCProperty{"content": {Type: "STRING"}},
},
"FOLLOWS": {Type: "relationship", Count: 80},
},
},
wantNodes: []types.NodeLabel{
{
Name: "Post",
Count: 200,
Properties: []types.PropertyInfo{
{Name: "content", Types: []string{"STRING"}},
},
},
{
Name: "Person",
Count: 100,
Properties: []types.PropertyInfo{
{Name: "name", Types: []string{"STRING"}, Unique: true, Indexed: true, Mandatory: true},
},
},
},
wantRels: []types.Relationship{
{
Type: "KNOWS",
StartNode: "Person",
EndNode: "Person",
Count: 50,
Properties: []types.PropertyInfo{
{Name: "since", Types: []string{"INTEGER"}},
},
},
},
wantStats: &types.Statistics{
NodesByLabel: map[string]int64{"Person": 100, "Post": 200},
RelationshipsByType: map[string]int64{"KNOWS": 50},
PropertiesByLabel: map[string]int64{"Person": 1, "Post": 1},
PropertiesByRelType: map[string]int64{"KNOWS": 1},
TotalNodes: 300,
TotalRelationships: 50,
TotalProperties: 350, // (100*1 + 200*1) for nodes + (50*1) for rels
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotNodes, gotRels, gotStats := ProcessAPOCSchema(tt.input)
if diff := cmp.Diff(tt.wantNodes, gotNodes); diff != "" {
t.Errorf("ProcessAPOCSchema() node labels mismatch (-want +got):\n%s", diff)
}
if diff := cmp.Diff(tt.wantRels, gotRels); diff != "" {
t.Errorf("ProcessAPOCSchema() relationships mismatch (-want +got):\n%s", diff)
}
if tt.statsAreEmpty {
tt.wantStats = &types.Statistics{}
}
if diff := cmp.Diff(tt.wantStats, gotStats); diff != "" {
t.Errorf("ProcessAPOCSchema() statistics mismatch (-want +got):\n%s", diff)
}
})
}
}
func TestProcessNonAPOCSchema(t *testing.T) {
t.Run("full schema processing", func(t *testing.T) {
nodeCounts := map[string]int64{"Person": 10, "City": 5}
nodePropsMap := map[string]map[string]map[string]bool{
"Person": {"name": {"STRING": true}, "age": {"INTEGER": true}},
"City": {"name": {"STRING": true, "TEXT": true}},
}
relCounts := map[string]int64{"LIVES_IN": 8}
relPropsMap := map[string]map[string]map[string]bool{
"LIVES_IN": {"since": {"DATE": true}},
}
relConnectivity := map[string]types.RelConnectivityInfo{
"LIVES_IN": {StartNode: "Person", EndNode: "City", Count: 8},
}
wantNodes := []types.NodeLabel{
{
Name: "Person",
Count: 10,
Properties: []types.PropertyInfo{
{Name: "age", Types: []string{"INTEGER"}},
{Name: "name", Types: []string{"STRING"}},
},
},
{
Name: "City",
Count: 5,
Properties: []types.PropertyInfo{
{Name: "name", Types: []string{"STRING", "TEXT"}},
},
},
}
wantRels := []types.Relationship{
{
Type: "LIVES_IN",
Count: 8,
StartNode: "Person",
EndNode: "City",
Properties: []types.PropertyInfo{
{Name: "since", Types: []string{"DATE"}},
},
},
}
wantStats := &types.Statistics{
TotalNodes: 15,
TotalRelationships: 8,
TotalProperties: 33, // (10*2 + 5*1) for nodes + (8*1) for rels
NodesByLabel: map[string]int64{"Person": 10, "City": 5},
RelationshipsByType: map[string]int64{"LIVES_IN": 8},
PropertiesByLabel: map[string]int64{"Person": 2, "City": 1},
PropertiesByRelType: map[string]int64{"LIVES_IN": 1},
}
gotNodes, gotRels, gotStats := ProcessNonAPOCSchema(nodeCounts, nodePropsMap, relCounts, relPropsMap, relConnectivity)
if diff := cmp.Diff(wantNodes, gotNodes); diff != "" {
t.Errorf("ProcessNonAPOCSchema() nodes mismatch (-want +got):\n%s", diff)
}
if diff := cmp.Diff(wantRels, gotRels); diff != "" {
t.Errorf("ProcessNonAPOCSchema() relationships mismatch (-want +got):\n%s", diff)
}
if diff := cmp.Diff(wantStats, gotStats); diff != "" {
t.Errorf("ProcessNonAPOCSchema() stats mismatch (-want +got):\n%s", diff)
}
})
t.Run("empty schema", func(t *testing.T) {
gotNodes, gotRels, gotStats := ProcessNonAPOCSchema(
map[string]int64{},
map[string]map[string]map[string]bool{},
map[string]int64{},
map[string]map[string]map[string]bool{},
map[string]types.RelConnectivityInfo{},
)
if len(gotNodes) != 0 {
t.Errorf("expected 0 nodes, got %d", len(gotNodes))
}
if len(gotRels) != 0 {
t.Errorf("expected 0 relationships, got %d", len(gotRels))
}
if diff := cmp.Diff(&types.Statistics{}, gotStats); diff != "" {
t.Errorf("ProcessNonAPOCSchema() stats mismatch (-want +got):\n%s", diff)
}
})
}

View File

@@ -1,712 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package neo4jschema
import (
"context"
"fmt"
"sync"
"time"
"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
neo4jsc "github.com/googleapis/genai-toolbox/internal/sources/neo4j"
"github.com/googleapis/genai-toolbox/internal/tools"
"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/cache"
"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/helpers"
"github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jschema/types"
"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)
// kind defines the unique identifier for this tool.
const kind string = "neo4j-schema"
// init registers the tool with the application's tool registry when the package is initialized.
func init() {
if !tools.Register(kind, newConfig) {
panic(fmt.Sprintf("tool kind %q already registered", kind))
}
}
// newConfig decodes a YAML configuration into a Config struct.
// This function is called by the tool registry to create a new configuration object.
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
actual := Config{Name: name}
if err := decoder.DecodeContext(ctx, &actual); err != nil {
return nil, err
}
return actual, nil
}
// compatibleSource defines the interface a data source must implement to be used by this tool.
// It ensures that the source can provide a Neo4j driver and database name.
type compatibleSource interface {
Neo4jDriver() neo4j.DriverWithContext
Neo4jDatabase() string
}
// Statically verify that our compatible source implementation is valid.
var _ compatibleSource = &neo4jsc.Source{}
// compatibleSources lists the kinds of sources that are compatible with this tool.
var compatibleSources = [...]string{neo4jsc.SourceKind}
// Config holds the configuration settings for the Neo4j schema tool.
// These settings are typically read from a YAML file.
type Config struct {
Name string `yaml:"name" validate:"required"`
Kind string `yaml:"kind" validate:"required"`
Source string `yaml:"source" validate:"required"`
Description string `yaml:"description" validate:"required"`
AuthRequired []string `yaml:"authRequired"`
CacheExpireMinutes *int `yaml:"cacheExpireMinutes,omitempty"` // Cache expiration time in minutes.
}
// Statically verify that Config implements the tools.ToolConfig interface.
var _ tools.ToolConfig = Config{}
// ToolConfigKind returns the kind of this tool configuration.
func (cfg Config) ToolConfigKind() string {
return kind
}
// Initialize sets up the tool with its dependencies and returns a ready-to-use Tool instance.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
// Verify that the specified source exists.
rawS, ok := srcs[cfg.Source]
if !ok {
return nil, fmt.Errorf("no source named %q configured", cfg.Source)
}
// Verify the source is of a compatible kind.
s, ok := rawS.(compatibleSource)
if !ok {
return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
}
parameters := tools.Parameters{}
mcpManifest := tools.McpManifest{
Name: cfg.Name,
Description: cfg.Description,
InputSchema: parameters.McpManifest(),
}
// Set a default cache expiration if not provided in the configuration.
if cfg.CacheExpireMinutes == nil {
defaultExpiration := cache.DefaultExpiration // Default to 60 minutes
cfg.CacheExpireMinutes = &defaultExpiration
}
// Finish tool setup by creating the Tool instance.
t := Tool{
Name: cfg.Name,
Kind: kind,
AuthRequired: cfg.AuthRequired,
Driver: s.Neo4jDriver(),
Database: s.Neo4jDatabase(),
cache: cache.NewCache(),
cacheExpireMinutes: cfg.CacheExpireMinutes,
manifest: tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
mcpManifest: mcpManifest,
}
return t, nil
}
// Statically verify that Tool implements the tools.Tool interface.
var _ tools.Tool = Tool{}
// Tool represents the Neo4j schema extraction tool.
// It holds the Neo4j driver, database information, and a cache for the schema.
type Tool struct {
Name string `yaml:"name"`
Kind string `yaml:"kind"`
AuthRequired []string `yaml:"authRequired"`
Driver neo4j.DriverWithContext
Database string
cache *cache.Cache
cacheExpireMinutes *int
manifest tools.Manifest
mcpManifest tools.McpManifest
}
// Invoke executes the tool's main logic: fetching the Neo4j schema.
// It first checks the cache for a valid schema before extracting it from the database.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues) (any, error) {
// Check if a valid schema is already in the cache.
if cachedSchema, ok := t.cache.Get("schema"); ok {
if schema, ok := cachedSchema.(*types.SchemaInfo); ok {
return schema, nil
}
}
// If not cached, extract the schema from the database.
schema, err := t.extractSchema(ctx)
if err != nil {
return nil, fmt.Errorf("failed to extract database schema: %w", err)
}
// Cache the newly extracted schema for future use.
expiration := time.Duration(*t.cacheExpireMinutes) * time.Minute
t.cache.Set("schema", schema, expiration)
return schema, nil
}
// ParseParams is a placeholder as this tool does not require input parameters.
func (t Tool) ParseParams(data map[string]any, claimsMap map[string]map[string]any) (tools.ParamValues, error) {
return tools.ParamValues{}, nil
}
// Manifest returns the tool's manifest, which describes its purpose and parameters.
func (t Tool) Manifest() tools.Manifest {
return t.manifest
}
// McpManifest returns the machine-consumable manifest for the tool.
func (t Tool) McpManifest() tools.McpManifest {
return t.mcpManifest
}
// Authorized checks if the tool is authorized to run based on the provided authentication services.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}
// checkAPOCProcedures verifies if essential APOC procedures are available in the database.
// It returns true only if all required procedures are found.
func (t Tool) checkAPOCProcedures(ctx context.Context) (bool, error) {
proceduresToCheck := []string{"apoc.meta.schema", "apoc.meta.cypher.types"}
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
// This query efficiently counts how many of the specified procedures exist.
query := "SHOW PROCEDURES YIELD name WHERE name IN $procs RETURN count(name) AS procCount"
params := map[string]any{"procs": proceduresToCheck}
result, err := session.Run(ctx, query, params)
if err != nil {
return false, fmt.Errorf("failed to execute procedure check query: %w", err)
}
record, err := result.Single(ctx)
if err != nil {
return false, fmt.Errorf("failed to retrieve single result for procedure check: %w", err)
}
rawCount, found := record.Get("procCount")
if !found {
return false, fmt.Errorf("field 'procCount' not found in result record")
}
procCount, ok := rawCount.(int64)
if !ok {
return false, fmt.Errorf("expected 'procCount' to be of type int64, but got %T", rawCount)
}
// Return true only if the number of found procedures matches the number we were looking for.
return procCount == int64(len(proceduresToCheck)), nil
}
// extractSchema orchestrates the concurrent extraction of different parts of the database schema.
// It runs several extraction tasks in parallel for efficiency.
func (t Tool) extractSchema(ctx context.Context) (*types.SchemaInfo, error) {
schema := &types.SchemaInfo{}
var mu sync.Mutex
// Define the different schema extraction tasks.
tasks := []struct {
name string
fn func() error
}{
{
name: "database-info",
fn: func() error {
dbInfo, err := t.extractDatabaseInfo(ctx)
if err != nil {
return fmt.Errorf("failed to extract database info: %w", err)
}
mu.Lock()
defer mu.Unlock()
schema.DatabaseInfo = *dbInfo
return nil
},
},
{
name: "schema-extraction",
fn: func() error {
// Check if APOC procedures are available.
hasAPOC, err := t.checkAPOCProcedures(ctx)
if err != nil {
return fmt.Errorf("failed to check APOC procedures: %w", err)
}
var nodeLabels []types.NodeLabel
var relationships []types.Relationship
var stats *types.Statistics
// Use APOC if available for a more detailed schema; otherwise, use native queries.
if hasAPOC {
nodeLabels, relationships, stats, err = t.GetAPOCSchema(ctx)
} else {
nodeLabels, relationships, stats, err = t.GetSchemaWithoutAPOC(ctx, 100)
}
if err != nil {
return fmt.Errorf("failed to get schema: %w", err)
}
mu.Lock()
defer mu.Unlock()
schema.NodeLabels = nodeLabels
schema.Relationships = relationships
schema.Statistics = *stats
return nil
},
},
{
name: "constraints",
fn: func() error {
constraints, err := t.extractConstraints(ctx)
if err != nil {
return fmt.Errorf("failed to extract constraints: %w", err)
}
mu.Lock()
defer mu.Unlock()
schema.Constraints = constraints
return nil
},
},
{
name: "indexes",
fn: func() error {
indexes, err := t.extractIndexes(ctx)
if err != nil {
return fmt.Errorf("failed to extract indexes: %w", err)
}
mu.Lock()
defer mu.Unlock()
schema.Indexes = indexes
return nil
},
},
}
var wg sync.WaitGroup
errCh := make(chan error, len(tasks))
// Execute all tasks concurrently.
for _, task := range tasks {
wg.Add(1)
go func(task struct {
name string
fn func() error
}) {
defer wg.Done()
if err := task.fn(); err != nil {
errCh <- err
}
}(task)
}
wg.Wait()
close(errCh)
// Collect any errors that occurred during the concurrent tasks.
for err := range errCh {
if err != nil {
schema.Errors = append(schema.Errors, err.Error())
}
}
return schema, nil
}
// GetAPOCSchema extracts schema information using the APOC library, which provides detailed metadata.
func (t Tool) GetAPOCSchema(ctx context.Context) ([]types.NodeLabel, []types.Relationship, *types.Statistics, error) {
var nodeLabels []types.NodeLabel
var relationships []types.Relationship
stats := &types.Statistics{
NodesByLabel: make(map[string]int64),
RelationshipsByType: make(map[string]int64),
PropertiesByLabel: make(map[string]int64),
PropertiesByRelType: make(map[string]int64),
}
var mu sync.Mutex
var firstErr error
ctx, cancel := context.WithCancel(ctx)
defer cancel()
handleError := func(err error) {
mu.Lock()
defer mu.Unlock()
if firstErr == nil {
firstErr = err
cancel() // Cancel other operations on the first error.
}
}
tasks := []struct {
name string
fn func(session neo4j.SessionWithContext) error
}{
{
name: "apoc-schema",
fn: func(session neo4j.SessionWithContext) error {
result, err := session.Run(ctx, "CALL apoc.meta.schema({sample: 10}) YIELD value RETURN value", nil)
if err != nil {
return fmt.Errorf("failed to run APOC schema query: %w", err)
}
if !result.Next(ctx) {
return fmt.Errorf("no results from APOC schema query")
}
schemaMap, ok := result.Record().Values[0].(map[string]any)
if !ok {
return fmt.Errorf("unexpected result format from APOC schema query: %T", result.Record().Values[0])
}
apocSchema, err := helpers.MapToAPOCSchema(schemaMap)
if err != nil {
return fmt.Errorf("failed to convert schema map to APOCSchemaResult: %w", err)
}
nodes, _, apocStats := helpers.ProcessAPOCSchema(apocSchema)
mu.Lock()
defer mu.Unlock()
nodeLabels = nodes
stats.TotalNodes = apocStats.TotalNodes
stats.TotalProperties += apocStats.TotalProperties
stats.NodesByLabel = apocStats.NodesByLabel
stats.PropertiesByLabel = apocStats.PropertiesByLabel
return nil
},
},
{
name: "apoc-relationships",
fn: func(session neo4j.SessionWithContext) error {
query := `
MATCH (startNode)-[rel]->(endNode)
WITH
labels(startNode)[0] AS startNode,
type(rel) AS relType,
apoc.meta.cypher.types(rel) AS relProperties,
labels(endNode)[0] AS endNode,
count(*) AS count
RETURN relType, startNode, endNode, relProperties, count`
result, err := session.Run(ctx, query, nil)
if err != nil {
return fmt.Errorf("failed to extract relationships: %w", err)
}
for result.Next(ctx) {
record := result.Record()
relType, startNode, endNode := record.Values[0].(string), record.Values[1].(string), record.Values[2].(string)
properties, count := record.Values[3].(map[string]any), record.Values[4].(int64)
if relType == "" || count == 0 {
continue
}
relationship := types.Relationship{Type: relType, StartNode: startNode, EndNode: endNode, Count: count, Properties: []types.PropertyInfo{}}
for prop, propType := range properties {
relationship.Properties = append(relationship.Properties, types.PropertyInfo{Name: prop, Types: []string{propType.(string)}})
}
mu.Lock()
relationships = append(relationships, relationship)
stats.RelationshipsByType[relType] += count
stats.TotalRelationships += count
propCount := int64(len(relationship.Properties))
stats.TotalProperties += propCount
stats.PropertiesByRelType[relType] += propCount
mu.Unlock()
}
mu.Lock()
defer mu.Unlock()
if len(stats.RelationshipsByType) == 0 {
stats.RelationshipsByType = nil
}
if len(stats.PropertiesByRelType) == 0 {
stats.PropertiesByRelType = nil
}
return nil
},
},
}
var wg sync.WaitGroup
wg.Add(len(tasks))
for _, task := range tasks {
go func(task struct {
name string
fn func(session neo4j.SessionWithContext) error
}) {
defer wg.Done()
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
if err := task.fn(session); err != nil {
handleError(fmt.Errorf("task %s failed: %w", task.name, err))
}
}(task)
}
wg.Wait()
if firstErr != nil {
return nil, nil, nil, firstErr
}
return nodeLabels, relationships, stats, nil
}
// GetSchemaWithoutAPOC extracts schema information using native Cypher queries.
// It serves as a fallback for databases without APOC installed. Note that it relies
// on the native valueType() function, which is only available in newer Neo4j 5 releases.
func (t Tool) GetSchemaWithoutAPOC(ctx context.Context, sampleSize int) ([]types.NodeLabel, []types.Relationship, *types.Statistics, error) {
nodePropsMap := make(map[string]map[string]map[string]bool)
relPropsMap := make(map[string]map[string]map[string]bool)
nodeCounts := make(map[string]int64)
relCounts := make(map[string]int64)
relConnectivity := make(map[string]types.RelConnectivityInfo)
var mu sync.Mutex
var firstErr error
ctx, cancel := context.WithCancel(ctx)
defer cancel()
handleError := func(err error) {
mu.Lock()
defer mu.Unlock()
if firstErr == nil {
firstErr = err
cancel()
}
}
tasks := []struct {
name string
fn func(session neo4j.SessionWithContext) error
}{
{
name: "node-schema",
fn: func(session neo4j.SessionWithContext) error {
countResult, err := session.Run(ctx, `MATCH (n) UNWIND labels(n) AS label RETURN label, count(*) AS count ORDER BY count DESC`, nil)
if err != nil {
return fmt.Errorf("node count query failed: %w", err)
}
var labelsList []string
mu.Lock()
for countResult.Next(ctx) {
record := countResult.Record()
label, count := record.Values[0].(string), record.Values[1].(int64)
nodeCounts[label] = count
labelsList = append(labelsList, label)
}
mu.Unlock()
if err = countResult.Err(); err != nil {
return fmt.Errorf("node count result error: %w", err)
}
for _, label := range labelsList {
// Backtick-quote the label so names containing non-identifier characters stay valid Cypher.
propQuery := fmt.Sprintf("MATCH (n:`%s`) WITH n LIMIT $sampleSize UNWIND keys(n) AS key WITH key, n[key] AS value WHERE value IS NOT NULL RETURN key, COLLECT(DISTINCT valueType(value)) AS types", label)
propResult, err := session.Run(ctx, propQuery, map[string]any{"sampleSize": sampleSize})
if err != nil {
return fmt.Errorf("node properties query for label %s failed: %w", label, err)
}
mu.Lock()
if nodePropsMap[label] == nil {
nodePropsMap[label] = make(map[string]map[string]bool)
}
for propResult.Next(ctx) {
record := propResult.Record()
key, types := record.Values[0].(string), record.Values[1].([]any)
if nodePropsMap[label][key] == nil {
nodePropsMap[label][key] = make(map[string]bool)
}
for _, tp := range types {
nodePropsMap[label][key][tp.(string)] = true
}
}
mu.Unlock()
if err = propResult.Err(); err != nil {
return fmt.Errorf("node properties result error for label %s: %w", label, err)
}
}
return nil
},
},
{
name: "relationship-schema",
fn: func(session neo4j.SessionWithContext) error {
relQuery := `
MATCH (start)-[r]->(end)
WITH type(r) AS relType, labels(start) AS startLabels, labels(end) AS endLabels, count(*) AS count
RETURN relType, CASE WHEN size(startLabels) > 0 THEN startLabels[0] ELSE null END AS startLabel, CASE WHEN size(endLabels) > 0 THEN endLabels[0] ELSE null END AS endLabel, sum(count) AS totalCount
ORDER BY totalCount DESC`
relResult, err := session.Run(ctx, relQuery, nil)
if err != nil {
return fmt.Errorf("relationship count query failed: %w", err)
}
var relTypesList []string
mu.Lock()
for relResult.Next(ctx) {
record := relResult.Record()
relType := record.Values[0].(string)
startLabel := ""
if record.Values[1] != nil {
startLabel = record.Values[1].(string)
}
endLabel := ""
if record.Values[2] != nil {
endLabel = record.Values[2].(string)
}
count := record.Values[3].(int64)
// A relationship type can appear in several (start, end) label groups, so
// accumulate its count and record the type itself only once.
if _, seen := relCounts[relType]; !seen {
relTypesList = append(relTypesList, relType)
}
relCounts[relType] += count
if existing, ok := relConnectivity[relType]; !ok || count > existing.Count {
relConnectivity[relType] = types.RelConnectivityInfo{StartNode: startLabel, EndNode: endLabel, Count: count}
}
}
mu.Unlock()
if err = relResult.Err(); err != nil {
return fmt.Errorf("relationship count result error: %w", err)
}
for _, relType := range relTypesList {
// Backtick-quote the relationship type for the same reason as node labels above.
propQuery := fmt.Sprintf("MATCH ()-[r:`%s`]->() WITH r LIMIT $sampleSize WHERE size(keys(r)) > 0 UNWIND keys(r) AS key WITH key, r[key] AS value WHERE value IS NOT NULL RETURN key, COLLECT(DISTINCT valueType(value)) AS types", relType)
propResult, err := session.Run(ctx, propQuery, map[string]any{"sampleSize": sampleSize})
if err != nil {
return fmt.Errorf("relationship properties query for type %s failed: %w", relType, err)
}
mu.Lock()
if relPropsMap[relType] == nil {
relPropsMap[relType] = make(map[string]map[string]bool)
}
for propResult.Next(ctx) {
record := propResult.Record()
key, propTypes := record.Values[0].(string), record.Values[1].([]any)
if relPropsMap[relType][key] == nil {
relPropsMap[relType][key] = make(map[string]bool)
}
for _, t := range propTypes {
relPropsMap[relType][key][t.(string)] = true
}
}
mu.Unlock()
if err = propResult.Err(); err != nil {
return fmt.Errorf("relationship properties result error for type %s: %w", relType, err)
}
}
return nil
},
},
}
var wg sync.WaitGroup
wg.Add(len(tasks))
for _, task := range tasks {
go func(task struct {
name string
fn func(session neo4j.SessionWithContext) error
}) {
defer wg.Done()
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
if err := task.fn(session); err != nil {
handleError(fmt.Errorf("task %s failed: %w", task.name, err))
}
}(task)
}
wg.Wait()
if firstErr != nil {
return nil, nil, nil, firstErr
}
nodeLabels, relationships, stats := helpers.ProcessNonAPOCSchema(nodeCounts, nodePropsMap, relCounts, relPropsMap, relConnectivity)
return nodeLabels, relationships, stats, nil
}
// extractDatabaseInfo retrieves general information about the Neo4j database instance.
func (t Tool) extractDatabaseInfo(ctx context.Context) (*types.DatabaseInfo, error) {
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
result, err := session.Run(ctx, "CALL dbms.components() YIELD name, versions, edition", nil)
if err != nil {
return nil, err
}
dbInfo := &types.DatabaseInfo{}
if result.Next(ctx) {
record := result.Record()
dbInfo.Name = record.Values[0].(string)
if versions, ok := record.Values[1].([]any); ok && len(versions) > 0 {
dbInfo.Version = versions[0].(string)
}
dbInfo.Edition = record.Values[2].(string)
}
return dbInfo, result.Err()
}
// extractConstraints fetches all schema constraints from the database.
func (t Tool) extractConstraints(ctx context.Context) ([]types.Constraint, error) {
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
result, err := session.Run(ctx, "SHOW CONSTRAINTS", nil)
if err != nil {
return nil, err
}
var constraints []types.Constraint
for result.Next(ctx) {
record := result.Record().AsMap()
constraint := types.Constraint{
Name: helpers.GetStringValue(record["name"]),
Type: helpers.GetStringValue(record["type"]),
EntityType: helpers.GetStringValue(record["entityType"]),
}
if labels, ok := record["labelsOrTypes"].([]any); ok && len(labels) > 0 {
constraint.Label = labels[0].(string)
}
if props, ok := record["properties"].([]any); ok {
constraint.Properties = helpers.ConvertToStringSlice(props)
}
constraints = append(constraints, constraint)
}
return constraints, result.Err()
}
// extractIndexes fetches all schema indexes from the database.
func (t Tool) extractIndexes(ctx context.Context) ([]types.Index, error) {
session := t.Driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: t.Database})
defer session.Close(ctx)
result, err := session.Run(ctx, "SHOW INDEXES", nil)
if err != nil {
return nil, err
}
var indexes []types.Index
for result.Next(ctx) {
record := result.Record().AsMap()
index := types.Index{
Name: helpers.GetStringValue(record["name"]),
State: helpers.GetStringValue(record["state"]),
Type: helpers.GetStringValue(record["type"]),
EntityType: helpers.GetStringValue(record["entityType"]),
}
if labels, ok := record["labelsOrTypes"].([]any); ok && len(labels) > 0 {
index.Label = labels[0].(string)
}
if props, ok := record["properties"].([]any); ok {
index.Properties = helpers.ConvertToStringSlice(props)
}
indexes = append(indexes, index)
}
return indexes, result.Err()
}
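
As an aside on the cache-aside flow in Invoke above: the tool only depends on a small Get/Set-with-TTL contract from its cache field. The sketch below is a minimal, self-contained stand-in for that contract (it is not the actual internal cache implementation), useful for following how the schema cache behaves between invocations.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// entry pairs a cached value with its expiry deadline.
type entry struct {
	value    any
	expireAt time.Time
}

// TTLCache is a minimal sketch of the Get/Set contract Invoke depends on.
type TTLCache struct {
	mu    sync.RWMutex
	items map[string]entry
}

func NewTTLCache() *TTLCache {
	return &TTLCache{items: make(map[string]entry)}
}

// Get returns the value and true only if the entry exists and has not expired.
func (c *TTLCache) Get(key string) (any, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.items[key]
	if !ok || time.Now().After(e.expireAt) {
		return nil, false
	}
	return e.value, true
}

// Set stores a value with a per-entry time-to-live.
func (c *TTLCache) Set(key string, value any, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = entry{value: value, expireAt: time.Now().Add(ttl)}
}

func main() {
	c := NewTTLCache()
	c.Set("schema", "cached-schema", 30*time.Minute)
	if v, ok := c.Get("schema"); ok {
		fmt.Println("cache hit:", v) // cache hit: cached-schema
	}
}
```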

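The manual mutex-plus-cancel pattern in GetAPOCSchema and GetSchemaWithoutAPOC (guarding firstErr and cancelling the shared context) can also be expressed with golang.org/x/sync/errgroup, which returns the first error and cancels the group context automatically. A minimal sketch of that alternative, with placeholder task functions standing in for the real schema queries:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	// Placeholder tasks standing in for the schema and relationship queries.
	tasks := map[string]func(context.Context) error{
		"apoc-schema":        func(ctx context.Context) error { return nil },
		"apoc-relationships": func(ctx context.Context) error { return errors.New("boom") },
	}

	for name, fn := range tasks {
		name, fn := name, fn // capture loop variables (pre-Go 1.22 semantics)
		g.Go(func() error {
			// ctx is cancelled as soon as any task returns an error.
			if err := fn(ctx); err != nil {
				return fmt.Errorf("task %s failed: %w", name, err)
			}
			return nil
		})
	}

	// Wait returns the first non-nil error from the group.
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}
```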
@@ -1,99 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package neo4jschema
import (
"testing"
"github.com/goccy/go-yaml"
"github.com/google/go-cmp/cmp"
"github.com/googleapis/genai-toolbox/internal/server"
"github.com/googleapis/genai-toolbox/internal/testutils"
)
func TestParseFromYamlNeo4j(t *testing.T) {
ctx, err := testutils.ContextWithNewLogger()
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
// Expected value for the explicitly set cacheExpireMinutes case below.
exp := 30
tcs := []struct {
desc string
in string
want server.ToolConfigs
}{
{
desc: "basic example with default cache expiration",
in: `
tools:
example_tool:
kind: neo4j-schema
source: my-neo4j-instance
description: some tool description
authRequired:
- my-google-auth-service
- other-auth-service
`,
want: server.ToolConfigs{
"example_tool": Config{
Name: "example_tool",
Kind: "neo4j-schema",
Source: "my-neo4j-instance",
Description: "some tool description",
AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
CacheExpireMinutes: nil,
},
},
},
{
desc: "cache expire minutes set explicitly",
in: `
tools:
example_tool:
kind: neo4j-schema
source: my-neo4j-instance
description: some tool description
cacheExpireMinutes: 30
`,
want: server.ToolConfigs{
"example_tool": Config{
Name: "example_tool",
Kind: "neo4j-schema",
Source: "my-neo4j-instance",
Description: "some tool description",
AuthRequired: []string{}, // Expect an empty slice, not nil.
CacheExpireMinutes: &exp,
},
},
},
}
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {
got := struct {
Tools server.ToolConfigs `yaml:"tools"`
}{}
// Parse contents
err = yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
if err != nil {
t.Fatalf("unable to unmarshal: %s", err)
}
if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
t.Fatalf("incorrect parse: diff %v", diff)
}
})
}
}
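
For reference, the parsed Config in the test above corresponds to a tools file like the following (the source name my-neo4j-instance is a placeholder; cacheExpireMinutes is optional and falls back to the tool's default when omitted):

```yaml
tools:
  neo4j_schema:
    kind: neo4j-schema
    source: my-neo4j-instance
    description: Extracts the full Neo4j database schema.
    # Optional: how long the extracted schema stays cached, in minutes.
    cacheExpireMinutes: 30
```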

@@ -1,127 +0,0 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package types contains the shared data structures for Neo4j schema representation.
package types
// SchemaInfo represents the complete database schema.
type SchemaInfo struct {
NodeLabels []NodeLabel `json:"nodeLabels"`
Relationships []Relationship `json:"relationships"`
Constraints []Constraint `json:"constraints"`
Indexes []Index `json:"indexes"`
DatabaseInfo DatabaseInfo `json:"databaseInfo"`
Statistics Statistics `json:"statistics"`
Errors []string `json:"errors,omitempty"`
}
// NodeLabel represents a node label with its properties.
type NodeLabel struct {
Name string `json:"name"`
Properties []PropertyInfo `json:"properties"`
Count int64 `json:"count"`
}
// RelConnectivityInfo holds information about a relationship's start and end nodes,
// primarily used during schema extraction without APOC procedures.
type RelConnectivityInfo struct {
StartNode string
EndNode string
Count int64
}
// Relationship represents a relationship type with its properties.
type Relationship struct {
Type string `json:"type"`
Properties []PropertyInfo `json:"properties"`
StartNode string `json:"startNode,omitempty"`
EndNode string `json:"endNode,omitempty"`
Count int64 `json:"count"`
}
// PropertyInfo represents a property with its data types.
type PropertyInfo struct {
Name string `json:"name"`
Types []string `json:"types"`
Mandatory bool `json:"-"`
Unique bool `json:"-"`
Indexed bool `json:"-"`
}
// Constraint represents a database constraint.
type Constraint struct {
Name string `json:"name"`
Type string `json:"type"`
EntityType string `json:"entityType"`
Label string `json:"label,omitempty"`
Properties []string `json:"properties"`
}
// Index represents a database index.
type Index struct {
Name string `json:"name"`
State string `json:"state"`
Type string `json:"type"`
EntityType string `json:"entityType"`
Label string `json:"label,omitempty"`
Properties []string `json:"properties"`
}
// DatabaseInfo contains general database information.
type DatabaseInfo struct {
Name string `json:"name"`
Version string `json:"version"`
Edition string `json:"edition,omitempty"`
}
// Statistics contains database statistics.
type Statistics struct {
TotalNodes int64 `json:"totalNodes"`
TotalRelationships int64 `json:"totalRelationships"`
TotalProperties int64 `json:"totalProperties"`
NodesByLabel map[string]int64 `json:"nodesByLabel"`
RelationshipsByType map[string]int64 `json:"relationshipsByType"`
PropertiesByLabel map[string]int64 `json:"propertiesByLabel"`
PropertiesByRelType map[string]int64 `json:"propertiesByRelType"`
}
// APOCSchemaResult represents the result from apoc.meta.schema().
type APOCSchemaResult struct {
Value map[string]APOCEntity `json:"value"`
}
// APOCEntity represents a node or relationship in APOC schema.
type APOCEntity struct {
Type string `json:"type"`
Count int64 `json:"count"`
Labels []string `json:"labels,omitempty"`
Properties map[string]APOCProperty `json:"properties"`
Relationships map[string]APOCRelationshipInfo `json:"relationships,omitempty"`
}
// APOCProperty represents property info from APOC.
type APOCProperty struct {
Type string `json:"type"`
Indexed bool `json:"indexed"`
Unique bool `json:"unique"`
Existence bool `json:"existence"`
}
// APOCRelationshipInfo represents relationship info from APOC.
type APOCRelationshipInfo struct {
Count int64 `json:"count"`
Direction string `json:"direction"`
Labels []string `json:"labels"`
Properties map[string]APOCProperty `json:"properties"`
}
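
To make the wire format concrete, the snippet below marshals a trimmed-down SchemaInfo (local copies of the structs above, reduced to a few fields, for illustration only) and prints the resulting JSON; the real tool returns the full structure from Invoke.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed local copies of the types above, for illustration only.
type PropertyInfo struct {
	Name  string   `json:"name"`
	Types []string `json:"types"`
}

type NodeLabel struct {
	Name       string         `json:"name"`
	Properties []PropertyInfo `json:"properties"`
	Count      int64          `json:"count"`
}

type SchemaInfo struct {
	NodeLabels []NodeLabel `json:"nodeLabels"`
	Errors     []string    `json:"errors,omitempty"`
}

func main() {
	schema := SchemaInfo{
		NodeLabels: []NodeLabel{{
			Name:       "Person",
			Properties: []PropertyInfo{{Name: "name", Types: []string{"STRING"}}},
			Count:      42,
		}},
	}
	out, err := json.MarshalIndent(schema, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```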

@@ -790,15 +790,6 @@ func (p *FloatParameter) Manifest() ParameterManifest {
}
}
// McpManifest returns the MCP manifest for the FloatParameter.
// JSON Schema only allows the numeric types 'integer' and 'number'.
func (p *FloatParameter) McpManifest() ParameterMcpManifest {
return ParameterMcpManifest{
Type: "number",
Description: p.Desc,
}
}
// NewBooleanParameter is a convenience function for initializing a BooleanParameter.
func NewBooleanParameter(name string, desc string) *BooleanParameter {
return &BooleanParameter{

@@ -1327,7 +1327,7 @@ func TestParamMcpManifest(t *testing.T) {
{
name: "float",
in: tools.NewFloatParameter("foo-float", "bar"),
want: tools.ParameterMcpManifest{Type: "number", Description: "bar"},
want: tools.ParameterMcpManifest{Type: "float", Description: "bar"},
},
{
name: "boolean",
@@ -1385,7 +1385,6 @@ func TestMcpManifest(t *testing.T) {
tools.NewStringParameterWithDefault("foo-string", "foo", "bar"),
tools.NewStringParameter("foo-string2", "bar"),
tools.NewIntParameter("foo-int2", "bar"),
tools.NewFloatParameter("foo-float", "bar"),
tools.NewArrayParameter("foo-array2", "bar", tools.NewStringParameter("foo-string", "bar")),
tools.NewMapParameter("foo-map-int", "a map of ints", "integer"),
tools.NewMapParameter("foo-map-any", "a map of any", ""),
@@ -1396,7 +1395,6 @@ func TestMcpManifest(t *testing.T) {
"foo-string": {Type: "string", Description: "bar"},
"foo-string2": {Type: "string", Description: "bar"},
"foo-int2": {Type: "integer", Description: "bar"},
"foo-float": {Type: "number", Description: "bar"},
"foo-array2": {
Type: "array",
Description: "bar",
@@ -1413,7 +1411,7 @@ func TestMcpManifest(t *testing.T) {
AdditionalProperties: true,
},
},
Required: []string{"foo-string2", "foo-int2", "foo-float", "foo-array2", "foo-map-int", "foo-map-any"},
Required: []string{"foo-string2", "foo-int2", "foo-array2", "foo-map-int", "foo-map-any"},
},
},
}
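
The hunks above drop the FloatParameter McpManifest method and its test coverage. As background on the comment about JSON Schema numeric types: a float-typed tool parameter has to surface as "number" in the generated manifest, since JSON Schema defines only "integer" and "number". A small sketch, using a local stand-in for the manifest struct rather than the package's real one:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for the package's ParameterMcpManifest, for illustration only.
type ParameterMcpManifest struct {
	Type        string `json:"type"`
	Description string `json:"description"`
}

func main() {
	// A float parameter is advertised as JSON Schema "number".
	m := ParameterMcpManifest{Type: "number", Description: "a floating-point value"}
	b, _ := json.Marshal(m)
	fmt.Println(string(b)) // {"type":"number","description":"a floating-point value"}
}
```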