Mirror of https://github.com/googleapis/genai-toolbox.git, synced 2026-01-12 17:09:48 -05:00.

Compare commits (23 commits)
- b7e4c12915
- 5530b08f34
- e515d9254f
- d661f5343f
- 6c8460b0e5
- 71f360d315
- 33beb7187d
- 4f46782927
- bf6831fdbe
- bd195d2fe2
- ec0d3a6eb3
- 81d239b053
- 0cd3f16f87
- aa3972470f
- 36d79ef147
- 302faf2513
- 22cf228b88
- 794ad91885
- b5f9780a59
- cce602f280
- 70e832bd08
- 05b14a8824
- 664711f4b3
@@ -620,6 +620,25 @@ steps:
        trino \
        trinosql trinoexecutesql

  - id: "yugabytedb"
    name: golang:1
    waitFor: ["compile-test-binary"]
    entrypoint: /bin/bash
    env:
      - "GOPATH=/gopath"
      - "YUGABYTEDB_DATABASE=$_YUGABYTEDB_DATABASE"
      - "YUGABYTEDB_PORT=$_YUGABYTEDB_PORT"
      - "YUGABYTEDB_LOADBALANCE=$_YUGABYTEDB_LOADBALANCE"
      - "SERVICE_ACCOUNT_EMAIL=$SERVICE_ACCOUNT_EMAIL"
    secretEnv: ["YUGABYTEDB_USER", "YUGABYTEDB_PASS", "YUGABYTEDB_HOST", "CLIENT_ID"]
    volumes:
      - name: "go"
        path: "/gopath"
    args:
      - -c
      - |
        ./yugabytedb.test -test.v

availableSecrets:
  secretManager:
    - versionName: projects/$PROJECT_ID/secrets/cloud_sql_pg_user/versions/latest

@@ -698,6 +717,13 @@ availableSecrets:
      env: OCEANBASE_USER
    - versionName: projects/$PROJECT_ID/secrets/oceanbase_pass/versions/latest
      env: OCEANBASE_PASSWORD
    - versionName: projects/$PROJECT_ID/secrets/yugabytedb_host/versions/latest
      env: YUGABYTEDB_HOST
    - versionName: projects/$PROJECT_ID/secrets/yugabytedb_user/versions/latest
      env: YUGABYTEDB_USER
    - versionName: projects/$PROJECT_ID/secrets/yugabytedb_pass/versions/latest
      env: YUGABYTEDB_PASS

options:
  logging: CLOUD_LOGGING_ONLY

@@ -744,3 +770,6 @@ substitutions:
  _TRINO_SCHEMA: "default"
  _OCEANBASE_PORT: "2883"
  _OCEANBASE_DATABASE: "oceanbase"
  _YUGABYTEDB_DATABASE: "yugabyte"
  _YUGABYTEDB_PORT: "5433"
  _YUGABYTEDB_LOADBALANCE: "false"
.github/blunderbuss.yml (vendored, 17 changed lines)

@@ -1,7 +1,24 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

assign_issues:
  - Yuan325
  - duwenxin99
  - averikitsch
  - anubhav756
  - dishaprakash
  - twishabansal
assign_issues_by:
  - labels:
      - 'product: bigquery'
.github/labels.yaml (vendored, 4 changed lines)

@@ -88,6 +88,10 @@
    color: 8befd7
    description: 'Status: reviewer is awaiting feedback or responses from the author before proceeding.'

  - name: 'release candidate'
    color: 32CD32
    description: 'Use label to signal PR should be included in the next release.'

  # Product Labels
  - name: 'product: bigquery'
    color: 5065c7
@@ -89,6 +89,9 @@ implementation](https://github.com/googleapis/genai-toolbox/blob/main/internal/s

### Adding a New Tool

> [!NOTE]
> Please follow the tool naming convention detailed [here](./DEVELOPER.md#tool-naming-conventions).

We recommend looking at an [example tool
implementation](https://github.com/googleapis/genai-toolbox/tree/main/internal/tools/postgres/postgressql).
DEVELOPER.md (41 changed lines)

@@ -44,6 +44,47 @@ Before you begin, ensure you have the following:
    curl http://127.0.0.1:5000
    ```

### Tool Naming Conventions

This section details the purpose and conventions for MCP Toolbox's tool naming
properties, **tool name** and **tool kind**.

```
cancel_hotel:            <- tool name
  kind: postgres-sql     <- tool kind
  source: my_pg_source
```

#### Tool Name

The tool name is the identifier used by a Large Language Model (LLM) to invoke a
specific tool.

* Custom tools: The user can define any name they want. The guidelines below
  do not apply.
* Pre-built tools: The tool name is predefined and cannot be changed. It
  should follow the guidelines.

The following guidelines apply to tool names:

* Should use underscores over hyphens (e.g., `list_collections` instead of
  `list-collections`).
* Should not include the product name (e.g., `list_collections` instead of
  `firestore_list_collections`).
* Superficial changes are NOT considered breaking (e.g., changing a tool name).
* Non-superficial changes MAY be considered breaking (e.g., adding new
  parameters to a function) until they can be validated through extensive
  testing to ensure they do not negatively impact the agent's performance.

#### Tool Kind

The tool kind serves as a category or type that a user can assign to a tool.

The following guidelines apply to tool kinds (a mechanical check is sketched
after this list):

* Should use hyphens over underscores (e.g., `firestore-list-collections` over
  `firestore_list_collections`).
* Should include the product name (e.g., `firestore-list-collections` over
  `list-collections`).
* Changes to tool kind are breaking changes and should be avoided.
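These conventions are mechanical enough to lint. Below is a minimal sketch (not part of the repository; all function and variable names are hypothetical) of how the name and kind rules above could be checked:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// validToolName: lowercase words joined by underscores, e.g. "list_collections".
var validToolName = regexp.MustCompile(`^[a-z]+(_[a-z]+)*$`)

// validToolKind: lowercase words joined by hyphens, e.g. "firestore-list-collections".
var validToolKind = regexp.MustCompile(`^[a-z]+(-[a-z0-9]+)*$`)

// checkKindHasProduct enforces the "kind includes the product name" guideline.
func checkKindHasProduct(kind, product string) bool {
	return strings.HasPrefix(kind, product+"-")
}

func main() {
	fmt.Println(validToolName.MatchString("list_collections"))                  // true
	fmt.Println(validToolKind.MatchString("firestore-list-collections"))        // true
	fmt.Println(checkKindHasProduct("firestore-list-collections", "firestore")) // true
	fmt.Println(validToolName.MatchString("list-collections"))                  // false: hyphens not allowed in names
}
```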
## Testing

### Infrastructure
cmd/root.go (11 changed lines)

@@ -43,6 +43,7 @@ import (

    // Import tool packages for side effect of registration
    _ "github.com/googleapis/genai-toolbox/internal/tools/alloydbainl"
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryanalyzecontribution"
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryconversationalanalytics"
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryforecast"

@@ -53,7 +54,9 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigquerysql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/bigtable"
    _ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouseexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouselistdatabases"
    _ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhousesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/cloudmonitoring"
    _ "github.com/googleapis/genai-toolbox/internal/tools/couchbase"
    _ "github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexlookupentry"
    _ "github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexsearchaspecttypes"

@@ -66,6 +69,7 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestoregetdocuments"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestoregetrules"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorelistcollections"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorequery"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorequerycollection"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestoreupdatedocument"
    _ "github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorevalidaterules"

@@ -97,6 +101,7 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/tools/mssql/mssqlexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/mssql/mssqlsql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqllisttables"
    _ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlsql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jcypher"
    _ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jexecutecypher"

@@ -104,6 +109,7 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/tools/oceanbase/oceanbaseexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/oceanbase/oceanbasesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgresexecutesql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslisttables"
    _ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgressql"
    _ "github.com/googleapis/genai-toolbox/internal/tools/redis"
    _ "github.com/googleapis/genai-toolbox/internal/tools/spanner/spannerexecutesql"

@@ -116,13 +122,17 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/tools/utility/alloydbwaitforoperation"
    _ "github.com/googleapis/genai-toolbox/internal/tools/utility/wait"
    _ "github.com/googleapis/genai-toolbox/internal/tools/valkey"
    _ "github.com/googleapis/genai-toolbox/internal/tools/yugabytedbsql"

    "github.com/spf13/cobra"

    _ "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
    _ "github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
    _ "github.com/googleapis/genai-toolbox/internal/sources/bigquery"
    _ "github.com/googleapis/genai-toolbox/internal/sources/bigtable"
    _ "github.com/googleapis/genai-toolbox/internal/sources/clickhouse"
    _ "github.com/googleapis/genai-toolbox/internal/sources/cloudmonitoring"
    _ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqladmin"
    _ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmssql"
    _ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
    _ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"

@@ -145,6 +155,7 @@ import (
    _ "github.com/googleapis/genai-toolbox/internal/sources/tidb"
    _ "github.com/googleapis/genai-toolbox/internal/sources/trino"
    _ "github.com/googleapis/genai-toolbox/internal/sources/valkey"
    _ "github.com/googleapis/genai-toolbox/internal/sources/yugabytedb"
)

var (
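The `// Import tool packages for side effect of registration` comment above relies on each package registering itself in `init()`, so a blank (`_`) import is enough to make its kind available. A minimal sketch of that pattern, with hypothetical names (not Toolbox's actual internals):

```go
package registry

import "fmt"

// ToolConfig would be the interface each tool's config type implements.
type ToolConfig interface{ ToolConfigKind() string }

// kinds maps a tool kind (as named in tools.yaml) to a config constructor.
var kinds = map[string]func() ToolConfig{}

// Register is called from each tool package's init() function.
func Register(kind string, ctor func() ToolConfig) {
	if _, dup := kinds[kind]; dup {
		panic(fmt.Sprintf("tool kind %q registered twice", kind))
	}
	kinds[kind] = ctor
}

// Lookup resolves a kind named in a config file to its constructor.
func Lookup(kind string) (func() ToolConfig, bool) {
	ctor, ok := kinds[kind]
	return ctor, ok
}
```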
@@ -1359,7 +1359,7 @@ func TestPrebuiltTools(t *testing.T) {
			wantToolset: server.ToolsetConfigs{
				"bigquery-database-tools": tools.ToolsetConfig{
					Name: "bigquery-database-tools",
-					ToolNames: []string{"ask_data_insights", "execute_sql", "forecast", "get_dataset_info", "get_table_info", "list_dataset_ids", "list_table_ids"},
+					ToolNames: []string{"analyze_contribution", "ask_data_insights", "execute_sql", "forecast", "get_dataset_info", "get_table_info", "list_dataset_ids", "list_table_ids"},
				},
			},
		},

@@ -1369,7 +1369,7 @@ func TestPrebuiltTools(t *testing.T) {
			wantToolset: server.ToolsetConfigs{
				"clickhouse-database-tools": tools.ToolsetConfig{
					Name: "clickhouse-database-tools",
-					ToolNames: []string{"execute_sql"},
+					ToolNames: []string{"execute_sql", "list_databases"},
				},
			},
		},
@@ -3,8 +3,8 @@ module genai-quickstart
go 1.24.6

require (
-	github.com/googleapis/mcp-toolbox-sdk-go v0.2.0
-	google.golang.org/genai v1.21.0
+	github.com/googleapis/mcp-toolbox-sdk-go v0.3.0
+	google.golang.org/genai v1.23.0
)

require (

@@ -4,7 +4,7 @@ go 1.24.6

require (
	github.com/firebase/genkit/go v0.6.2
-	github.com/googleapis/mcp-toolbox-sdk-go v0.2.0
+	github.com/googleapis/mcp-toolbox-sdk-go v0.3.0
)

require (

@@ -3,7 +3,7 @@ module langchan-quickstart
go 1.24.6

require (
-	github.com/googleapis/mcp-toolbox-sdk-go v0.2.0
+	github.com/googleapis/mcp-toolbox-sdk-go v0.3.0
	github.com/tmc/langchaingo v0.1.13
)

@@ -3,7 +3,7 @@ module openai-quickstart
go 1.24.6

require (
-	github.com/googleapis/mcp-toolbox-sdk-go v0.2.0
+	github.com/googleapis/mcp-toolbox-sdk-go v0.3.0
	github.com/openai/openai-go v1.12.0
)
@@ -3,7 +3,7 @@ import { ToolboxClient } from "@toolbox-sdk/core";

const TOOLBOX_URL = "http://127.0.0.1:5000"; // Update if needed
-const GOOGLE_API_KEY = 'enter your api here'; // Replace it with your API key
+const GOOGLE_API_KEY = process.env.GOOGLE_API_KEY || 'your-api-key'; // Replace it with your API key

const prompt = `
You're a helpful hotel assistant. You handle hotel searching, booking, and

@@ -2,8 +2,7 @@ import { ToolboxClient } from "@toolbox-sdk/core";
import { genkit } from "genkit";
import { googleAI } from '@genkit-ai/googleai';

-// Replace it with your API key
-process.env.GOOGLE_API_KEY = 'your-api-key';
+const GOOGLE_API_KEY = process.env.GOOGLE_API_KEY || 'your-api-key'; // Replace it with your API key

const systemPrompt = `
You're a helpful hotel assistant. You handle hotel searching, booking, and

@@ -28,7 +27,7 @@ async function main() {
  const ai = genkit({
    plugins: [
      googleAI({
-        apiKey: process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY
+        apiKey: process.env.GEMINI_API_KEY || GOOGLE_API_KEY
      })
    ],
    model: googleAI.model('gemini-2.0-flash'),

@@ -86,4 +85,4 @@ async function main() {
  }
}

-main();
+main();

@@ -4,8 +4,7 @@ import { tool } from "@langchain/core/tools";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { MemorySaver } from "@langchain/langgraph";

-// Replace it with your API key
-process.env.GOOGLE_API_KEY = 'your-api-key';
+const GOOGLE_API_KEY = process.env.GOOGLE_API_KEY || 'your-api-key'; // Replace it with your API key

const prompt = `
You're a helpful hotel assistant. You handle hotel searching, booking, and

@@ -4,7 +4,7 @@ import { createMemory, staticBlock, tool } from "llamaindex";
import { ToolboxClient } from "@toolbox-sdk/core";

const TOOLBOX_URL = "http://127.0.0.1:5000"; // Update if needed
-process.env.GOOGLE_API_KEY = 'your-api-key'; // Replace it with your API key
+const GOOGLE_API_KEY = process.env.GOOGLE_API_KEY || 'your-api-key'; // Replace it with your API key

const prompt = `

@@ -40,7 +40,7 @@ async function main() {
  // Initialize LLM
  const llm = gemini({
    model: GEMINI_MODEL.GEMINI_2_0_FLASH,
-    apiKey: process.env.GOOGLE_API_KEY,
+    apiKey: GOOGLE_API_KEY,
  });

  const memory = createMemory({
@@ -19,10 +19,9 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `ALLOYDB_POSTGRES_CLUSTER`: The ID of your AlloyDB cluster.
* `ALLOYDB_POSTGRES_INSTANCE`: The ID of your AlloyDB instance.
* `ALLOYDB_POSTGRES_DATABASE`: The name of the database to connect to.
-* `ALLOYDB_POSTGRES_USER`: The database username. Defaults to IAM authentication if unspecified.
-* `ALLOYDB_POSTGRES_PASSWORD`: The password for the database user. Defaults to IAM authentication if unspecified.
-* `ALLOYDB_POSTGRES_IP_TYPE`: The IP type i.e. "Public
-  or "Private" (Default: Public).
+* `ALLOYDB_POSTGRES_USER`: (Optional) The database username. Defaults to IAM authentication if unspecified.
+* `ALLOYDB_POSTGRES_PASSWORD`: (Optional) The password for the database user. Defaults to IAM authentication if unspecified.
+* `ALLOYDB_POSTGRES_IP_TYPE`: (Optional) The IP type i.e. "Public" or "Private" (Default: Public).
* **Permissions:**
  * **AlloyDB Client** (`roles/alloydb.client`) to connect to the instance.
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.

@@ -51,6 +50,7 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `--prebuilt` value: `bigquery`
* **Environment Variables:**
  * `BIGQUERY_PROJECT`: The GCP project ID.
+  * `BIGQUERY_LOCATION`: (Optional) The dataset location.
* **Permissions:**
  * **BigQuery User** (`roles/bigquery.user`) to execute queries and view metadata.
  * **BigQuery Metadata Viewer** (`roles/bigquery.metadataViewer`) to view all datasets.

@@ -75,6 +75,8 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `CLOUD_SQL_MYSQL_DATABASE`: The name of the database to connect to.
* `CLOUD_SQL_MYSQL_USER`: The database username.
* `CLOUD_SQL_MYSQL_PASSWORD`: The password for the database user.
* `CLOUD_SQL_MYSQL_IP_TYPE`: The IP type i.e. "Public
  or "Private" (Default: Public).
* **Permissions:**
  * **Cloud SQL Client** (`roles/cloudsql.client`) to connect to the instance.
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.

@@ -90,8 +92,9 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `CLOUD_SQL_POSTGRES_REGION`: The region of your Cloud SQL instance.
* `CLOUD_SQL_POSTGRES_INSTANCE`: The ID of your Cloud SQL instance.
* `CLOUD_SQL_POSTGRES_DATABASE`: The name of the database to connect to.
-* `CLOUD_SQL_POSTGRES_USER`: The database username.
-* `CLOUD_SQL_POSTGRES_PASSWORD`: The password for the database user.
+* `CLOUD_SQL_POSTGRES_USER`: (Optional) The database username. Defaults to IAM authentication if unspecified.
+* `CLOUD_SQL_POSTGRES_PASSWORD`: (Optional) The password for the database user. Defaults to IAM authentication if unspecified.
+* `CLOUD_SQL_POSTGRES_IP_TYPE`: (Optional) The IP type i.e. "Public" or "Private" (Default: Public).
* **Permissions:**
  * **Cloud SQL Client** (`roles/cloudsql.client`) to connect to the instance.
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.

@@ -110,6 +113,7 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `CLOUD_SQL_MSSQL_IP_ADDRESS`: The IP address of the Cloud SQL instance.
* `CLOUD_SQL_MSSQL_USER`: The database username.
* `CLOUD_SQL_MSSQL_PASSWORD`: The password for the database user.
+* `CLOUD_SQL_MSSQL_IP_TYPE`: (Optional) The IP type i.e. "Public" or "Private" (Default: Public).
* **Permissions:**
  * **Cloud SQL Client** (`roles/cloudsql.client`) to connect to the instance.
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.

@@ -135,7 +139,7 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `--prebuilt` value: `firestore`
* **Environment Variables:**
  * `FIRESTORE_PROJECT`: The GCP project ID.
-  * `FIRESTORE_DATABASE`: The Firestore database ID.
+  * `FIRESTORE_DATABASE`: (Optional) The Firestore database ID. Defaults to "(default)".
* **Permissions:**
  * **Cloud Datastore User** (`roles/datastore.user`) to get documents, list collections, and query collections.
  * **Firebase Rules Viewer** (`roles/firebaserules.viewer`) to get and validate Firestore rules.

@@ -228,6 +232,7 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `POSTGRES_DATABASE`: The name of the database to connect to.
* `POSTGRES_USER`: The database username.
* `POSTGRES_PASSWORD`: The password for the database user.
+* `POSTGRES_QUERY_PARAMS`: (Optional) Raw query to be added to the db connection string.
* **Permissions:**
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.
* **Tools:**
docs/en/resources/sources/alloydb-admin.md (new file, 36 lines)

---
title: "AlloyDB Admin"
linkTitle: "AlloyDB Admin"
type: docs
weight: 2
description: >
  The "alloydb-admin" source provides a client for the AlloyDB API.
aliases:
- /resources/sources/alloydb-admin
---

## About

The `alloydb-admin` source provides a client to interact with the [Google AlloyDB API](https://cloud.google.com/alloydb/docs/reference/rest). This allows tools to perform administrative tasks on AlloyDB resources, such as managing clusters, instances, and users.

Authentication can be handled in two ways:

1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-alloydb-admin:
    kind: alloydb-admin

  my-oauth-alloydb-admin:
    kind: alloydb-admin
    useClientOAuth: true
```

## Reference

| **field** | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind | string | true | Must be "alloydb-admin". |
| useClientOAuth | boolean | false | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |
@@ -33,6 +33,9 @@ cluster][alloydb-free-trial].
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in AlloyDB Postgres.

+- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
+  List tables in an AlloyDB for PostgreSQL database.

### Pre-built Configurations

- [AlloyDB using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/alloydb_pg_mcp/)
@@ -82,26 +82,7 @@ intend to run. Common roles include `roles/bigquery.user` (which includes
permissions to run jobs and read data) or `roles/bigquery.dataViewer`.
Follow this [guide][set-adc] to set up your ADC.

-### Authentication via User's OAuth Access Token
-
-If the `useClientOAuth` parameter is set to `true`, Toolbox will instead use the
-OAuth access token for authentication. This token is parsed from the
-`Authorization` header passed in with the tool invocation request. This method
-allows Toolbox to make queries to [BigQuery][bigquery-docs] on behalf of the
-client or the end-user.
-
-When using this on-behalf-of authentication, you must ensure that the
-identity used has been granted the correct IAM permissions. Currently,
-this option is only supported by the following BigQuery tools:
-
-- [`bigquery-sql`](../tools/bigquery/bigquery-sql.md)
-  Run SQL queries directly against BigQuery datasets.
-
-[iam-overview]: https://cloud.google.com/bigquery/docs/access-control
-[adc]: https://cloud.google.com/docs/authentication#adc
-[set-adc]: https://cloud.google.com/docs/authentication/provide-credentials-adc
-
-## Example
+#### Example (ADC)

Initialize a BigQuery source that uses ADC:

@@ -111,8 +92,28 @@ sources:
```yaml
    kind: "bigquery"
    project: "my-project-id"
    # location: "US" # Optional: Specifies the location for query jobs.
    # allowedDatasets: # Optional: Restricts tool access to a specific list of datasets.
    #   - "my_dataset_1"
    #   - "other_project.my_dataset_2"
```

### Authentication via User's OAuth Access Token

If the `useClientOAuth` parameter is set to `true`, Toolbox will instead use the
OAuth access token for authentication. This token is parsed from the
`Authorization` header passed in with the tool invocation request. This method
allows Toolbox to make queries to [BigQuery][bigquery-docs] on behalf of the
client or the end-user.

When using this on-behalf-of authentication, you must ensure that the
identity used has been granted the correct IAM permissions.

[iam-overview]: <https://cloud.google.com/bigquery/docs/access-control>
[adc]: <https://cloud.google.com/docs/authentication#adc>
[set-adc]: <https://cloud.google.com/docs/authentication/provide-credentials-adc>

#### Example (Client OAuth)

Initialize a BigQuery source that uses the client's access token:

@@ -122,8 +123,13 @@ sources:
```yaml
    project: "my-project-id"
    useClientOAuth: true
    # location: "US" # Optional: Specifies the location for query jobs.
    # allowedDatasets: # Optional: Restricts tool access to a specific list of datasets.
    #   - "my_dataset_1"
    #   - "other_project.my_dataset_2"
```

To connect to Gemini CLI using the client OAuth feature, you can follow this step-by-step [guide](../../samples/bigquery/bigquery_gemini_cli_client_oauth/_index.md).

## Reference

@@ -131,4 +137,5 @@ sources:
| **field** | **type** | **required** | **description** |
| kind | string | true | Must be "bigquery". |
| project | string | true | Id of the Google Cloud project to use for billing and as the default project for BigQuery resources. |
| location | string | false | Specifies the location (e.g., 'us', 'asia-northeast1') in which to run the query job. This location must match the location of any tables referenced in the query. Defaults to the table's location or 'US' if the location cannot be determined. [Learn More](https://cloud.google.com/bigquery/docs/locations) |
| allowedDatasets | []string | false | An optional list of dataset IDs that tools using this source are allowed to access. If provided, any tool operation attempting to access a dataset not in this list will be rejected. To enforce this, two types of operations are also disallowed: 1) Dataset-level operations (e.g., `CREATE SCHEMA`), and 2) operations where table access cannot be statically analyzed (e.g., `EXECUTE IMMEDIATE`, `CREATE PROCEDURE`). If a single dataset is provided, it will be treated as the default for prebuilt tools. |
| useClientOAuth | bool | false | If true, forwards the client's OAuth access token from the "Authorization" header to downstream queries. |
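To illustrate the on-behalf-of flow described above, here is a hedged sketch of a tool invocation that forwards a user's OAuth access token in the `Authorization` header. The `/api/tool/<name>/invoke` route matches the invocation examples elsewhere in these docs; the tool name, request body, and token are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// userAccessToken would come from your OAuth flow; hard-coded here only to
// keep the sketch self-contained.
var userAccessToken = "ya29.example-token"

func main() {
	body := strings.NewReader(`{"sql": "SELECT 1"}`)
	req, err := http.NewRequest("POST", "http://127.0.0.1:5000/api/tool/execute_sql_tool/invoke", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// With useClientOAuth: true, Toolbox parses the user's OAuth access token
	// from the Authorization header and runs the query as that identity.
	req.Header.Set("Authorization", "Bearer "+userAccessToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```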
docs/en/resources/sources/cloud-monitoring.md (new file, 36 lines)

---
title: "Cloud Monitoring"
type: docs
weight: 1
description: >
  A "cloud-monitoring" source provides a client for the Cloud Monitoring API.
aliases:
- /resources/sources/cloud-monitoring
---

## About

The `cloud-monitoring` source provides a client to interact with the [Google Cloud Monitoring API](https://cloud.google.com/monitoring/api). This allows tools to access the Cloud Monitoring Metrics Explorer and run PromQL queries.

Authentication can be handled in two ways:

1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-cloud-monitoring:
    kind: cloud-monitoring

  my-oauth-cloud-monitoring:
    kind: cloud-monitoring
    useClientOAuth: true
```

## Reference

| **field** | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind | string | true | Must be "cloud-monitoring". |
| useClientOAuth | boolean | false | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |
docs/en/resources/sources/cloud-sql-admin.md (new file, 36 lines)

---
title: "Cloud SQL Admin"
type: docs
weight: 1
description: >
  A "cloud-sql-admin" source provides a client for the Cloud SQL Admin API.
aliases:
- /resources/sources/cloud-sql-admin
---

## About

The `cloud-sql-admin` source provides a client to interact with the [Google Cloud SQL Admin API](https://cloud.google.com/sql/docs/mysql/admin-api/v1). This allows tools to perform administrative tasks on Cloud SQL instances, such as creating users and databases.

Authentication can be handled in two ways:

1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-cloud-sql-admin:
    kind: cloud-sql-admin

  my-oauth-cloud-sql-admin:
    kind: cloud-sql-admin
    useClientOAuth: true
```

## Reference

| **field** | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind | string | true | Must be "cloud-sql-admin". |
| useClientOAuth | boolean | false | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |
@@ -28,6 +28,9 @@ to a database by following these instructions][csql-mysql-quickstart].
- [`mysql-execute-sql`](../tools/mysql/mysql-execute-sql.md)
  Run parameterized SQL queries in Cloud SQL for MySQL.

+- [`mysql-list-tables`](../tools/mysql/mysql-list-tables.md)
+  List tables in a Cloud SQL for MySQL database.

### Pre-built Configurations

- [Cloud SQL for MySQL using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/cloud_sql_mysql_mcp/)

@@ -28,6 +28,9 @@ to a database by following these instructions][csql-pg-quickstart].
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in PostgreSQL.

+- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
+  List tables in a PostgreSQL database.

### Pre-built Configurations

- [Cloud SQL for Postgres using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/cloud_sql_pg_mcp/)

@@ -22,6 +22,9 @@ reliability, performance, and ease of use.
- [`mysql-execute-sql`](../tools/mysql/mysql-execute-sql.md)
  Run parameterized SQL queries in MySQL.

+- [`mysql-list-tables`](../tools/mysql/mysql-list-tables.md)
+  List tables in a MySQL database.

## Requirements

### Database User

@@ -23,6 +23,9 @@ reputation for reliability, feature robustness, and performance.
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in PostgreSQL.

+- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
+  List tables in a PostgreSQL database.

### Pre-built Configurations

- [PostgreSQL using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/postgres_mcp/)
docs/en/resources/sources/yugabytedb.md (new file, 44 lines)

---
title: "YugabyteDB"
type: docs
weight: 1
description: >
  YugabyteDB is a high-performance, distributed SQL database.
---

## About

[YugabyteDB][yugabytedb] is a high-performance, distributed SQL database designed for global, internet-scale applications, with full PostgreSQL compatibility.

[yugabytedb]: https://www.yugabyte.com/

## Example

```yaml
sources:
  my-yb-source:
    kind: yugabytedb
    host: 127.0.0.1
    port: 5433
    database: yugabyte
    user: ${USER_NAME}
    password: ${PASSWORD}
    loadBalance: true
    topologyKeys: cloud.region.zone1:1,cloud.region.zone2:2
```

## Reference

| **field** | **type** | **required** | **description** |
|------------------------------|:--------:|:------------:|-----------------|
| kind | string | true | Must be "yugabytedb". |
| host | string | true | IP address to connect to. |
| port | integer | true | Port to connect to. The default port is 5433. |
| database | string | true | Name of the YugabyteDB database to connect to. The default database name is yugabyte. |
| user | string | true | Name of the YugabyteDB user to connect as. The default user is yugabyte. |
| password | string | true | Password of the YugabyteDB user. The default password is yugabyte. |
| loadBalance | boolean | false | If true, enables uniform load balancing. Defaults to false. |
| topologyKeys | string | false | Comma-separated geo-locations in the form cloud.region.zone:priority to enable topology-aware load balancing. Ignored if loadBalance is false. Null by default. |
| ybServersRefreshInterval | integer | false | The interval (in seconds) to refresh the servers list; ignored if loadBalance is false. Defaults to 300. |
| fallbackToTopologyKeysOnly | boolean | false | If set to true and topologyKeys are specified, only connect to nodes specified in topologyKeys. Defaults to false. |
| failedHostReconnectDelaySecs | integer | false | Time (in seconds) to wait before trying to reconnect to failed nodes. Defaults to 5. |
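Because YugabyteDB speaks the PostgreSQL wire protocol (as noted above), any PostgreSQL driver can reach the endpoint configured in the YAML example. A minimal sketch using `github.com/jackc/pgx/v5` (the driver choice is an assumption; smart-driver options such as `loadBalance` and `topologyKeys` require YugabyteDB's own driver instead):

```go
package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	// Same host/port/database as the YAML example: 127.0.0.1:5433/yugabyte.
	conn, err := pgx.Connect(ctx, "postgres://yugabyte:yugabyte@127.0.0.1:5433/yugabyte")
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	var version string
	if err := conn.QueryRow(ctx, "SELECT version()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println(version) // Reports the PostgreSQL-compatible YugabyteDB version.
}
```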
@@ -0,0 +1,52 @@
---
title: "bigquery-analyze-contribution"
type: docs
weight: 1
description: >
  A "bigquery-analyze-contribution" tool performs contribution analysis in BigQuery.
aliases:
- /resources/tools/bigquery-analyze-contribution
---

## About

A `bigquery-analyze-contribution` tool performs contribution analysis in BigQuery by creating a temporary `CONTRIBUTION_ANALYSIS` model and then querying it with `ML.GET_INSIGHTS` to find top contributors for a given metric.

It's compatible with the following sources:

- [bigquery](../../sources/bigquery.md)

`bigquery-analyze-contribution` takes the following parameters:

- **input_data** (string, required): The data that contains the test and control data to analyze. This can be a fully qualified BigQuery table ID (e.g., `my-project.my_dataset.my_table`) or a SQL query that returns the data.
- **contribution_metric** (string, required): The name of the column that contains the metric to analyze. This can be SUM(metric_column_name), SUM(numerator_metric_column_name)/SUM(denominator_metric_column_name), or SUM(metric_sum_column_name)/COUNT(DISTINCT categorical_column_name), depending on the type of metric to analyze.
- **is_test_col** (string, required): The name of the column that identifies whether a row is in the test or control group. The column must contain boolean values.
- **dimension_id_cols** (array of strings, optional): An array of column names that uniquely identify each dimension.
- **top_k_insights_by_apriori_support** (integer, optional): The number of top insights to return, ranked by apriori support. Defaults to `30`.
- **pruning_method** (string, optional): The method to use for pruning redundant insights. Can be `'NO_PRUNING'` or `'PRUNE_REDUNDANT_INSIGHTS'`. Defaults to `'PRUNE_REDUNDANT_INSIGHTS'`.

## Example

```yaml
tools:
  contribution_analyzer:
    kind: bigquery-analyze-contribution
    source: my-bigquery-source
    description: Use this tool to run contribution analysis on a dataset in BigQuery.
```

## Sample Prompt

You can prepare a sample table by following <https://cloud.google.com/bigquery/docs/get-contribution-analysis-insights>,
then use the following sample prompts to call this tool:

- What drives the changes in sales in the table `bqml_tutorial.iowa_liquor_sales_sum_data`? Use the project id myproject.
- Analyze the contribution for the `total_sales` metric in the table `bqml_tutorial.iowa_liquor_sales_sum_data`. The test group is identified by the `is_test` column. The dimensions are `store_name`, `city`, `vendor_name`, `category_name` and `item_description`.

## Reference

| **field** | **type** | **required** | **description** |
|-------------|:--------:|:------------:|----------------------------------------------------|
| kind | string | true | Must be "bigquery-analyze-contribution". |
| source | string | true | Name of the source the tool should execute on. |
| description | string | true | Description of the tool that is passed to the LLM. |
@@ -15,10 +15,19 @@ It's compatible with the following sources:

- [bigquery](../../sources/bigquery.md)

-`bigquery-list-table-ids` takes a required `dataset` parameter to specify the dataset
-from which to list table IDs. It also optionally accepts a `project` parameter to
-define the Google Cloud project ID. If the `project` parameter is not provided, the
-tool defaults to using the project defined in the source configuration.
+`bigquery-list-table-ids` accepts the following parameters:
+
+- **`dataset`** (required): Specifies the dataset from which to list table IDs.
+- **`project`** (optional): Defines the Google Cloud project ID. If not provided,
+  the tool defaults to the project from the source configuration.
+
+The tool's behavior regarding these parameters is influenced by the
+`allowedDatasets` restriction on the `bigquery` source:
+
+- **Without `allowedDatasets` restriction:** The tool can list tables from any
+  dataset specified by the `dataset` and `project` parameters.
+- **With `allowedDatasets` restriction:** Before listing tables, the tool verifies
+  that the requested dataset is in the allowed list. If it is not, the request is
+  denied. If only one dataset is specified in the `allowedDatasets` list, it
+  will be used as the default value for the `dataset` parameter.

## Example
@@ -0,0 +1,53 @@
---
title: "clickhouse-list-databases"
type: docs
weight: 3
description: >
  A "clickhouse-list-databases" tool lists all databases in a ClickHouse instance.
aliases:
- /resources/tools/clickhouse-list-databases
---

## About

A `clickhouse-list-databases` tool lists all available databases in a
ClickHouse instance. It's compatible with the [clickhouse](../../sources/clickhouse.md) source.

This tool executes the `SHOW DATABASES` command and returns a list of all
databases accessible to the configured user, making it useful for database
discovery and exploration tasks.

## Example

```yaml
tools:
  list_clickhouse_databases:
    kind: clickhouse-list-databases
    source: my-clickhouse-instance
    description: List all available databases in the ClickHouse instance
```

## Return Value

The tool returns an array of objects, where each object contains:

- `name`: The name of the database

Example response:

```json
[
  {"name": "default"},
  {"name": "system"},
  {"name": "analytics"},
  {"name": "user_data"}
]
```

## Reference

| **field** | **type** | **required** | **description** |
|--------------|:------------------:|:------------:|-------------------------------------------------------|
| kind | string | true | Must be "clickhouse-list-databases". |
| source | string | true | Name of the ClickHouse source to list databases from. |
| description | string | true | Description of the tool that is passed to the LLM. |
| authRequired | array of string | false | Authentication services required to use this tool. |
| parameters | array of Parameter | false | Parameters for the tool (typically not used). |
docs/en/resources/tools/firestore/firestore-query.md (new file, 412 lines)

---
title: "firestore-query"
type: docs
weight: 1
description: >
  Query a Firestore collection with parameterizable filters and Firestore native JSON value types
aliases:
- /resources/tools/firestore-query
---

## Overview

The `firestore-query` tool allows you to query Firestore collections with dynamic, parameterizable filters that support Firestore's native JSON value types. The tool is designed to query a single collection, which is the standard pattern in Firestore; the collection path itself can be parameterized, making it flexible for various use cases. It is particularly useful when you need to create reusable query templates with parameters that can be substituted at runtime.

**Developer Note**: This tool serves as the general querying foundation that developers can use to create custom tools with specific query patterns.
## Key Features

- **Parameterizable Queries**: Use Go template syntax to create dynamic queries
- **Dynamic Collection Paths**: The collection path can be parameterized for flexibility
- **Native JSON Value Types**: Support for Firestore's typed values (stringValue, integerValue, doubleValue, etc.)
- **Complex Filter Logic**: Support for AND/OR logical operators in filters
- **Template Substitution**: Dynamic collection paths, filters, and ordering
- **Query Analysis**: Optional query performance analysis with explain metrics (non-parameterizable)

## Configuration

### Basic Configuration

```yaml
tools:
  query_countries:
    kind: firestore-query
    source: my-firestore-source
    description: Query countries with dynamic filters
    collectionPath: "countries"
    filters: |
      {
        "field": "continent",
        "op": "==",
        "value": {"stringValue": "{{.continent}}"}
      }
    parameters:
      - name: continent
        type: string
        description: Continent to filter by
        required: true
```

### Advanced Configuration with Complex Filters

```yaml
tools:
  advanced_query:
    kind: firestore-query
    source: my-firestore-source
    description: Advanced query with complex filters
    collectionPath: "{{.collection}}"
    filters: |
      {
        "or": [
          {"field": "status", "op": "==", "value": {"stringValue": "{{.status}}"}},
          {
            "and": [
              {"field": "priority", "op": ">", "value": {"integerValue": "{{.priority}}"}},
              {"field": "area", "op": "<", "value": {"doubleValue": {{.maxArea}}}},
              {"field": "active", "op": "==", "value": {"booleanValue": {{.isActive}}}}
            ]
          }
        ]
      }
    select:
      - name
      - status
      - priority
    orderBy:
      field: "{{.sortField}}"
      direction: "{{.sortDirection}}"
    limit: 100
    analyzeQuery: true
    parameters:
      - name: collection
        type: string
        description: Collection to query
        required: true
      - name: status
        type: string
        description: Status to filter by
        required: true
      - name: priority
        type: string
        description: Minimum priority value
        required: true
      - name: maxArea
        type: float
        description: Maximum area value
        required: true
      - name: isActive
        type: boolean
        description: Filter by active status
        required: true
      - name: sortField
        type: string
        description: Field to sort by
        required: false
        default: "createdAt"
      - name: sortDirection
        type: string
        description: Sort direction (ASCENDING or DESCENDING)
        required: false
        default: "DESCENDING"
```
## Parameters

### Configuration Parameters

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `kind` | string | Yes | Must be `firestore-query` |
| `source` | string | Yes | Name of the Firestore source to use |
| `description` | string | Yes | Description of what this tool does |
| `collectionPath` | string | Yes | Path to the collection to query (supports templates) |
| `filters` | string | No | JSON string defining query filters (supports templates) |
| `select` | array | No | Fields to select from documents (supports templates; string or array) |
| `orderBy` | object | No | Ordering configuration with `field` and `direction` (supports templates for the value of `field` or `direction`) |
| `limit` | integer | No | Maximum number of documents to return (default: 100) (supports templates) |
| `analyzeQuery` | boolean | No | Whether to analyze query performance (default: false) |
| `parameters` | array | Yes | Parameter definitions for template substitution |

### Runtime Parameters

Runtime parameters are defined in the `parameters` array and can be used in templates throughout the configuration.
## Filter Format

### Simple Filter

```json
{
  "field": "age",
  "op": ">",
  "value": {"integerValue": "25"}
}
```

### AND Filter

```json
{
  "and": [
    {"field": "status", "op": "==", "value": {"stringValue": "active"}},
    {"field": "age", "op": ">=", "value": {"integerValue": "18"}}
  ]
}
```

### OR Filter

```json
{
  "or": [
    {"field": "role", "op": "==", "value": {"stringValue": "admin"}},
    {"field": "role", "op": "==", "value": {"stringValue": "moderator"}}
  ]
}
```

### Nested Filters

```json
{
  "or": [
    {"field": "type", "op": "==", "value": {"stringValue": "premium"}},
    {
      "and": [
        {"field": "type", "op": "==", "value": {"stringValue": "standard"}},
        {"field": "credits", "op": ">", "value": {"integerValue": "1000"}}
      ]
    }
  ]
}
```

## Firestore Native Value Types

The tool supports all Firestore native JSON value types:

| Type | Format | Example |
|------|--------|---------|
| String | `{"stringValue": "text"}` | `{"stringValue": "{{.name}}"}` |
| Integer | `{"integerValue": "123"}` or `{"integerValue": 123}` | `{"integerValue": "{{.age}}"}` or `{"integerValue": {{.age}}}` |
| Double | `{"doubleValue": 45.67}` | `{"doubleValue": {{.price}}}` |
| Boolean | `{"booleanValue": true}` | `{"booleanValue": {{.active}}}` |
| Null | `{"nullValue": null}` | `{"nullValue": null}` |
| Timestamp | `{"timestampValue": "RFC3339"}` | `{"timestampValue": "{{.date}}"}` |
| GeoPoint | `{"geoPointValue": {"latitude": 0, "longitude": 0}}` | See below |
| Array | `{"arrayValue": {"values": [...]}}` | See below |
| Map | `{"mapValue": {"fields": {...}}}` | See below |

### Complex Type Examples

**GeoPoint:**

```json
{
  "field": "location",
  "op": "==",
  "value": {
    "geoPointValue": {
      "latitude": 37.7749,
      "longitude": -122.4194
    }
  }
}
```

**Array:**

```json
{
  "field": "tags",
  "op": "array-contains",
  "value": {"stringValue": "{{.tag}}"}
}
```

## Supported Operators

- `<` - Less than
- `<=` - Less than or equal
- `>` - Greater than
- `>=` - Greater than or equal
- `==` - Equal
- `!=` - Not equal
- `array-contains` - Array contains value
- `array-contains-any` - Array contains any of the values
- `in` - Value is in array
- `not-in` - Value is not in array
## Examples

### Example 1: Query with Dynamic Collection Path

```yaml
tools:
  user_documents:
    kind: firestore-query
    source: my-firestore
    description: Query user-specific documents
    collectionPath: "users/{{.userId}}/documents"
    filters: |
      {
        "field": "type",
        "op": "==",
        "value": {"stringValue": "{{.docType}}"}
      }
    parameters:
      - name: userId
        type: string
        description: User ID
        required: true
      - name: docType
        type: string
        description: Document type to filter
        required: true
```

### Example 2: Complex Geographic Query

```yaml
tools:
  location_search:
    kind: firestore-query
    source: my-firestore
    description: Search locations by area and population
    collectionPath: "cities"
    filters: |
      {
        "and": [
          {"field": "country", "op": "==", "value": {"stringValue": "{{.country}}"}},
          {"field": "population", "op": ">", "value": {"integerValue": "{{.minPopulation}}"}},
          {"field": "area", "op": "<", "value": {"doubleValue": {{.maxArea}}}}
        ]
      }
    orderBy:
      field: "population"
      direction: "DESCENDING"
    limit: 50
    parameters:
      - name: country
        type: string
        description: Country code
        required: true
      - name: minPopulation
        type: string
        description: Minimum population (as string for large numbers)
        required: true
      - name: maxArea
        type: float
        description: Maximum area in square kilometers
        required: true
```

### Example 3: Time-based Query with Analysis

```yaml
tools:
  activity_log:
    kind: firestore-query
    source: my-firestore
    description: Query activity logs within time range
    collectionPath: "logs"
    filters: |
      {
        "and": [
          {"field": "timestamp", "op": ">=", "value": {"timestampValue": "{{.startTime}}"}},
          {"field": "timestamp", "op": "<=", "value": {"timestampValue": "{{.endTime}}"}},
          {"field": "severity", "op": "in", "value": {"arrayValue": {"values": [
            {"stringValue": "ERROR"},
            {"stringValue": "CRITICAL"}
          ]}}}
        ]
      }
    select:
      - timestamp
      - message
      - severity
      - userId
    orderBy:
      field: "timestamp"
      direction: "DESCENDING"
    analyzeQuery: true
    parameters:
      - name: startTime
        type: string
        description: Start time in RFC3339 format
        required: true
      - name: endTime
        type: string
        description: End time in RFC3339 format
        required: true
```
## Usage

### Invoking the Tool

```bash
# Using curl
curl -X POST http://localhost:5000/api/tool/your-tool-name/invoke \
  -H "Content-Type: application/json" \
  -d '{
    "continent": "Europe",
    "minPopulation": "1000000",
    "maxArea": 500000.5,
    "isActive": true
  }'
```

### Response Format

**Without analyzeQuery:**

```json
[
  {
    "id": "doc1",
    "path": "countries/doc1",
    "data": {
      "name": "France",
      "continent": "Europe",
      "population": 67000000,
      "area": 551695
    },
    "createTime": "2024-01-01T00:00:00Z",
    "updateTime": "2024-01-15T10:30:00Z"
  }
]
```

**With analyzeQuery:**

```json
{
  "documents": [...],
  "explainMetrics": {
    "planSummary": {
      "indexesUsed": [...]
    },
    "executionStats": {
      "resultsReturned": 10,
      "executionDuration": "15ms",
      "readOperations": 10
    }
  }
}
```
## Best Practices

1. **Use Typed Values**: Always use Firestore's native JSON value types for proper type handling
2. **String Numbers for Large Integers**: Use string representation for large integers to avoid precision loss
3. **Template Security**: Validate all template parameters to prevent injection attacks
4. **Index Optimization**: Use `analyzeQuery` to identify missing indexes
5. **Limit Results**: Always set a reasonable `limit` to prevent excessive data retrieval
6. **Field Selection**: Use `select` to retrieve only necessary fields

## Technical Notes

- Queries operate on a single collection (the standard Firestore pattern)
- Maximum of 100 filters per query (configurable)
- Template parameters must be properly escaped in JSON contexts
- Complex nested queries may require composite indexes

## See Also

- [firestore-query-collection](firestore-query-collection.md) - Non-parameterizable query tool
- [Firestore Source Configuration](../../sources/firestore.md)
- [Firestore Query Documentation](https://firebase.google.com/docs/firestore/query-data/queries)
43
docs/en/resources/tools/mysql/mysql-list-tables.md
Normal file
43
docs/en/resources/tools/mysql/mysql-list-tables.md
Normal file
@@ -0,0 +1,43 @@
|
||||
---
|
||||
title: "mysql-list-tables"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "mysql-list-tables" tool lists schema information for all or specified tables in a MySQL database.
|
||||
aliases:
|
||||
- /resources/tools/mysql-list-tables
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `mysql-list-tables` tool retrieves schema information for all or specified tables in a MySQL database. It is compatible with any of the following sources:
|
||||
|
||||
- [cloud-sql-mysql](../../sources/cloud-sql-mysql.md)
|
||||
- [mysql](../../sources/mysql.md)
|
||||
|
||||
`mysql-list-tables` lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). It filters by a comma-separated list of names; if names are omitted, it lists all tables in user schemas. The output format can be set to `simple`, which returns only the table names, or `detailed` (the default), which returns the full schema information.
|
||||
|
||||
The tool takes the following input parameters:
|
||||
|
||||
| Parameter | Type | Description | Required |
|
||||
| :--------- | :----- | :--------------------------------------------------------------------------------------- | :------- |
|
||||
| `table_names` | string | A comma-separated list of table names to filter by. If empty, all tables in user schemas are listed. Default: `""` | No |
|
||||
| `output_format` | string | Indicates the output format of the table schema. `simple` returns only the table names; `detailed` returns the full table information. Default: `detailed`. | No |
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
mysql_list_tables:
|
||||
kind: mysql-list-tables
|
||||
source: mysql-source
|
||||
description: Use this tool to retrieve schema information for all or specified tables. Output format can be simple (only table names) or detailed.
|
||||
```
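
As a quick check, you can invoke the tool through Toolbox's HTTP API, following the same invocation pattern shown earlier in this document. This is a minimal sketch, assuming a Toolbox server on the default `localhost:5000` and the `mysql_list_tables` tool from the example above; the table names are placeholders:

```bash
# Hypothetical invocation; "flights" and "hotels" are placeholder table names.
curl -X POST http://localhost:5000/api/tool/mysql_list_tables/invoke \
  -H "Content-Type: application/json" \
  -d '{
    "table_names": "flights,hotels",
    "output_format": "simple"
  }'
```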
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|-------------|:------------------------------------------:|:------------:|--------------------------------------------------------------------------------------------------|
|
||||
| kind | string | true | Must be "mysql-list-tables". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | true | Description of the tool that is passed to the agent. |
|
||||
39
docs/en/resources/tools/postgres/postgres-list-tables.md
Normal file
@@ -0,0 +1,39 @@
|
||||
---
|
||||
title: "postgres-list-tables"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "postgres-list-tables" tool lists schema information for all or specified tables in a Postgres database.
|
||||
aliases:
|
||||
- /resources/tools/postgres-list-tables
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `postgres-list-tables` tool retrieves schema information for all or specified tables in a Postgres database. It's compatible with any of the following sources:
|
||||
|
||||
- [alloydb-postgres](../../sources/alloydb-pg.md)
|
||||
- [cloud-sql-postgres](../../sources/cloud-sql-pg.md)
|
||||
- [postgres](../../sources/postgres.md)
|
||||
|
||||
`postgres-list-tables` lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). The tool takes the following input parameters:
|
||||
* `table_names` (optional): Filters by a comma-separated list of names. By default, it lists all tables in user schemas.
|
||||
* `output_format` (optional): Indicates the output format of the table schema. `simple` returns only the table names; `detailed` returns the full table information. Default: `detailed`.
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
postgres_list_tables:
|
||||
kind: postgres-list-tables
|
||||
source: postgres-source
|
||||
description: Use this tool to retrieve schema information for all or specified tables. Output format can be simple (only table names) or detailed.
|
||||
```
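
Because both parameters are optional, a minimal invocation sketch (assuming a Toolbox server on the default `localhost:5000` and the `postgres_list_tables` tool from the example above) can omit them to get detailed output for every user table:

```bash
# Hypothetical invocation; an empty body falls back to the defaults:
# all tables in user schemas, in "detailed" format.
curl -X POST http://localhost:5000/api/tool/postgres_list_tables/invoke \
  -H "Content-Type: application/json" \
  -d '{}'
```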
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|-------------|:------------------------------------------:|:------------:|--------------------------------------------------------------------------------------------------|
|
||||
| kind | string | true | Must be "postgres-list-tables". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | true | Description of the tool that is passed to the agent. |
|
||||
101
docs/en/resources/tools/yugabytedb-sql.md
Normal file
@@ -0,0 +1,101 @@
|
||||
---
|
||||
title: "yugabytedb-sql"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
A "yugabytedb-sql" tool executes a pre-defined SQL statement against a YugabyteDB
|
||||
database.
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
A `yugabytedb-sql` tool executes a pre-defined SQL statement against a YugabyteDB
|
||||
database.
|
||||
|
||||
The specified SQL statement is executed as a prepared statement,
|
||||
and specified parameters will be inserted according to their position: e.g. `$1`
|
||||
will be the first parameter specified, `$2` will be the second parameter, and so
|
||||
on. If template parameters are included, they will be resolved before execution
|
||||
of the prepared statement.
|
||||
|
||||
## Example
|
||||
|
||||
> **Note:** This tool uses parameterized queries to prevent SQL injections.
|
||||
> Query parameters can be used as substitutes for arbitrary expressions.
|
||||
> Parameters cannot be used as substitutes for identifiers, column names, table
|
||||
> names, or other parts of the query.
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
search_flights_by_number:
|
||||
kind: yugabytedb-sql
|
||||
source: my-yb-instance
|
||||
statement: |
|
||||
SELECT * FROM flights
|
||||
WHERE airline = $1
|
||||
AND flight_number = $2
|
||||
LIMIT 10
|
||||
description: |
|
||||
Use this tool to get information for a specific flight.
|
||||
Takes an airline code and flight number and returns info on the flight.
|
||||
Do NOT use this tool with a flight id. Do NOT guess an airline code or flight number.
|
||||
An airline code is a code for an airline service consisting of a two-character
|
||||
airline designator followed by a flight number, which is a 1 to 4 digit number.
|
||||
For example, if given CY 0123, the airline is "CY", and flight_number is "123".
|
||||
Another example for this is DL 1234, the airline is "DL", and flight_number is "1234".
|
||||
If the tool returns more than one option, choose the date closest to today.
|
||||
Example:
|
||||
{{
|
||||
"airline": "CY",
|
||||
"flight_number": "888",
|
||||
}}
|
||||
Example:
|
||||
{{
|
||||
"airline": "DL",
|
||||
"flight_number": "1234",
|
||||
}}
|
||||
parameters:
|
||||
- name: airline
|
||||
type: string
|
||||
description: Airline unique 2 letter identifier
|
||||
- name: flight_number
|
||||
type: string
|
||||
description: 1 to 4 digit number
|
||||
```
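
When this tool is invoked over Toolbox's HTTP API, the named parameters are bound to the positional placeholders in order: `airline` to `$1` and `flight_number` to `$2`. A sketch, assuming a Toolbox server on the default `localhost:5000`:

```bash
# Hypothetical invocation of the search_flights_by_number tool defined above.
curl -X POST http://localhost:5000/api/tool/search_flights_by_number/invoke \
  -H "Content-Type: application/json" \
  -d '{"airline": "CY", "flight_number": "888"}'
```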
|
||||
|
||||
### Example with Template Parameters
|
||||
|
||||
> **Note:** This tool allows direct modifications to the SQL statement, including identifiers, column names,
|
||||
> and table names. **This makes it more vulnerable to SQL injections**. Using basic parameters
|
||||
> only (see above) is recommended for performance and safety reasons. For more details, please
|
||||
> check [templateParameters](_index#template-parameters).
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
list_table:
|
||||
kind: yugabytedb-sql
|
||||
source: my-yb-instance
|
||||
statement: |
|
||||
SELECT * FROM {{.tableName}}
|
||||
description: |
|
||||
Use this tool to list all information from a specific table.
|
||||
Example:
|
||||
{{
|
||||
"tableName": "flights",
|
||||
}}
|
||||
templateParameters:
|
||||
- name: tableName
|
||||
type: string
|
||||
description: Table to select from
|
||||
```
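
A corresponding invocation sketch (same `localhost:5000` assumption as above). Note that `tableName` is substituted directly into the statement before it is prepared, so only trusted values should be passed:

```bash
# Hypothetical invocation; "flights" is a placeholder table name.
curl -X POST http://localhost:5000/api/tool/list_table/invoke \
  -H "Content-Type: application/json" \
  -d '{"tableName": "flights"}'
```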
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|---------------------|:---------------------------------------------------------:|:------------:|--------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| kind | string | true | Must be "yugabytedb-sql". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | true | Description of the tool that is passed to the LLM. |
|
||||
| statement | string | true | SQL statement to execute. |
|
||||
| parameters | [parameters](_index#specifying-parameters) | false | List of [parameters](_index#specifying-parameters) that will be inserted into the SQL statement. |
|
||||
| templateParameters | [templateParameters](_index#template-parameters) | false | List of [templateParameters](_index#template-parameters) that will be inserted into the SQL statement before executing prepared statement. |
|
||||
@@ -0,0 +1,130 @@
|
||||
---
|
||||
title: "Access BigQuery from Gemini-CLI with End-User Credentials"
|
||||
type: docs
|
||||
weight: 2
|
||||
description: >
|
||||
How to connect to BigQuery from Gemini-CLI with end-user credentials
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Gemini-CLI can be configured to get an OAuth access token from the Google OAuth endpoint, then send this token to MCP Toolbox as part of the request. MCP Toolbox can then use this token to authenticate with BigQuery. This enables each user to access Toolbox with their own IAM identity for a Toolbox multi-tenancy use case.
|
||||
|
||||
{{< notice note >}}
|
||||
This feature requires Toolbox v0.14.0 or later.
|
||||
{{< /notice >}}
|
||||
|
||||
## Step 1: Register the OAuth Application on GCP
|
||||
|
||||
You first need to register the OAuth application following this [guide](register-oauth) to get a client ID and client secret.
|
||||
|
||||
## Step 2: Install and configure Toolbox
|
||||
|
||||
In this section, we will download Toolbox and run the Toolbox server.
|
||||
|
||||
1. Download the latest version of Toolbox as a binary:
|
||||
|
||||
{{< notice tip >}}
|
||||
Select the
|
||||
[correct binary](https://github.com/googleapis/genai-toolbox/releases)
|
||||
corresponding to your OS and CPU architecture.
|
||||
{{< /notice >}}
|
||||
<!-- {x-release-please-start-version} -->
|
||||
```bash
|
||||
export OS="linux/amd64" # one of linux/amd64, darwin/arm64, darwin/amd64, or windows/amd64
|
||||
curl -O https://storage.googleapis.com/genai-toolbox/v0.14.0/$OS/toolbox
|
||||
```
|
||||
<!-- {x-release-please-end} -->
|
||||
|
||||
1. Create a `tools.yaml` file and include the following BigQuery source configuration:
|
||||
|
||||
```yaml
|
||||
sources:
|
||||
my-bigquery-client-auth-source:
|
||||
kind: "bigquery"
|
||||
project: "my-project-id"
|
||||
useClientOAuth: true
|
||||
# location: "US" # Optional: Specifies the location for query jobs.
|
||||
# allowedDatasets: # Optional: Restricts tool access to a specific list of datasets.
|
||||
# - "my_dataset_1"
|
||||
# - "other_project.my_dataset_2"
|
||||
```
|
||||
|
||||
1. Continue to configure one or more BigQuery tools. Here is a naive example to get started:
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
naive-bq-tool:
|
||||
kind: "bigquery-sql"
|
||||
source: "my-bigquery-client-auth-source"
|
||||
description: Naive BQ Tool that returns 1.
|
||||
statement: |
|
||||
SELECT 1;
|
||||
```
|
||||
|
||||
1. Run the Toolbox server:
|
||||
|
||||
```bash
|
||||
./toolbox --tools-file "tools.yaml"
|
||||
|
||||
```
|
||||
|
||||
The toolbox server will begin listening on localhost port 5000. Leave it
|
||||
running and continue in another terminal.
|
||||
|
||||
Later, when it is time to shut everything down, you can quit the toolbox
|
||||
server with Ctrl-C in this terminal window.
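
Before moving on, you can optionally confirm the server is reachable. This is a sketch that assumes the default port and that your Toolbox version serves the toolset manifest at `/api/toolset`:

```bash
# Optional smoke test; should return the manifest of the configured tools.
curl -s http://localhost:5000/api/toolset
```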
|
||||
|
||||
## Step 3: Configure Gemini-CLI
|
||||
|
||||
1. Edit the file `~/.gemini/settings.json` to include the following configuration:
|
||||
|
||||
```json
|
||||
"mcpServers": {
|
||||
"toolbox": {
|
||||
"httpUrl": "<http://localhost:5000/mcp>", // Replace this with your Toolbox URL if deployed somewhere else.
|
||||
"oauth": {
|
||||
"enabled": true,
|
||||
"clientId": <YOUR_CLIENT_ID>,
|
||||
"clientSecret": <YOUR_CLIENT_SECRET>,
|
||||
"authorizationUrl": "<https://accounts.google.com/o/oauth2/v2/auth>",
|
||||
"tokenUrl": "<https://oauth2.googleapis.com/token>",
|
||||
"scopes": ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Make sure to substitute the client ID and client secret you received in Step 1.
|
||||
|
||||
1. Start Gemini-CLI:
|
||||
|
||||
```shell
|
||||
gemini-cli
|
||||
```
|
||||
|
||||
1. Authenticate with the command `/mcp auth toolbox`. Gemini-CLI will open up a
|
||||
browser where you will log in to your Google account.
|
||||
|
||||

|
||||
|
||||
1. Use Gemini-CLI with your tools. To test the naive Tool we configured previously, ask Gemini to run this Tool:
|
||||
|
||||
```text
|
||||
Call naive-bq-tool
|
||||
```
|
||||
|
||||
## Using Toolbox as a Shared Service
|
||||
|
||||
Toolbox can be run on another server as a shared service accessed by multiple
|
||||
users. We strongly recommend running toolbox behind a web proxy such as `nginx`
|
||||
which will provide SSL encryption. Google Cloud Run is another good way to run
|
||||
toolbox. You will connect to a service like `https://toolbox.example.com/mcp`.
|
||||
The proxy server will handle the SSL encryption and certificates. Then it will
|
||||
forward the requests to `http://localhost:5000/mcp` running in that environment.
|
||||
The details of the config are beyond the scope of this document, but will be
|
||||
familiar to your system administrators.
|
||||
|
||||
To use the shared service, just change the `localhost:5000` in the `httpUrl` in
|
||||
`~/.gemini/settings.json` to the host name and possibly the port of the shared
|
||||
service.
|
||||
Binary file not shown (new image, 26 KiB).
10
go.mod
@@ -39,6 +39,7 @@ require (
|
||||
github.com/thlib/go-timezone-local v0.0.7
|
||||
github.com/trinodb/trino-go-client v0.328.0
|
||||
github.com/valkey-io/valkey-go v1.0.64
|
||||
github.com/yugabyte/pgx/v5 v5.5.3-yb-5
|
||||
go.mongodb.org/mongo-driver v1.17.4
|
||||
go.opentelemetry.io/contrib/propagators/autoprop v0.62.0
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
@@ -49,7 +50,7 @@ require (
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0
|
||||
go.opentelemetry.io/otel/trace v1.37.0
|
||||
golang.org/x/oauth2 v0.31.0
|
||||
google.golang.org/api v0.248.0
|
||||
google.golang.org/api v0.249.0
|
||||
google.golang.org/genproto v0.0.0-20250826171959-ef028d996bc1
|
||||
modernc.org/sqlite v1.38.2
|
||||
)
|
||||
@@ -64,7 +65,6 @@ require (
|
||||
github.com/segmentio/asm v1.2.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
|
||||
gonum.org/v1/gonum v0.16.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -81,7 +81,7 @@ require (
|
||||
cloud.google.com/go/trace v1.11.6 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.10.3 // indirect
|
||||
github.com/ajg/form v1.5.1 // indirect
|
||||
@@ -99,7 +99,7 @@ require (
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
@@ -174,7 +174,7 @@ require (
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect
|
||||
google.golang.org/grpc v1.74.2 // indirect
|
||||
google.golang.org/grpc v1.75.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
modernc.org/libc v1.66.3 // indirect
|
||||
|
||||
18
go.sum
@@ -661,8 +661,8 @@ github.com/ClickHouse/clickhouse-go/v2 v2.40.1 h1:PbwsHBgqXRydU7jKULD1C8CHmifczf
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.40.1/go.mod h1:GDzSBLVhladVm8V01aEB36IoBOVLLICfyeuiIp/8Ezc=
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 h1:2afWGsMzkIcN8Qm4mgPJKZWyroE5QBszMiDMYEBrnfw=
|
||||
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.29.0 h1:YVtMlmfRUTaWs3+1acwMBp7rBUo6zrxl6Kn13/R9YW4=
|
||||
@@ -866,8 +866,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-goquery/goquery v1.0.1 h1:kpchVA1LdOFWdRpkDPESVdlb1JQI6ixsJ5MiNUITO7U=
|
||||
github.com/go-goquery/goquery v1.0.1/go.mod h1:W5s8OWbqWf6lG0LkXWBeh7U1Y/X5XTI0Br65MHF8uJk=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
|
||||
github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
|
||||
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
|
||||
@@ -1270,6 +1270,8 @@ github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3i
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||
github.com/yugabyte/pgx/v5 v5.5.3-yb-5 h1:MV66FoH4HFsA9IC+h1hRY/+9Rmo040zVyZovOX7zpuk=
|
||||
github.com/yugabyte/pgx/v5 v5.5.3-yb-5/go.mod h1:2SxizGfDY7UDCRTtbI/xd98C/oGN7S/3YoGF8l9gx/c=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -1821,8 +1823,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
|
||||
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
|
||||
google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
|
||||
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
|
||||
google.golang.org/api v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y=
|
||||
google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k=
|
||||
google.golang.org/api v0.249.0 h1:0VrsWAKzIZi058aeq+I86uIXbNhm9GxSHpbmZ92a38w=
|
||||
google.golang.org/api v0.249.0/go.mod h1:dGk9qyI0UYPwO/cjt2q06LG/EhUpwZGdAbYF14wHHrQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -2010,8 +2012,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
|
||||
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
|
||||
google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
|
||||
var expectedToolSources = []string{
|
||||
"alloydb-postgres-admin",
|
||||
"alloydb-postgres-observability",
|
||||
"alloydb-postgres",
|
||||
"bigquery",
|
||||
"clickhouse",
|
||||
@@ -85,6 +86,7 @@ func TestLoadPrebuiltToolYAMLs(t *testing.T) {
|
||||
|
||||
func TestGetPrebuiltTool(t *testing.T) {
|
||||
alloydb_admin_config, _ := Get("alloydb-postgres-admin")
|
||||
alloydb_observability_config, _ := Get("alloydb-postgres-observability")
|
||||
alloydb_config, _ := Get("alloydb-postgres")
|
||||
bigquery_config, _ := Get("bigquery")
|
||||
clickhouse_config, _ := Get("clickhouse")
|
||||
@@ -106,6 +108,9 @@ func TestGetPrebuiltTool(t *testing.T) {
|
||||
if len(alloydb_config) <= 0 {
|
||||
t.Fatalf("unexpected error: could not fetch alloydb prebuilt tools yaml")
|
||||
}
|
||||
if len(alloydb_observability_config) <= 0 {
|
||||
t.Fatalf("unexpected error: could not fetch alloydb-observability prebuilt tools yaml")
|
||||
}
|
||||
if len(bigquery_config) <= 0 {
|
||||
t.Fatalf("unexpected error: could not fetch bigquery prebuilt tools yaml")
|
||||
}
|
||||
|
||||
@@ -0,0 +1,121 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
sources:
|
||||
cloud-monitoring-source:
|
||||
kind: cloud-monitoring
|
||||
tools:
|
||||
get_system_metrics:
|
||||
kind: cloud-monitoring-query-prometheus
|
||||
source: cloud-monitoring-source
|
||||
description: |
|
||||
Fetches system-level Cloud Monitoring data (timeseries metrics) for an AlloyDB cluster or instance.
|
||||
To use this tool, you must provide the Google Cloud `projectID` and a PromQL `query`.
|
||||
|
||||
Generate the PromQL `query` for AlloyDB system metrics using the provided metrics and rules. Get labels like `cluster_id` and `instance_id` from the user's intent.
|
||||
|
||||
Defaults:
|
||||
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.
|
||||
|
||||
PromQL Query Examples:
|
||||
1. Basic Time Series: `avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m])`
|
||||
2. Top K: `topk(30, avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m]))`
|
||||
3. Mean: `avg(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="my-instance","cluster_id"="my-cluster"}[5m]))`
|
||||
4. Minimum: `min(min_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
5. Maximum: `max(max_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
6. Sum: `sum(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
7. Count streams: `count(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
8. Percentile with groupby on instanceid, clusterid: `quantile by ("instance_id","cluster_id")(0.99,avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","cluster_id"="my-cluster","instance_id"="my-instance"}[5m]))`
|
||||
|
||||
Available Metrics List: metricname. description. monitored resource. labels
|
||||
1. `alloydb.googleapis.com/instance/cpu/average_utilization`: The percentage of CPU being used on an instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
2. `alloydb.googleapis.com/instance/cpu/maximum_utilization`: Maximum CPU utilization across all currently serving nodes of the instance from 0 to 100. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
3. `alloydb.googleapis.com/cluster/storage/usage`: The total AlloyDB storage in bytes across the entire cluster. `alloydb.googleapis.com/Cluster`. `cluster_id`.
|
||||
4. `alloydb.googleapis.com/instance/postgres/replication/replicas`: The number of read replicas connected to the primary instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `state`, `replica_instance_id`.
|
||||
5. `alloydb.googleapis.com/instance/postgres/replication/maximum_lag`: The maximum replication time lag calculated across all serving read replicas of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `replica_instance_id`.
|
||||
6. `alloydb.googleapis.com/instance/memory/min_available_memory`: The minimum available memory across all currently serving nodes of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
7. `alloydb.googleapis.com/instance/postgres/instances`: The number of nodes in the instance, along with their status, which can be either up or down. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `status`.
|
||||
8. `alloydb.googleapis.com/database/postgresql/tuples`: Number of tuples (rows) by state per database in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`, `state`.
|
||||
9. `alloydb.googleapis.com/database/postgresql/temp_bytes_written_for_top_databases`: The total amount of data (in bytes) written to temporary files by the queries per database for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
10. `alloydb.googleapis.com/database/postgresql/temp_files_written_for_top_databases`: The number of temporary files used for writing data per database while performing internal algorithms like join, sort etc for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
11. `alloydb.googleapis.com/database/postgresql/inserted_tuples_count_for_top_databases`: The total number of rows inserted per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
12. `alloydb.googleapis.com/database/postgresql/updated_tuples_count_for_top_databases`: The total number of rows updated per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
13. `alloydb.googleapis.com/database/postgresql/deleted_tuples_count_for_top_databases`: The total number of rows deleted per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
14. `alloydb.googleapis.com/database/postgresql/backends_for_top_databases`: The current number of connections per database to the instance for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
15. `alloydb.googleapis.com/instance/postgresql/backends_by_state`: The current number of connections to the instance grouped by the state like idle, active, idle_in_transaction, idle_in_transaction_aborted, disabled, and fastpath_function_call. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `state`.
|
||||
16. `alloydb.googleapis.com/instance/postgresql/backends_for_top_applications`: The current number of connections to the AlloyDB instance, grouped by applications for top 500 applications. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `application_name`.
|
||||
17. `alloydb.googleapis.com/database/postgresql/new_connections_for_top_databases`: Total number of new connections added per database for top 500 databases to the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
18. `alloydb.googleapis.com/database/postgresql/deadlock_count_for_top_databases`: Total number of deadlocks detected in the instance per database for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
|
||||
19. `alloydb.googleapis.com/database/postgresql/statements_executed_count`: Total count of statements executed in the instance per database per operation_type. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`, `operation_type`.
|
||||
20. `alloydb.googleapis.com/instance/postgresql/returned_tuples_count`: Number of rows scanned while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
21. `alloydb.googleapis.com/instance/postgresql/fetched_tuples_count`: Number of rows fetched while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
22. `alloydb.googleapis.com/instance/postgresql/updated_tuples_count`: Number of rows updated while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
23. `alloydb.googleapis.com/instance/postgresql/inserted_tuples_count`: Number of rows inserted while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
24. `alloydb.googleapis.com/instance/postgresql/deleted_tuples_count`: Number of rows deleted while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
25. `alloydb.googleapis.com/instance/postgresql/written_tuples_count`: Number of rows written while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
26. `alloydb.googleapis.com/instance/postgresql/deadlock_count`: Number of deadlocks detected in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
27. `alloydb.googleapis.com/instance/postgresql/blks_read`: Number of blocks read by Postgres that were not in the buffer cache. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
28. `alloydb.googleapis.com/instance/postgresql/blks_hit`: Number of times Postgres found the requested block in the buffer cache. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
29. `alloydb.googleapis.com/instance/postgresql/temp_bytes_written_count`: The total amount of data (in bytes) written to temporary files by the queries while performing internal algorithms like join, sort etc. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
30. `alloydb.googleapis.com/instance/postgresql/temp_files_written_count`: The number of temporary files used for writing data in the instance while performing internal algorithms like join, sort etc. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
31. `alloydb.googleapis.com/instance/postgresql/new_connections_count`: The number of new connections added to the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
32. `alloydb.googleapis.com/instance/postgresql/wait_count`: Total number of times processes waited for each wait event in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `wait_event_type`, `wait_event_name`.
|
||||
33. `alloydb.googleapis.com/instance/postgresql/wait_time`: Total elapsed wait time for each wait event in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `wait_event_type`, `wait_event_name`.
|
||||
34. `alloydb.googleapis.com/instance/postgres/transaction_count`: The number of committed and rolled back transactions across all serving nodes of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
|
||||
|
||||
get_query_metrics:
|
||||
kind: cloud-monitoring-query-prometheus
|
||||
source: cloud-monitoring-source
|
||||
description: |
|
||||
Fetches query-level Cloud Monitoring data (timeseries metrics) for queries running in an AlloyDB instance.
|
||||
To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.
|
||||
|
||||
Generate the PromQL `query` for AlloyDB query metrics using the provided metrics and rules. Get labels like `cluster_id`, `instance_id`, and `query_hash` from the user's intent. If `query_hash` is provided, use the per-query metrics.
|
||||
|
||||
Defaults:
|
||||
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.
|
||||
|
||||
PromQL Query Examples:
|
||||
1. Basic Time Series: `avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m])`
|
||||
2. Top K: `topk(30, avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m]))`
|
||||
3. Mean: `avg(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="my-instance","cluster_id"="my-cluster"}[5m]))`
|
||||
4. Minimum: `min(min_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
5. Maximum: `max(max_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
6. Sum: `sum(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
7. Count streams: `count(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
|
||||
8. Percentile with groupby on instanceid, clusterid: `quantile by ("instance_id","cluster_id")(0.99,avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","cluster_id"="my-cluster","instance_id"="my-instance"}[5m]))`
|
||||
|
||||
Available Metrics List: metricname. description. monitored resource. labels. `aggregate` metrics are the aggregated values for all query stats; use the aggregate metrics if a query id is not provided. For perquery metrics, do not fetch `querystring` unless the user asks for it specifically; aggregate on `query_hash` to avoid fetching the querystring. Do not use latency metrics for anything.
|
||||
1. `alloydb.googleapis.com/database/postgresql/insights/aggregate/latencies`: Aggregated query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
|
||||
2. `alloydb.googleapis.com/database/postgresql/insights/aggregate/execution_time`: Accumulated aggregated query execution time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
|
||||
3. `alloydb.googleapis.com/database/postgresql/insights/aggregate/io_time`: Accumulated aggregated IO time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `io_type`.
|
||||
4. `alloydb.googleapis.com/database/postgresql/insights/aggregate/lock_time`: Accumulated aggregated lock wait time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `lock_type`.
|
||||
5. `alloydb.googleapis.com/database/postgresql/insights/aggregate/row_count`: Aggregated number of retrieved or affected rows since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
|
||||
6. `alloydb.googleapis.com/database/postgresql/insights/aggregate/shared_blk_access_count`: Aggregated shared blocks accessed by statement execution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `access_type`.
|
||||
7. `alloydb.googleapis.com/database/postgresql/insights/perquery/latencies`: Per query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
|
||||
8. `alloydb.googleapis.com/database/postgresql/insights/perquery/execution_time`: Accumulated execution times per user per database per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
|
||||
9. `alloydb.googleapis.com/database/postgresql/insights/perquery/io_time`: Accumulated IO time since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `io_type`, `querystring`, `query_hash`.
|
||||
10. `alloydb.googleapis.com/database/postgresql/insights/perquery/lock_time`: Accumulated lock wait time since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `lock_type`, `querystring`, `query_hash`.
|
||||
11. `alloydb.googleapis.com/database/postgresql/insights/perquery/row_count`: The number of retrieved or affected rows since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
|
||||
12. `alloydb.googleapis.com/database/postgresql/insights/perquery/shared_blk_access_count`: Shared blocks accessed by statement execution per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `access_type`, `querystring`, `query_hash`.
|
||||
13. `alloydb.googleapis.com/database/postgresql/insights/pertag/latencies`: Query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.
|
||||
14. `alloydb.googleapis.com/database/postgresql/insights/pertag/execution_time`: Accumulated execution times since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.
|
||||
15. `alloydb.googleapis.com/database/postgresql/insights/pertag/io_time`: Accumulated IO time since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `io_type`, `tag_hash`.
|
||||
16. `alloydb.googleapis.com/database/postgresql/insights/pertag/lock_time`: Accumulated lock wait time since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `lock_type`, `tag_hash`.
|
||||
17. `alloydb.googleapis.com/database/postgresql/insights/pertag/shared_blk_access_count`: Shared blocks accessed by statement execution per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `access_type`, `tag_hash`.
|
||||
18. `alloydb.googleapis.com/database/postgresql/insights/pertag/row_count`: The number of retrieved or affected rows since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.
|
||||
|
||||
toolsets:
|
||||
alloydb-postgres-cloud-monitoring-tools:
|
||||
- get_system_metrics
|
||||
- get_query_metrics
|
||||
@@ -11,6 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
sources:
|
||||
alloydb-pg-source:
|
||||
kind: "alloydb-postgres"
|
||||
@@ -30,93 +31,9 @@ tools:
|
||||
description: Use this tool to execute sql.
|
||||
|
||||
list_tables:
|
||||
kind: postgres-sql
|
||||
kind: postgres-list-tables
|
||||
source: alloydb-pg-source
|
||||
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
|
||||
statement: |
|
||||
WITH desired_relkinds AS (
|
||||
SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
|
||||
),
|
||||
table_info AS (
|
||||
SELECT
|
||||
t.oid AS table_oid,
|
||||
ns.nspname AS schema_name,
|
||||
t.relname AS table_name,
|
||||
pg_get_userbyid(t.relowner) AS table_owner,
|
||||
obj_description(t.oid, 'pg_class') AS table_comment,
|
||||
t.relkind AS object_kind
|
||||
FROM
|
||||
pg_class t
|
||||
JOIN
|
||||
pg_namespace ns ON ns.oid = t.relnamespace
|
||||
CROSS JOIN desired_relkinds dk
|
||||
WHERE
|
||||
t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
|
||||
AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
|
||||
AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
|
||||
AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
|
||||
),
|
||||
columns_info AS (
|
||||
SELECT
|
||||
att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
|
||||
att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
|
||||
pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
|
||||
FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
|
||||
JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
|
||||
),
|
||||
constraints_info AS (
|
||||
SELECT
|
||||
con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
|
||||
CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
|
||||
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
|
||||
NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
|
||||
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
|
||||
FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
|
||||
),
|
||||
indexes_info AS (
|
||||
SELECT
|
||||
idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
|
||||
idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
|
||||
(SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
|
||||
FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
|
||||
),
|
||||
triggers_info AS (
|
||||
SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
|
||||
FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
|
||||
)
|
||||
SELECT
|
||||
ti.schema_name,
|
||||
ti.table_name AS object_name,
|
||||
CASE
|
||||
WHEN $2 = 'simple' THEN
|
||||
-- IF format is 'simple', return basic JSON
|
||||
json_build_object('name', ti.table_name)
|
||||
ELSE
|
||||
json_build_object(
|
||||
'schema_name', ti.schema_name,
|
||||
'object_name', ti.table_name,
|
||||
'object_type', CASE ti.object_kind
|
||||
WHEN 'r' THEN 'TABLE'
|
||||
WHEN 'p' THEN 'PARTITIONED TABLE'
|
||||
ELSE ti.object_kind::text -- Should not happen due to WHERE clause
|
||||
END,
|
||||
'owner', ti.table_owner,
|
||||
'comment', ti.table_comment,
|
||||
'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
|
||||
'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
|
||||
'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
|
||||
'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
|
||||
)
|
||||
END AS object_details
|
||||
FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
|
||||
parameters:
|
||||
- name: table_names
|
||||
type: string
|
||||
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
|
||||
- name: output_format
|
||||
type: string
|
||||
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
|
||||
default: "detailed"
|
||||
|
||||
toolsets:
|
||||
alloydb-postgres-database-tools:
|
||||
|
||||
@@ -1,9 +1,29 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
sources:
|
||||
bigquery-source:
|
||||
kind: "bigquery"
|
||||
project: ${BIGQUERY_PROJECT}
|
||||
location: ${BIGQUERY_LOCATION:}
|
||||
|
||||
tools:
|
||||
analyze_contribution:
|
||||
kind: bigquery-analyze-contribution
|
||||
source: bigquery-source
|
||||
description: Use this tool to analyze the contribution about changes to key metrics in multi-dimensional data.
|
||||
|
||||
ask_data_insights:
|
||||
kind: bigquery-conversational-analytics
|
||||
source: bigquery-source
|
||||
@@ -44,6 +64,7 @@ tools:
|
||||
|
||||
toolsets:
|
||||
bigquery-database-tools:
|
||||
- analyze_contribution
|
||||
- ask_data_insights
|
||||
- execute_sql
|
||||
- forecast
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
sources:
|
||||
clickhouse-source:
|
||||
kind: clickhouse
|
||||
@@ -14,6 +27,12 @@ tools:
|
||||
source: clickhouse-source
|
||||
description: Use this tool to execute SQL.
|
||||
|
||||
list_databases:
|
||||
kind: clickhouse-list-databases
|
||||
source: clickhouse-source
|
||||
description: Use this tool to list all databases in ClickHouse.
|
||||
|
||||
toolsets:
|
||||
clickhouse-database-tools:
|
||||
- execute_sql
|
||||
- list_databases
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
sources:
|
||||
cloudsql-mssql-source:
|
||||
kind: cloud-sql-mssql
|
||||
@@ -8,6 +21,7 @@ sources:
|
||||
ipAddress: ${CLOUD_SQL_MSSQL_IP_ADDRESS}
|
||||
user: ${CLOUD_SQL_MSSQL_USER}
|
||||
password: ${CLOUD_SQL_MSSQL_PASSWORD}
|
||||
ipType: ${CLOUD_SQL_MSSQL_IP_TYPE:public}
|
||||
tools:
|
||||
execute_sql:
|
||||
kind: mssql-execute-sql
|
||||
|
||||
@@ -1,3 +1,17 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloud-sql-mysql-source:
    kind: cloud-sql-mysql
@@ -7,174 +21,16 @@ sources:
    database: ${CLOUD_SQL_MYSQL_DATABASE}
    user: ${CLOUD_SQL_MYSQL_USER}
    password: ${CLOUD_SQL_MYSQL_PASSWORD}
    ipType: ${CLOUD_SQL_MYSQL_IP_TYPE:PUBLIC}

tools:
  execute_sql:
    kind: mysql-execute-sql
    source: cloud-sql-mysql-source
    description: Use this tool to execute SQL.
  list_tables:
    kind: mysql-sql
    kind: mysql-list-tables
    source: cloud-sql-mysql-source
    description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
    statement: |
      SELECT
        T.TABLE_SCHEMA AS schema_name,
        T.TABLE_NAME AS object_name,
        CASE
          WHEN @output_format = 'simple' THEN
            JSON_OBJECT('name', T.TABLE_NAME)
          ELSE
            CONVERT( JSON_OBJECT(
              'schema_name', T.TABLE_SCHEMA,
              'object_name', T.TABLE_NAME,
              'object_type', 'TABLE',
              'owner', (
                SELECT
                  IFNULL(U.GRANTEE, 'N/A')
                FROM
                  INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
                WHERE
                  U.TABLE_SCHEMA = T.TABLE_SCHEMA
                LIMIT 1
              ),
              'comment', IFNULL(T.TABLE_COMMENT, ''),
              'columns', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'column_name', C.COLUMN_NAME,
                        'data_type', C.COLUMN_TYPE,
                        'ordinal_position', C.ORDINAL_POSITION,
                        'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
                        'column_default', C.COLUMN_DEFAULT,
                        'column_comment', IFNULL(C.COLUMN_COMMENT, '')
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.COLUMNS C
                WHERE
                  C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
                ORDER BY C.ORDINAL_POSITION
              ),
              'constraints', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'constraint_name', TC.CONSTRAINT_NAME,
                        'constraint_type',
                        CASE TC.CONSTRAINT_TYPE
                          WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
                          WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
                          WHEN 'UNIQUE' THEN 'UNIQUE'
                          ELSE TC.CONSTRAINT_TYPE
                        END,
                        'constraint_definition', '',
                        'constraint_columns', (
                          SELECT
                            IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
                          FROM
                            INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
                          WHERE
                            KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                            AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                            AND KCU.TABLE_NAME = TC.TABLE_NAME
                          ORDER BY KCU.ORDINAL_POSITION
                        ),
                        'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
                        'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
                          (SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
                           FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
                           WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                             AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                             AND FKCU.TABLE_NAME = TC.TABLE_NAME
                             AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
                           ORDER BY FKCU.ORDINAL_POSITION),
                          NULL
                        )
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
                LEFT JOIN
                  INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
                  ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
                  AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
                  AND TC.TABLE_NAME = RC.TABLE_NAME
                WHERE
                  TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
              ),
              'indexes', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'index_name', IndexData.INDEX_NAME,
                        'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
                        'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
                        'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM (
                  SELECT
                    S.TABLE_SCHEMA,
                    S.TABLE_NAME,
                    S.INDEX_NAME,
                    MIN(S.NON_UNIQUE) AS NON_UNIQUE, -- Aggregate NON_UNIQUE here to get unique status for the index
                    JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY -- Aggregate columns into an array for this index
                  FROM
                    INFORMATION_SCHEMA.STATISTICS S
                  WHERE
                    S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
                  GROUP BY
                    S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
                ) AS IndexData
                ORDER BY IndexData.INDEX_NAME
              ),
              'triggers', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'trigger_name', TR.TRIGGER_NAME,
                        'trigger_definition', TR.ACTION_STATEMENT
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TRIGGERS TR
                WHERE
                  TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
                ORDER BY TR.TRIGGER_NAME
              )
            ) USING utf8mb4)
        END AS object_details
      FROM
        INFORMATION_SCHEMA.TABLES T
      CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
      WHERE
        T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
        AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
        AND T.TABLE_TYPE = 'BASE TABLE'
      ORDER BY
        T.TABLE_SCHEMA, T.TABLE_NAME;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
        default: ""
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  cloud-sql-mysql-database-tools:
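The inline statement that `mysql-list-tables` replaces binds its two `?` placeholders into MySQL session variables through the `CROSS JOIN (SELECT @table_names := ?, @output_format := ?)` subquery, so a single comma-separated string drives the `FIND_IN_SET` filter. A minimal Go sketch of that binding pattern, condensed to the name filter; the DSN and table names are hypothetical, and it assumes the github.com/go-sql-driver/mysql driver:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same shape as the inline statement: the first arg feeds @table_names,
	// the second feeds @output_format; an empty filter lists every table.
	rows, err := db.Query(`
		SELECT T.TABLE_NAME
		FROM INFORMATION_SCHEMA.TABLES T
		CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
		WHERE T.TABLE_TYPE = 'BASE TABLE'
		  AND (NULLIF(TRIM(@table_names), '') IS NULL
		       OR FIND_IN_SET(T.TABLE_NAME, @table_names))`,
		"users,orders", "simple")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name)
	}
}
```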
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloudsql-pg-source:
    kind: cloud-sql-postgres
@@ -5,8 +19,9 @@ sources:
    region: ${CLOUD_SQL_POSTGRES_REGION}
    instance: ${CLOUD_SQL_POSTGRES_INSTANCE}
    database: ${CLOUD_SQL_POSTGRES_DATABASE}
    user: ${CLOUD_SQL_POSTGRES_USER}
    password: ${CLOUD_SQL_POSTGRES_PASSWORD}
    user: ${CLOUD_SQL_POSTGRES_USER:}
    password: ${CLOUD_SQL_POSTGRES_PASSWORD:}
    ipType: ${CLOUD_SQL_POSTGRES_IP_TYPE:public}

tools:
  execute_sql:
@@ -15,93 +30,9 @@ tools:
    description: Use this tool to execute sql.

  list_tables:
    kind: postgres-sql
    kind: postgres-list-tables
    source: cloudsql-pg-source
    description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
    statement: |
      WITH desired_relkinds AS (
        SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
      ),
      table_info AS (
        SELECT
          t.oid AS table_oid,
          ns.nspname AS schema_name,
          t.relname AS table_name,
          pg_get_userbyid(t.relowner) AS table_owner,
          obj_description(t.oid, 'pg_class') AS table_comment,
          t.relkind AS object_kind
        FROM
          pg_class t
        JOIN
          pg_namespace ns ON ns.oid = t.relnamespace
        CROSS JOIN desired_relkinds dk
        WHERE
          t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
          AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
          AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
          AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
      ),
      columns_info AS (
        SELECT
          att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
          att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
          pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
        FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
        JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
      ),
      constraints_info AS (
        SELECT
          con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
          CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
          (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
          NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
          (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
        FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
      ),
      indexes_info AS (
        SELECT
          idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
          idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
          (SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
        FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
      ),
      triggers_info AS (
        SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
        FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
      )
      SELECT
        ti.schema_name,
        ti.table_name AS object_name,
        CASE
          WHEN $2 = 'simple' THEN
            -- IF format is 'simple', return basic JSON
            json_build_object('name', ti.table_name)
          ELSE
            json_build_object(
              'schema_name', ti.schema_name,
              'object_name', ti.table_name,
              'object_type', CASE ti.object_kind
                WHEN 'r' THEN 'TABLE'
                WHEN 'p' THEN 'PARTITIONED TABLE'
                ELSE ti.object_kind::text -- Should not happen due to WHERE clause
              END,
              'owner', ti.table_owner,
              'comment', ti.table_comment,
              'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
              'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
              'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
              'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
            )
        END AS object_details
      FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  cloud-sql-postgres-database-tools:
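The PostgreSQL statement that `postgres-list-tables` replaces takes plain positional parameters instead of session variables: `$1` is the comma-separated table filter (empty means all user tables) and `$2` selects `'simple'` or `'detailed'` output. A minimal Go sketch of the `$1` filter, reduced to the `table_info` CTE's core predicate; the connection string is hypothetical, and it assumes the github.com/lib/pq driver:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/mydb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Passing "" for $1 would list every user table; a comma-separated
	// string restricts the result to those names.
	rows, err := db.Query(`
		SELECT ns.nspname, t.relname
		FROM pg_class t
		JOIN pg_namespace ns ON ns.oid = t.relnamespace
		WHERE t.relkind = ANY(ARRAY['r', 'p']::char[])
		  AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1, ',')))
		  AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')`,
		"users,orders")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var schema, name string
		if err := rows.Scan(&schema, &name); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s.%s\n", schema, name)
	}
}
```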
@@ -1,8 +1,21 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
  firestore-source:
    kind: firestore
    project: ${FIRESTORE_PROJECT}
    database: ${FIRESTORE_DATABASE}
    database: ${FIRESTORE_DATABASE:}

tools:
  firestore-get-documents:
@@ -13,26 +26,26 @@ tools:
    kind: firestore-add-documents
    source: firestore-source
    description: |
      Adds a new document to a Firestore collection. Please follow the best practices :
      1. Always use typed values in the documentData: Every field must be wrapped with its appropriate type indicator (e.g., {"stringValue": "text"})
      2. Integer values can be strings in the documentData: The tool accepts integer values as strings (e.g., {"integerValue": "1500"})
      3. Use returnData sparingly: Only set to true when you need to verify the exact data that was written
      4. Validate data before sending: Ensure your data matches Firestore's native JSON format
      5. Handle timestamps properly: Use RFC3339 format for timestamp strings
      6. Base64 encode binary data: Binary data must be base64 encoded in the bytesValue field
      7. Consider security rules: Ensure your Firestore security rules allow document creation in the target collection
      Adds a new document to a Firestore collection. Please follow the best practices :
      1. Always use typed values in the documentData: Every field must be wrapped with its appropriate type indicator (e.g., {"stringValue": "text"})
      2. Integer values can be strings in the documentData: The tool accepts integer values as strings (e.g., {"integerValue": "1500"})
      3. Use returnData sparingly: Only set to true when you need to verify the exact data that was written
      4. Validate data before sending: Ensure your data matches Firestore's native JSON format
      5. Handle timestamps properly: Use RFC3339 format for timestamp strings
      6. Base64 encode binary data: Binary data must be base64 encoded in the bytesValue field
      7. Consider security rules: Ensure your Firestore security rules allow document creation in the target collection
  firestore-update-document:
    kind: firestore-update-document
    source: firestore-source
    description: |
      Updates an existing document in Firestore. Supports both full document updates and selective field updates using an update mask. Please follow the best practices:
      1. Use update masks for precision: When you only need to update specific fields, use the updateMask parameter to avoid unintended changes
      2. Always use typed values in the documentData: Every field must be wrapped with its appropriate type indicator (e.g., {"stringValue": "text"})
      3. Delete fields using update mask: To delete fields, include them in the updateMask but omit them from documentData
      4. Integer values can be strings: The tool accepts integer values as strings (e.g., {"integerValue": "1500"})
      5. Use returnData sparingly: Only set to true when you need to verify the exact data after the update
      6. Handle timestamps properly: Use RFC3339 format for timestamp strings
      7. Consider security rules: Ensure your Firestore security rules allow document updates
      Updates an existing document in Firestore. Supports both full document updates and selective field updates using an update mask. Please follow the best practices:
      1. Use update masks for precision: When you only need to update specific fields, use the updateMask parameter to avoid unintended changes
      2. Always use typed values in the documentData: Every field must be wrapped with its appropriate type indicator (e.g., {"stringValue": "text"})
      3. Delete fields using update mask: To delete fields, include them in the updateMask but omit them from documentData
      4. Integer values can be strings: The tool accepts integer values as strings (e.g., {"integerValue": "1500"})
      5. Use returnData sparingly: Only set to true when you need to verify the exact data after the update
      6. Handle timestamps properly: Use RFC3339 format for timestamp strings
      7. Consider security rules: Ensure your Firestore security rules allow document updates
  firestore-list-collections:
    kind: firestore-list-collections
    source: firestore-source
@@ -44,8 +57,8 @@ tools:
  firestore-query-collection:
    kind: firestore-query-collection
    source: firestore-source
    description: |
      Retrieves one or more Firestore documents from a collection in a database in the current project by a collection with a full document path.
    description: |
      Retrieves one or more Firestore documents from a collection in a database in the current project by a collection with a full document path.
      Use this if you know the exact path of a collection and the filtering clause you would like for the document.
  firestore-get-rules:
    kind: firestore-get-rules
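The best-practices list above centers on Firestore's typed-value JSON format: every field in `documentData` is wrapped in a type indicator. A minimal Go sketch of building such a payload; the field names and values are hypothetical, and only the standard library is used:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Each field is wrapped in its Firestore type indicator, per the
	// tool description: integers may be passed as strings, timestamps
	// use RFC3339, and binary data would be base64 in bytesValue.
	documentData := map[string]any{
		"title":     map[string]any{"stringValue": "Invoice #42"},
		"amount":    map[string]any{"integerValue": "1500"}, // string form is accepted
		"paid":      map[string]any{"booleanValue": false},
		"createdAt": map[string]any{"timestampValue": "2025-01-01T00:00:00Z"}, // RFC3339
	}

	b, err := json.MarshalIndent(documentData, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```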
@@ -32,168 +32,9 @@ tools:
    source: mysql-source
    description: Use this tool to execute SQL.
  list_tables:
    kind: mysql-sql
    kind: mysql-list-tables
    source: mysql-source
    description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
    statement: |
      SELECT
        T.TABLE_SCHEMA AS schema_name,
        T.TABLE_NAME AS object_name,
        CASE
          WHEN @output_format = 'simple' THEN
            JSON_OBJECT('name', T.TABLE_NAME)
          ELSE
            CONVERT( JSON_OBJECT(
              'schema_name', T.TABLE_SCHEMA,
              'object_name', T.TABLE_NAME,
              'object_type', 'TABLE',
              'owner', (
                SELECT
                  IFNULL(U.GRANTEE, 'N/A')
                FROM
                  INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
                WHERE
                  U.TABLE_SCHEMA = T.TABLE_SCHEMA
                LIMIT 1
              ),
              'comment', IFNULL(T.TABLE_COMMENT, ''),
              'columns', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'column_name', C.COLUMN_NAME,
                        'data_type', C.COLUMN_TYPE,
                        'ordinal_position', C.ORDINAL_POSITION,
                        'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
                        'column_default', C.COLUMN_DEFAULT,
                        'column_comment', IFNULL(C.COLUMN_COMMENT, '')
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.COLUMNS C
                WHERE
                  C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
                ORDER BY C.ORDINAL_POSITION
              ),
              'constraints', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'constraint_name', TC.CONSTRAINT_NAME,
                        'constraint_type',
                        CASE TC.CONSTRAINT_TYPE
                          WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
                          WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
                          WHEN 'UNIQUE' THEN 'UNIQUE'
                          ELSE TC.CONSTRAINT_TYPE
                        END,
                        'constraint_definition', '',
                        'constraint_columns', (
                          SELECT
                            IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
                          FROM
                            INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
                          WHERE
                            KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                            AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                            AND KCU.TABLE_NAME = TC.TABLE_NAME
                          ORDER BY KCU.ORDINAL_POSITION
                        ),
                        'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
                        'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
                          (SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
                           FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
                           WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                             AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                             AND FKCU.TABLE_NAME = TC.TABLE_NAME
                             AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
                           ORDER BY FKCU.ORDINAL_POSITION),
                          NULL
                        )
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
                LEFT JOIN
                  INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
                  ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
                  AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
                  AND TC.TABLE_NAME = RC.TABLE_NAME
                WHERE
                  TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
              ),
              'indexes', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'index_name', IndexData.INDEX_NAME,
                        'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
                        'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
                        'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM (
                  SELECT
                    S.TABLE_SCHEMA,
                    S.TABLE_NAME,
                    S.INDEX_NAME,
                    MIN(S.NON_UNIQUE) AS NON_UNIQUE, -- Aggregate NON_UNIQUE here to get unique status for the index
                    JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY -- Aggregate columns into an array for this index
                  FROM
                    INFORMATION_SCHEMA.STATISTICS S
                  WHERE
                    S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
                  GROUP BY
                    S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
                ) AS IndexData
                ORDER BY IndexData.INDEX_NAME
              ),
              'triggers', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'trigger_name', TR.TRIGGER_NAME,
                        'trigger_definition', TR.ACTION_STATEMENT
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TRIGGERS TR
                WHERE
                  TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
                ORDER BY TR.TRIGGER_NAME
              )
            ) USING utf8mb4)
        END AS object_details
      FROM
        INFORMATION_SCHEMA.TABLES T
      CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
      WHERE
        T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
        AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
        AND T.TABLE_TYPE = 'BASE TABLE'
      ORDER BY
        T.TABLE_SCHEMA, T.TABLE_NAME;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
        default: ""
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  mysql-database-tools:
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  postgresql-source:
    kind: postgres
@@ -6,6 +20,7 @@ sources:
    database: ${POSTGRES_DATABASE}
    user: ${POSTGRES_USER}
    password: ${POSTGRES_PASSWORD}
    queryParams: ${POSTGRES_QUERY_PARAMS:}

tools:
  execute_sql:
@@ -14,93 +29,9 @@ tools:
    description: Use this tool to execute SQL.

  list_tables:
    kind: postgres-sql
    kind: postgres-list-tables
    source: postgresql-source
    description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
    statement: |
      WITH desired_relkinds AS (
        SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
      ),
      table_info AS (
        SELECT
          t.oid AS table_oid,
          ns.nspname AS schema_name,
          t.relname AS table_name,
          pg_get_userbyid(t.relowner) AS table_owner,
          obj_description(t.oid, 'pg_class') AS table_comment,
          t.relkind AS object_kind
        FROM
          pg_class t
        JOIN
          pg_namespace ns ON ns.oid = t.relnamespace
        CROSS JOIN desired_relkinds dk
        WHERE
          t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
          AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
          AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
          AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
      ),
      columns_info AS (
        SELECT
          att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
          att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
          pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
        FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
        JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
      ),
      constraints_info AS (
        SELECT
          con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
          CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
          (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
          NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
          (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
        FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
      ),
      indexes_info AS (
        SELECT
          idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
          idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
          (SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
        FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
      ),
      triggers_info AS (
        SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
        FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
      )
      SELECT
        ti.schema_name,
        ti.table_name AS object_name,
        CASE
          WHEN $2 = 'simple' THEN
            -- IF format is 'simple', return basic JSON
            json_build_object('name', ti.table_name)
          ELSE
            json_build_object(
              'schema_name', ti.schema_name,
              'object_name', ti.table_name,
              'object_type', CASE ti.object_kind
                WHEN 'r' THEN 'TABLE'
                WHEN 'p' THEN 'PARTITIONED TABLE'
                ELSE ti.object_kind::text -- Should not happen due to WHERE clause
              END,
              'owner', ti.table_owner,
              'comment', ti.table_comment,
              'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
              'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
              'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
              'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
            )
        END AS object_details
      FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  postgres-database-tools:
internal/sources/alloydbadmin/alloydbadmin.go (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alloydbadmin

import (
	"context"
	"fmt"
	"net/http"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	alloydbrestapi "google.golang.org/api/alloydb/v1"
)

const SourceKind string = "alloydb-admin"

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	ua, err := util.UserAgentFromContext(ctx)
	if err != nil {
		fmt.Printf("Error in User Agent retrieval: %s", err)
	}

	var client *http.Client
	if r.UseClientOAuth {
		client = nil
	} else {
		// Use Application Default Credentials
		creds, err := google.FindDefaultCredentials(ctx, alloydbrestapi.CloudPlatformScope)
		if err != nil {
			return nil, fmt.Errorf("failed to find default credentials: %w", err)
		}
		client = oauth2.NewClient(ctx, creds.TokenSource)
	}

	s := &Source{
		Name:           r.Name,
		Kind:           SourceKind,
		BaseURL:        "https://alloydb.googleapis.com",
		Client:         client,
		UserAgent:      ua,
		UseClientOAuth: r.UseClientOAuth,
	}

	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	BaseURL        string
	Client         *http.Client
	UserAgent      string
	UseClientOAuth bool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) GetClient(ctx context.Context, accessToken string) (*http.Client, error) {
	if s.UseClientOAuth {
		if accessToken == "" {
			return nil, fmt.Errorf("client-side OAuth is enabled but no access token was provided")
		}
		token := &oauth2.Token{AccessToken: accessToken}
		return oauth2.NewClient(ctx, oauth2.StaticTokenSource(token)), nil
	}
	return s.Client, nil
}

func (s *Source) UseClientAuthorization() bool {
	return s.UseClientOAuth
}
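The alloydb-admin source picks its HTTP client at request time: with `useClientOAuth` set, `GetClient` builds a per-request client from the caller's access token; otherwise it returns the ADC-backed client created in `Initialize`. A minimal consumption sketch; the token is a placeholder, and importing an internal package like this only works from inside the genai-toolbox module itself. The cloud-monitoring and cloud-sql-admin sources added below follow the identical pattern:

```go
package main

import (
	"context"
	"log"

	"github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
)

func main() {
	ctx := context.Background()

	// With UseClientOAuth set, GetClient requires a caller-supplied token.
	s := &alloydbadmin.Source{
		Name:           "my-alloydb-admin-instance",
		Kind:           alloydbadmin.SourceKind,
		BaseURL:        "https://alloydb.googleapis.com",
		UseClientOAuth: true,
	}

	client, err := s.GetClient(ctx, "ya29.example-access-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use against the AlloyDB Admin REST API at s.BaseURL
}
```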
internal/sources/alloydbadmin/alloydbadmin_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbadmin_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlAlloyDBAdmin(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
			`,
			want: map[string]sources.SourceConfig{
				"my-alloydb-admin-instance": alloydbadmin.Config{
					Name:           "my-alloydb-admin-instance",
					Kind:           alloydbadmin.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-alloydb-admin-instance": alloydbadmin.Config{
					Name:           "my-alloydb-admin-instance",
					Kind:           alloydbadmin.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
					project: test-project
			`,
			err: "unable to parse source \"my-alloydb-admin-instance\" as \"alloydb-admin\": [2:1] unknown field \"project\"\n 1 | kind: alloydb-admin\n> 2 | project: test-project\n ^\n",
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-alloydb-admin-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-alloydb-admin-instance\"",
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
@@ -17,16 +17,18 @@ package bigquery
import (
	"context"
	"fmt"
	"net/http"
	"strings"

	bigqueryapi "cloud.google.com/go/bigquery"
	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	bigqueryrestapi "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
)

@@ -35,7 +37,7 @@ const SourceKind string = "bigquery"
// validate interface
var _ sources.SourceConfig = Config{}

type BigqueryClientCreator func(tokenString tools.AccessToken, wantRestService bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error)
type BigqueryClientCreator func(tokenString string, wantRestService bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error)

func init() {
	if !sources.Register(SourceKind, newConfig) {
@@ -53,11 +55,12 @@ func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources

type Config struct {
	// BigQuery configs
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	Project        string `yaml:"project" validate:"required"`
	Location       string `yaml:"location"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
	Name            string   `yaml:"name" validate:"required"`
	Kind            string   `yaml:"kind" validate:"required"`
	Project         string   `yaml:"project" validate:"required"`
	Location        string   `yaml:"location"`
	AllowedDatasets []string `yaml:"allowedDatasets"`
	UseClientOAuth  bool     `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
@@ -85,6 +88,37 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
		}
	}

	allowedDatasets := make(map[string]struct{})
	// Get full id of allowed datasets and verify they exist.
	if len(r.AllowedDatasets) > 0 {
		for _, allowed := range r.AllowedDatasets {
			var projectID, datasetID, allowedFullID string
			if strings.Contains(allowed, ".") {
				parts := strings.Split(allowed, ".")
				if len(parts) != 2 {
					return nil, fmt.Errorf("invalid allowedDataset format: %q, expected 'project.dataset' or 'dataset'", allowed)
				}
				projectID = parts[0]
				datasetID = parts[1]
				allowedFullID = allowed
			} else {
				projectID = client.Project()
				datasetID = allowed
				allowedFullID = fmt.Sprintf("%s.%s", projectID, datasetID)
			}

			dataset := client.DatasetInProject(projectID, datasetID)
			_, err := dataset.Metadata(ctx)
			if err != nil {
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
					return nil, fmt.Errorf("allowedDataset '%s' not found in project '%s'", datasetID, projectID)
				}
				return nil, fmt.Errorf("failed to verify allowedDataset '%s' in project '%s': %w", datasetID, projectID, err)
			}
			allowedDatasets[allowedFullID] = struct{}{}
		}
	}

	s := &Source{
		Name: r.Name,
		Kind: SourceKind,
@@ -95,6 +129,7 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
		TokenSource:        tokenSource,
		MaxQueryResultRows: 50,
		ClientCreator:      clientCreator,
		AllowedDatasets:    allowedDatasets,
		UseClientOAuth:     r.UseClientOAuth,
	}
	return s, nil
@@ -114,6 +149,7 @@ type Source struct {
	TokenSource        oauth2.TokenSource
	MaxQueryResultRows int
	ClientCreator      BigqueryClientCreator
	AllowedDatasets    map[string]struct{}
	UseClientOAuth     bool
}

@@ -154,6 +190,29 @@ func (s *Source) BigQueryClientCreator() BigqueryClientCreator {
	return s.ClientCreator
}

func (s *Source) BigQueryAllowedDatasets() []string {
	if len(s.AllowedDatasets) == 0 {
		return nil
	}
	datasets := make([]string, 0, len(s.AllowedDatasets))
	for d := range s.AllowedDatasets {
		datasets = append(datasets, d)
	}
	return datasets
}

// IsDatasetAllowed checks if a given dataset is accessible based on the source's configuration.
func (s *Source) IsDatasetAllowed(projectID, datasetID string) bool {
	// If the normalized map is empty, it means no restrictions were configured.
	if len(s.AllowedDatasets) == 0 {
		return true
	}

	targetDataset := fmt.Sprintf("%s.%s", projectID, datasetID)
	_, ok := s.AllowedDatasets[targetDataset]
	return ok
}

func initBigQueryConnection(
	ctx context.Context,
	tracer trace.Tracer,
@@ -199,7 +258,7 @@ func initBigQueryConnectionWithOAuthToken(
	location string,
	name string,
	userAgent string,
	tokenString tools.AccessToken,
	tokenString string,
	wantRestService bool,
) (*bigqueryapi.Client, *bigqueryrestapi.Service, error) {
	ctx, span := sources.InitConnectionSpan(ctx, tracer, SourceKind, name)
@@ -238,13 +297,13 @@ func newBigQueryClientCreator(
	project string,
	location string,
	name string,
) (func(tools.AccessToken, bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error), error) {
) (func(string, bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error), error) {
	userAgent, err := util.UserAgentFromContext(ctx)
	if err != nil {
		return nil, err
	}

	return func(tokenString tools.AccessToken, wantRestService bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error) {
	return func(tokenString string, wantRestService bool) (*bigqueryapi.Client, *bigqueryrestapi.Service, error) {
		return initBigQueryConnectionWithOAuthToken(ctx, tracer, project, location, name, userAgent, tokenString, wantRestService)
	}, nil
}
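The new `allowedDatasets` restriction normalizes each entry to a `"project.dataset"` key during `Initialize` (bare dataset names are prefixed with the client's project), and `IsDatasetAllowed` treats an empty map as "no restriction". A minimal standalone sketch of that lookup logic, mirroring the code above with hypothetical values:

```go
package main

import "fmt"

type source struct{ allowed map[string]struct{} }

func (s *source) isDatasetAllowed(projectID, datasetID string) bool {
	// An empty map means no restrictions were configured.
	if len(s.allowed) == 0 {
		return true
	}
	// Keys are normalized to "project.dataset" at initialization time.
	_, ok := s.allowed[fmt.Sprintf("%s.%s", projectID, datasetID)]
	return ok
}

func main() {
	s := &source{allowed: map[string]struct{}{"my-project.my_dataset": {}}}
	fmt.Println(s.isDatasetAllowed("my-project", "my_dataset"))    // true
	fmt.Println(s.isDatasetAllowed("my-project", "other_dataset")) // false
}
```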
@@ -69,6 +69,27 @@ func TestParseFromYamlBigQuery(t *testing.T) {
			},
		},
	},
	{
		desc: "with allowed datasets example",
		in: `
		sources:
			my-instance:
				kind: bigquery
				project: my-project
				location: us
				allowedDatasets:
					- my_dataset
		`,
		want: server.SourceConfigs{
			"my-instance": bigquery.Config{
				Name:            "my-instance",
				Kind:            bigquery.SourceKind,
				Project:         "my-project",
				Location:        "us",
				AllowedDatasets: []string{"my_dataset"},
			},
		},
	},
}
for _, tc := range tcs {
	t.Run(tc.desc, func(t *testing.T) {
internal/sources/cloudmonitoring/cloud_monitoring.go (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudmonitoring

import (
	"context"
	"fmt"
	"net/http"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	monitoring "google.golang.org/api/monitoring/v3"
)

const SourceKind string = "cloud-monitoring"

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

// Initialize initializes a Cloud Monitoring Source instance.
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	ua, err := util.UserAgentFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("error in User Agent retrieval: %s", err)
	}

	var client *http.Client
	if r.UseClientOAuth {
		client = nil
	} else {
		// Use Application Default Credentials
		creds, err := google.FindDefaultCredentials(ctx, monitoring.MonitoringScope)
		if err != nil {
			return nil, fmt.Errorf("failed to find default credentials: %w", err)
		}
		client = oauth2.NewClient(ctx, creds.TokenSource)
	}

	s := &Source{
		Name:           r.Name,
		Kind:           SourceKind,
		BaseURL:        "https://monitoring.googleapis.com",
		Client:         client,
		UserAgent:      ua,
		UseClientOAuth: r.UseClientOAuth,
	}
	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	BaseURL        string `yaml:"baseUrl"`
	Client         *http.Client
	UserAgent      string
	UseClientOAuth bool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) GetClient(ctx context.Context, accessToken string) (*http.Client, error) {
	if s.UseClientOAuth {
		if accessToken == "" {
			return nil, fmt.Errorf("client-side OAuth is enabled but no access token was provided")
		}
		token := &oauth2.Token{AccessToken: accessToken}
		return oauth2.NewClient(ctx, oauth2.StaticTokenSource(token)), nil
	}
	return s.Client, nil
}

func (s *Source) UseClientAuthorization() bool {
	return s.UseClientOAuth
}
internal/sources/cloudmonitoring/cloud_monitoring_test.go (new file, 135 lines)
@@ -0,0 +1,135 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudmonitoring_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudmonitoring"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlCloudMonitoring(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-monitoring-instance": cloudmonitoring.Config{
					Name:           "my-cloud-monitoring-instance",
					Kind:           cloudmonitoring.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-monitoring-instance": cloudmonitoring.Config{
					Name:           "my-cloud-monitoring-instance",
					Kind:           cloudmonitoring.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
					project: test-project
			`,
			err: `unable to parse source "my-cloud-monitoring-instance" as "cloud-monitoring": [2:1] unknown field "project"
   1 | kind: cloud-monitoring
>  2 | project: test-project
       ^
`,
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-cloud-monitoring-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-cloud-monitoring-instance\"",
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
internal/sources/cloudsqladmin/cloud_sql_admin.go (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudsqladmin

import (
	"context"
	"fmt"
	"net/http"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	sqladmin "google.golang.org/api/sqladmin/v1"
)

const SourceKind string = "cloud-sql-admin"

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

// Initialize initializes a CloudSQL Admin Source instance.
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	ua, err := util.UserAgentFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("error in User Agent retrieval: %s", err)
	}

	var client *http.Client
	if r.UseClientOAuth {
		client = nil
	} else {
		// Use Application Default Credentials
		creds, err := google.FindDefaultCredentials(ctx, sqladmin.SqlserviceAdminScope)
		if err != nil {
			return nil, fmt.Errorf("failed to find default credentials: %w", err)
		}
		client = oauth2.NewClient(ctx, creds.TokenSource)
	}

	s := &Source{
		Name:           r.Name,
		Kind:           SourceKind,
		BaseURL:        "https://sqladmin.googleapis.com",
		Client:         client,
		UserAgent:      ua,
		UseClientOAuth: r.UseClientOAuth,
	}
	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	BaseURL        string
	Client         *http.Client
	UserAgent      string
	UseClientOAuth bool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) GetClient(ctx context.Context, accessToken string) (*http.Client, error) {
	if s.UseClientOAuth {
		if accessToken == "" {
			return nil, fmt.Errorf("client-side OAuth is enabled but no access token was provided")
		}
		token := &oauth2.Token{AccessToken: accessToken}
		return oauth2.NewClient(ctx, oauth2.StaticTokenSource(token)), nil
	}
	return s.Client, nil
}

func (s *Source) UseClientAuthorization() bool {
	return s.UseClientOAuth
}
135
internal/sources/cloudsqladmin/cloud_sql_admin_test.go
Normal file
@@ -0,0 +1,135 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudsqladmin_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudsqladmin"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlCloudSQLAdmin(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-sql-admin-instance": cloudsqladmin.Config{
					Name:           "my-cloud-sql-admin-instance",
					Kind:           cloudsqladmin.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-sql-admin-instance": cloudsqladmin.Config{
					Name:           "my-cloud-sql-admin-instance",
					Kind:           cloudsqladmin.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
					project: test-project
			`,
			err: `unable to parse source "my-cloud-sql-admin-instance" as "cloud-sql-admin": [2:1] unknown field "project"
   1 | kind: cloud-sql-admin
>  2 | project: test-project
       ^
`,
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-cloud-sql-admin-instance\"",
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
@@ -52,7 +52,7 @@ type Config struct {
	Project  string         `yaml:"project" validate:"required"`
	Region   string         `yaml:"region" validate:"required"`
	Instance string         `yaml:"instance" validate:"required"`
	IPType   sources.IPType `yaml:"ipType" validate:"required"`
	IPType   sources.IPType `yaml:"ipType"`
	User     string         `yaml:"user" validate:"required"`
	Password string         `yaml:"password" validate:"required"`
	Database string         `yaml:"database" validate:"required"`
127
internal/sources/yugabytedb/yugabytedb.go
Normal file
@@ -0,0 +1,127 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yugabytedb

import (
	"context"
	"fmt"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/yugabyte/pgx/v5/pgxpool"
	"go.opentelemetry.io/otel/trace"
)

const SourceKind string = "yugabytedb"

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name                            string `yaml:"name" validate:"required"`
	Kind                            string `yaml:"kind" validate:"required"`
	Host                            string `yaml:"host" validate:"required"`
	Port                            string `yaml:"port" validate:"required"`
	User                            string `yaml:"user" validate:"required"`
	Password                        string `yaml:"password" validate:"required"`
	Database                        string `yaml:"database" validate:"required"`
	LoadBalance                     string `yaml:"loadBalance"`
	TopologyKeys                    string `yaml:"topologyKeys"`
	YBServersRefreshInterval        string `yaml:"ybServersRefreshInterval"`
	FallBackToTopologyKeysOnly      string `yaml:"fallbackToTopologyKeysOnly"`
	FailedHostReconnectDelaySeconds string `yaml:"failedHostReconnectDelaySecs"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	pool, err := initYugabyteDBConnectionPool(ctx, tracer, r.Name, r.Host, r.Port, r.User, r.Password, r.Database, r.LoadBalance, r.TopologyKeys, r.YBServersRefreshInterval, r.FallBackToTopologyKeysOnly, r.FailedHostReconnectDelaySeconds)
	if err != nil {
		return nil, fmt.Errorf("unable to create pool: %w", err)
	}

	err = pool.Ping(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to connect successfully: %w", err)
	}

	s := &Source{
		Name: r.Name,
		Kind: SourceKind,
		Pool: pool,
	}
	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name string `yaml:"name"`
	Kind string `yaml:"kind"`
	Pool *pgxpool.Pool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) YugabyteDBPool() *pgxpool.Pool {
	return s.Pool
}

func initYugabyteDBConnectionPool(ctx context.Context, tracer trace.Tracer, name, host, port, user, pass, dbname, loadBalance, topologyKeys, refreshInterval, explicitFallback, failedHostTTL string) (*pgxpool.Pool, error) {
	//nolint:all // Reassigned ctx
	ctx, span := sources.InitConnectionSpan(ctx, tracer, SourceKind, name)
	defer span.End()

	// urlExample := "postgres://username:password@localhost:5433/database_name"
	i := fmt.Sprintf("postgres://%s:%s@%s:%s/%s", user, pass, host, port, dbname)
	if loadBalance == "true" {
		i = fmt.Sprintf("%s?load_balance=%s", i, loadBalance)
		if topologyKeys != "" {
			i = fmt.Sprintf("%s&topology_keys=%s", i, topologyKeys)
			if explicitFallback == "true" {
				i = fmt.Sprintf("%s&fallback_to_topology_keys_only=%s", i, explicitFallback)
			}
		}
		if refreshInterval != "" {
			i = fmt.Sprintf("%s&yb_servers_refresh_interval=%s", i, refreshInterval)
		}
		if failedHostTTL != "" {
			i = fmt.Sprintf("%s&failed_host_reconnect_delay_secs=%s", i, failedHostTTL)
		}
	}
	pool, err := pgxpool.New(ctx, i)
	if err != nil {
		return nil, fmt.Errorf("unable to create connection pool: %w", err)
	}

	return pool, nil
}
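For reference, a minimal sketch of the DSN that initYugabyteDBConnectionPool builds when load balancing is on; the host, credentials, and topology keys below are illustrative values, not part of this change.

package main

import "fmt"

// Mirrors the string-building logic above for a sample topology-aware config.
func main() {
	user, pass, host, port, db := "yb_user", "yb_pass", "yb-host", "5433", "yugabyte"
	dsn := fmt.Sprintf("postgres://%s:%s@%s:%s/%s", user, pass, host, port, db)
	dsn += "?load_balance=true"
	dsn += "&topology_keys=cloud1.region1.zone1"
	dsn += "&yb_servers_refresh_interval=30"
	dsn += "&failed_host_reconnect_delay_secs=10"
	fmt.Println(dsn)
	// postgres://yb_user:yb_pass@yb-host:5433/yugabyte?load_balance=true&topology_keys=cloud1.region1.zone1&yb_servers_refresh_interval=30&failed_host_reconnect_delay_secs=10
}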
299
internal/sources/yugabytedb/yugabytedb_test.go
Normal file
@@ -0,0 +1,299 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yugabytedb_test

import (
	"strings"
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources/yugabytedb"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

// Basic config parse
func TestParseFromYamlYugabyteDB(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "only required fields",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:     "my-yb-instance",
					Kind:     "yugabytedb",
					Host:     "yb-host",
					Port:     "yb-port",
					User:     "yb_user",
					Password: "yb_pass",
					Database: "yb_db",
				},
			},
		},
		{
			desc: "with loadBalance only",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
					loadBalance: true
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:        "my-yb-instance",
					Kind:        "yugabytedb",
					Host:        "yb-host",
					Port:        "yb-port",
					User:        "yb_user",
					Password:    "yb_pass",
					Database:    "yb_db",
					LoadBalance: "true",
				},
			},
		},
		{
			desc: "loadBalance with topologyKeys",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
					loadBalance: true
					topologyKeys: zone1,zone2
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:         "my-yb-instance",
					Kind:         "yugabytedb",
					Host:         "yb-host",
					Port:         "yb-port",
					User:         "yb_user",
					Password:     "yb_pass",
					Database:     "yb_db",
					LoadBalance:  "true",
					TopologyKeys: "zone1,zone2",
				},
			},
		},
		{
			desc: "with fallback only",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
					loadBalance: true
					topologyKeys: zone1
					fallbackToTopologyKeysOnly: true
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:                       "my-yb-instance",
					Kind:                       "yugabytedb",
					Host:                       "yb-host",
					Port:                       "yb-port",
					User:                       "yb_user",
					Password:                   "yb_pass",
					Database:                   "yb_db",
					LoadBalance:                "true",
					TopologyKeys:               "zone1",
					FallBackToTopologyKeysOnly: "true",
				},
			},
		},
		{
			desc: "with refresh interval and reconnect delay",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
					loadBalance: true
					ybServersRefreshInterval: 20
					failedHostReconnectDelaySecs: 5
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:                            "my-yb-instance",
					Kind:                            "yugabytedb",
					Host:                            "yb-host",
					Port:                            "yb-port",
					User:                            "yb_user",
					Password:                        "yb_pass",
					Database:                        "yb_db",
					LoadBalance:                     "true",
					YBServersRefreshInterval:        "20",
					FailedHostReconnectDelaySeconds: "5",
				},
			},
		},
		{
			desc: "all fields set",
			in: `
			sources:
				my-yb-instance:
					kind: yugabytedb
					name: my-yb-instance
					host: yb-host
					port: yb-port
					user: yb_user
					password: yb_pass
					database: yb_db
					loadBalance: true
					topologyKeys: zone1,zone2
					fallbackToTopologyKeysOnly: true
					ybServersRefreshInterval: 30
					failedHostReconnectDelaySecs: 10
			`,
			want: server.SourceConfigs{
				"my-yb-instance": yugabytedb.Config{
					Name:                            "my-yb-instance",
					Kind:                            "yugabytedb",
					Host:                            "yb-host",
					Port:                            "yb-port",
					User:                            "yb_user",
					Password:                        "yb_pass",
					Database:                        "yb_db",
					LoadBalance:                     "true",
					TopologyKeys:                    "zone1,zone2",
					FallBackToTopologyKeysOnly:      "true",
					YBServersRefreshInterval:        "30",
					FailedHostReconnectDelaySeconds: "10",
				},
			},
		},
	}

	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}

			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse (-want +got):\n%s", cmp.Diff(tc.want, got.Sources))
			}
		})
	}
}

func TestFailParseFromYamlYugabyteDB(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-yb-source:
					kind: yugabytedb
					name: my-yb-source
					host: yb-host
					port: yb-port
					database: yb_db
					user: yb_user
					password: yb_pass
					foo: bar
			`,
			err: "unable to parse source \"my-yb-source\" as \"yugabytedb\": [2:1] unknown field \"foo\"",
		},
		{
			desc: "missing required field (password)",
			in: `
			sources:
				my-yb-source:
					kind: yugabytedb
					name: my-yb-source
					host: yb-host
					port: yb-port
					database: yb_db
					user: yb_user
			`,
			err: "unable to parse source \"my-yb-source\" as \"yugabytedb\": Key: 'Config.Password' Error:Field validation for 'Password' failed on the 'required' tag",
		},
		{
			desc: "missing required field (host)",
			in: `
			sources:
				my-yb-source:
					kind: yugabytedb
					name: my-yb-source
					port: yb-port
					database: yb_db
					user: yb_user
					password: yb_pass
			`,
			err: "unable to parse source \"my-yb-source\" as \"yugabytedb\": Key: 'Config.Host' Error:Field validation for 'Host' failed on the 'required' tag",
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expected parsing to fail")
			}
			errStr := err.Error()
			if !strings.Contains(errStr, tc.err) {
				t.Fatalf("unexpected error:\nGot: %q\nWant: %q", errStr, tc.err)
			}
		})
	}
}
@@ -0,0 +1,307 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigqueryanalyzecontribution

import (
	"context"
	"fmt"
	"strings"

	bigqueryapi "cloud.google.com/go/bigquery"
	yaml "github.com/goccy/go-yaml"
	"github.com/google/uuid"
	"github.com/googleapis/genai-toolbox/internal/sources"
	bigqueryds "github.com/googleapis/genai-toolbox/internal/sources/bigquery"
	"github.com/googleapis/genai-toolbox/internal/tools"
	bigqueryrestapi "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/iterator"
)

const kind string = "bigquery-analyze-contribution"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type compatibleSource interface {
	BigQueryClient() *bigqueryapi.Client
	BigQueryRestService() *bigqueryrestapi.Service
	BigQueryClientCreator() bigqueryds.BigqueryClientCreator
	UseClientAuthorization() bool
}

// validate compatible sources are still compatible
var _ compatibleSource = &bigqueryds.Source{}

var compatibleSources = [...]string{bigqueryds.SourceKind}

type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	inputDataParameter := tools.NewStringParameter("input_data",
		"The data that contain the test and control data to analyze. Can be a fully qualified BigQuery table ID or a SQL query.")
	contributionMetricParameter := tools.NewStringParameter("contribution_metric",
		`The name of the column that contains the metric to analyze.
Provides the expression to use to calculate the metric you are analyzing.
To calculate a summable metric, the expression must be in the form SUM(metric_column_name),
where metric_column_name is a numeric data type.

To calculate a summable ratio metric, the expression must be in the form
SUM(numerator_metric_column_name)/SUM(denominator_metric_column_name),
where numerator_metric_column_name and denominator_metric_column_name are numeric data types.

To calculate a summable by category metric, the expression must be in the form
SUM(metric_sum_column_name)/COUNT(DISTINCT categorical_column_name). The summed column must be a numeric data type.
The categorical column must have type BOOL, DATE, DATETIME, TIME, TIMESTAMP, STRING, or INT64.`)
	isTestColParameter := tools.NewStringParameter("is_test_col",
		"The name of the column that identifies whether a row is in the test or control group.")
	dimensionIDColsParameter := tools.NewArrayParameterWithRequired("dimension_id_cols",
		"An array of column names that uniquely identify each dimension.", false, tools.NewStringParameter("dimension_id_col", "A dimension column name."))
	topKInsightsParameter := tools.NewIntParameterWithDefault("top_k_insights_by_apriori_support", 30,
		"The number of top insights to return, ranked by apriori support.")
	pruningMethodParameter := tools.NewStringParameterWithDefault("pruning_method", "PRUNE_REDUNDANT_INSIGHTS",
		"The method to use for pruning redundant insights. Can be 'NO_PRUNING' or 'PRUNE_REDUNDANT_INSIGHTS'.")

	parameters := tools.Parameters{
		inputDataParameter,
		contributionMetricParameter,
		isTestColParameter,
		dimensionIDColsParameter,
		topKInsightsParameter,
		pruningMethodParameter,
	}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: parameters.McpManifest(),
	}

	// finish tool setup
	t := Tool{
		Name:           cfg.Name,
		Kind:           kind,
		Parameters:     parameters,
		AuthRequired:   cfg.AuthRequired,
		UseClientOAuth: s.UseClientAuthorization(),
		ClientCreator:  s.BigQueryClientCreator(),
		Client:         s.BigQueryClient(),
		RestService:    s.BigQueryRestService(),
		manifest:       tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:    mcpManifest,
	}
	return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
	Name           string           `yaml:"name"`
	Kind           string           `yaml:"kind"`
	AuthRequired   []string         `yaml:"authRequired"`
	UseClientOAuth bool             `yaml:"useClientOAuth"`
	Parameters     tools.Parameters `yaml:"parameters"`

	Client        *bigqueryapi.Client
	RestService   *bigqueryrestapi.Service
	ClientCreator bigqueryds.BigqueryClientCreator
	manifest      tools.Manifest
	mcpManifest   tools.McpManifest
}

// Invoke runs the contribution analysis.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()
	inputData, ok := paramsMap["input_data"].(string)
	if !ok {
		return nil, fmt.Errorf("unable to cast input_data parameter %s", paramsMap["input_data"])
	}

	modelID := fmt.Sprintf("contribution_analysis_model_%s", strings.ReplaceAll(uuid.New().String(), "-", ""))

	var options []string
	options = append(options, "MODEL_TYPE = 'CONTRIBUTION_ANALYSIS'")
	options = append(options, fmt.Sprintf("CONTRIBUTION_METRIC = '%s'", paramsMap["contribution_metric"]))
	options = append(options, fmt.Sprintf("IS_TEST_COL = '%s'", paramsMap["is_test_col"]))

	if val, ok := paramsMap["dimension_id_cols"]; ok {
		if cols, ok := val.([]any); ok {
			var strCols []string
			for _, c := range cols {
				strCols = append(strCols, fmt.Sprintf("'%s'", c))
			}
			options = append(options, fmt.Sprintf("DIMENSION_ID_COLS = [%s]", strings.Join(strCols, ", ")))
		} else {
			return nil, fmt.Errorf("unable to cast dimension_id_cols parameter %s", paramsMap["dimension_id_cols"])
		}
	}
	if val, ok := paramsMap["top_k_insights_by_apriori_support"]; ok {
		options = append(options, fmt.Sprintf("TOP_K_INSIGHTS_BY_APRIORI_SUPPORT = %v", val))
	}
	if val, ok := paramsMap["pruning_method"].(string); ok {
		upperVal := strings.ToUpper(val)
		if upperVal != "NO_PRUNING" && upperVal != "PRUNE_REDUNDANT_INSIGHTS" {
			return nil, fmt.Errorf("invalid pruning_method: %s", val)
		}
		options = append(options, fmt.Sprintf("PRUNING_METHOD = '%s'", upperVal))
	}

	var inputDataSource string
	trimmedUpperInputData := strings.TrimSpace(strings.ToUpper(inputData))
	if strings.HasPrefix(trimmedUpperInputData, "SELECT") || strings.HasPrefix(trimmedUpperInputData, "WITH") {
		inputDataSource = fmt.Sprintf("(%s)", inputData)
	} else {
		inputDataSource = fmt.Sprintf("SELECT * FROM `%s`", inputData)
	}

	// Use temp model to skip the clean up at the end. To use TEMP MODEL, queries have to be
	// in the same BigQuery session.
	createModelSQL := fmt.Sprintf("CREATE TEMP MODEL %s OPTIONS(%s) AS %s",
		modelID,
		strings.Join(options, ", "),
		inputDataSource,
	)

	bqClient := t.Client
	var err error

	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
	}

	createModelQuery := bqClient.Query(createModelSQL)
	createModelQuery.CreateSession = true
	createModelJob, err := createModelQuery.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to start create model job: %w", err)
	}

	status, err := createModelJob.Wait(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to wait for create model job: %w", err)
	}
	if err := status.Err(); err != nil {
		return nil, fmt.Errorf("create model job failed: %w", err)
	}

	if status.Statistics == nil || status.Statistics.SessionInfo == nil || status.Statistics.SessionInfo.SessionID == "" {
		return nil, fmt.Errorf("failed to create a BigQuery session")
	}
	sessionID := status.Statistics.SessionInfo.SessionID
	getInsightsSQL := fmt.Sprintf("SELECT * FROM ML.GET_INSIGHTS(MODEL %s)", modelID)

	getInsightsQuery := bqClient.Query(getInsightsSQL)
	getInsightsQuery.QueryConfig.ConnectionProperties = []*bigqueryapi.ConnectionProperty{
		{Key: "session_id", Value: sessionID},
	}

	job, err := getInsightsQuery.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to execute get insights query: %w", err)
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to read query results: %w", err)
	}

	var out []any
	for {
		var row map[string]bigqueryapi.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to iterate through query results: %w", err)
		}
		vMap := make(map[string]any)
		for key, value := range row {
			vMap[key] = value
		}
		out = append(out, vMap)
	}

	if len(out) > 0 {
		return out, nil
	}

	// This handles the standard case for a SELECT query that successfully
	// executes but returns zero rows.
	return "The query returned 0 rows.", nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.Parameters, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.UseClientOAuth
}
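A sketch of the CREATE TEMP MODEL statement Invoke assembles above; the metric, column, and table names are illustrative assumptions, not part of this change.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Options mirror the ones Invoke appends for a sample request.
	options := []string{
		"MODEL_TYPE = 'CONTRIBUTION_ANALYSIS'",
		"CONTRIBUTION_METRIC = 'SUM(sales)'",
		"IS_TEST_COL = 'is_test'",
		"DIMENSION_ID_COLS = ['store', 'region']",
		"TOP_K_INSIGHTS_BY_APRIORI_SUPPORT = 30",
		"PRUNING_METHOD = 'PRUNE_REDUNDANT_INSIGHTS'",
	}
	sql := fmt.Sprintf("CREATE TEMP MODEL %s OPTIONS(%s) AS %s",
		"contribution_analysis_model_example",
		strings.Join(options, ", "),
		"SELECT * FROM `my-project.my_dataset.sales`")
	fmt.Println(sql)
}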
@@ -0,0 +1,72 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigqueryanalyzecontribution_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryanalyzecontribution"
)

func TestParseFromYamlBigQueryAnalyzeContribution(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				example_tool:
					kind: bigquery-analyze-contribution
					source: my-instance
					description: some description
			`,
			want: server.ToolConfigs{
				"example_tool": bigqueryanalyzecontribution.Config{
					Name:         "example_tool",
					Kind:         "bigquery-analyze-contribution",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
@@ -183,14 +183,18 @@ type Tool struct {

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	var tokenStr string
	var err error

	// Get credentials for the API call
	if t.UseClientOAuth {
		// Use client-side access token
		if accessToken == "" {
			return nil, fmt.Errorf("tool is configured for client OAuth but no token was provided in the request header")
			return nil, fmt.Errorf("tool is configured for client OAuth but no token was provided in the request header: %w", tools.ErrUnauthorized)
		}
		tokenStr, err = accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		tokenStr = string(accessToken)
	} else {
		// Use ADC
		if t.TokenSource == nil {
@@ -150,7 +150,11 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
	var err error
	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, restService, err = t.ClientCreator(accessToken, true)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, restService, err = t.ClientCreator(tokenStr, true)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -199,7 +199,11 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken

	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, _, err = t.ClientCreator(accessToken, false)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -142,7 +142,11 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken

	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, _, err = t.ClientCreator(accessToken, false)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -149,7 +149,11 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
	var err error
	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, _, err = t.ClientCreator(accessToken, false)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -133,10 +133,13 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
	}

	bqClient := t.Client
	var err error
	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, _, err = t.ClientCreator(accessToken, false)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -17,6 +17,8 @@ package bigquerylisttableids
import (
	"context"
	"fmt"
	"sort"
	"strings"

	bigqueryapi "cloud.google.com/go/bigquery"
	yaml "github.com/goccy/go-yaml"
@@ -49,6 +51,8 @@ type compatibleSource interface {
	BigQueryClientCreator() bigqueryds.BigqueryClientCreator
	BigQueryProject() string
	UseClientAuthorization() bool
	IsDatasetAllowed(projectID, datasetID string) bool
	BigQueryAllowedDatasets() []string
}

// validate compatible sources are still compatible
@@ -84,8 +88,44 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	projectParameter := tools.NewStringParameterWithDefault(projectKey, s.BigQueryProject(), "The Google Cloud project ID containing the dataset.")
	datasetParameter := tools.NewStringParameter(datasetKey, "The dataset to list table ids.")
	defaultProjectID := s.BigQueryProject()
	projectDescription := "The Google Cloud project ID containing the dataset."
	datasetDescription := "The dataset to list table ids."
	var datasetParameter tools.Parameter
	allowedDatasets := s.BigQueryAllowedDatasets()
	if len(allowedDatasets) > 0 {
		if len(allowedDatasets) == 1 {
			parts := strings.Split(allowedDatasets[0], ".")
			defaultProjectID = parts[0]
			datasetID := parts[1]
			projectDescription += fmt.Sprintf(" Must be `%s`.", defaultProjectID)
			datasetDescription += fmt.Sprintf(" Must be `%s`.", datasetID)
			datasetParameter = tools.NewStringParameterWithDefault(datasetKey, datasetID, datasetDescription)
		} else {
			datasetIDsByProject := make(map[string][]string)
			for _, ds := range allowedDatasets {
				parts := strings.Split(ds, ".")
				project := parts[0]
				dataset := parts[1]
				datasetIDsByProject[project] = append(datasetIDsByProject[project], fmt.Sprintf("`%s`", dataset))
			}

			var datasetDescriptions, projectIDList []string
			for project, datasets := range datasetIDsByProject {
				sort.Strings(datasets)
				projectIDList = append(projectIDList, fmt.Sprintf("`%s`", project))
				datasetList := strings.Join(datasets, ", ")
				datasetDescriptions = append(datasetDescriptions, fmt.Sprintf("%s from project `%s`", datasetList, project))
			}
			projectDescription += fmt.Sprintf(" Must be one of the following: %s.", strings.Join(projectIDList, ", "))
			datasetDescription += fmt.Sprintf(" Must be one of the allowed datasets: %s.", strings.Join(datasetDescriptions, "; "))
			datasetParameter = tools.NewStringParameter(datasetKey, datasetDescription)
		}
	} else {
		datasetParameter = tools.NewStringParameter(datasetKey, datasetDescription)
	}
	projectParameter := tools.NewStringParameterWithDefault(projectKey, defaultProjectID, projectDescription)

	parameters := tools.Parameters{projectParameter, datasetParameter}

	mcpManifest := tools.McpManifest{
@@ -96,15 +136,16 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)

	// finish tool setup
	t := Tool{
		Name:           cfg.Name,
		Kind:           kind,
		Parameters:     parameters,
		AuthRequired:   cfg.AuthRequired,
		UseClientOAuth: s.UseClientAuthorization(),
		ClientCreator:  s.BigQueryClientCreator(),
		Client:         s.BigQueryClient(),
		manifest:       tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:    mcpManifest,
		Name:             cfg.Name,
		Kind:             kind,
		Parameters:       parameters,
		AuthRequired:     cfg.AuthRequired,
		UseClientOAuth:   s.UseClientAuthorization(),
		ClientCreator:    s.BigQueryClientCreator(),
		Client:           s.BigQueryClient(),
		IsDatasetAllowed: s.IsDatasetAllowed,
		manifest:         tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:      mcpManifest,
	}
	return t, nil
}
@@ -119,11 +160,12 @@ type Tool struct {
	UseClientOAuth bool             `yaml:"useClientOAuth"`
	Parameters     tools.Parameters `yaml:"parameters"`

	Client        *bigqueryapi.Client
	ClientCreator bigqueryds.BigqueryClientCreator
	Statement     string
	manifest      tools.Manifest
	mcpManifest   tools.McpManifest
	Client           *bigqueryapi.Client
	ClientCreator    bigqueryds.BigqueryClientCreator
	IsDatasetAllowed func(projectID, datasetID string) bool
	Statement        string
	manifest         tools.Manifest
	mcpManifest      tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
@@ -138,11 +180,18 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
		return nil, fmt.Errorf("invalid or missing '%s' parameter; expected a string", datasetKey)
	}

	if !t.IsDatasetAllowed(projectId, datasetId) {
		return nil, fmt.Errorf("access denied to dataset '%s' because it is not in the configured list of allowed datasets for project '%s'", datasetId, projectId)
	}

	bqClient := t.Client
	var err error
	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, _, err = t.ClientCreator(accessToken, false)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -158,7 +207,7 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to iterate through tables in dataset %s.%s: %w", bqClient.Project(), datasetId, err)
			return nil, fmt.Errorf("failed to iterate through tables in dataset %s.%s: %w", projectId, datasetId, err)
		}

		// Remove leading and trailing quotes
@@ -222,7 +222,11 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken

	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		bqClient, restService, err = t.ClientCreator(accessToken, true)
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, restService, err = t.ClientCreator(tokenStr, true)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
@@ -0,0 +1,157 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clickhouse

import (
	"context"
	"database/sql"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

type compatibleSource interface {
	ClickHousePool() *sql.DB
}

var compatibleSources = []string{"clickhouse"}

const listDatabasesKind string = "clickhouse-list-databases"

func init() {
	if !tools.Register(listDatabasesKind, newListDatabasesConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", listDatabasesKind))
	}
}

func newListDatabasesConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name         string           `yaml:"name" validate:"required"`
	Kind         string           `yaml:"kind" validate:"required"`
	Source       string           `yaml:"source" validate:"required"`
	Description  string           `yaml:"description" validate:"required"`
	AuthRequired []string         `yaml:"authRequired"`
	Parameters   tools.Parameters `yaml:"parameters"`
}

var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return listDatabasesKind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", listDatabasesKind, compatibleSources)
	}

	allParameters, paramManifest, paramMcpManifest, _ := tools.ProcessParameters(nil, cfg.Parameters)

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: paramMcpManifest,
	}

	t := Tool{
		Name:         cfg.Name,
		Kind:         listDatabasesKind,
		Parameters:   cfg.Parameters,
		AllParams:    allParameters,
		AuthRequired: cfg.AuthRequired,
		Pool:         s.ClickHousePool(),
		manifest:     tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
		mcpManifest:  mcpManifest,
	}
	return t, nil
}

var _ tools.Tool = Tool{}

type Tool struct {
	Name         string           `yaml:"name"`
	Kind         string           `yaml:"kind"`
	AuthRequired []string         `yaml:"authRequired"`
	Parameters   tools.Parameters `yaml:"parameters"`
	AllParams    tools.Parameters `yaml:"allParams"`

	Pool        *sql.DB
	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, token tools.AccessToken) (any, error) {
	// Query to list all databases
	query := "SHOW DATABASES"

	results, err := t.Pool.QueryContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("unable to execute query: %w", err)
	}
	defer results.Close()

	var databases []map[string]any
	for results.Next() {
		var dbName string
		err := results.Scan(&dbName)
		if err != nil {
			return nil, fmt.Errorf("unable to parse row: %w", err)
		}
		databases = append(databases, map[string]any{
			"name": dbName,
		})
	}

	if err := results.Err(); err != nil {
		return nil, fmt.Errorf("errors encountered by results.Scan: %w", err)
	}

	return databases, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return false
}
@@ -0,0 +1,109 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clickhouse

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

func TestListDatabasesConfigToolConfigKind(t *testing.T) {
	cfg := Config{}
	if cfg.ToolConfigKind() != listDatabasesKind {
		t.Errorf("expected %q, got %q", listDatabasesKind, cfg.ToolConfigKind())
	}
}

func TestListDatabasesConfigInitializeMissingSource(t *testing.T) {
	cfg := Config{
		Name:        "test-list-databases",
		Kind:        listDatabasesKind,
		Source:      "missing-source",
		Description: "Test list databases tool",
	}

	srcs := map[string]sources.Source{}
	_, err := cfg.Initialize(srcs)
	if err == nil {
		t.Error("expected error for missing source")
	}
}

func TestParseFromYamlClickHouseListDatabases(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				example_tool:
					kind: clickhouse-list-databases
					source: my-instance
					description: some description
			`,
			want: server.ToolConfigs{
				"example_tool": Config{
					Name:         "example_tool",
					Kind:         "clickhouse-list-databases",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}

func TestListDatabasesToolParseParams(t *testing.T) {
	tool := Tool{
		Parameters: tools.Parameters{},
	}

	params, err := tool.ParseParams(map[string]any{}, map[string]map[string]any{})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if len(params) != 0 {
		t.Errorf("expected 0 parameters, got %d", len(params))
	}
}
177
internal/tools/cloudmonitoring/cloudmonitoring.go
Normal file
@@ -0,0 +1,177 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cloudmonitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/goccy/go-yaml"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources"
|
||||
cloudmonitoringsrc "github.com/googleapis/genai-toolbox/internal/sources/cloudmonitoring"
|
||||
"github.com/googleapis/genai-toolbox/internal/tools"
|
||||
)
|
||||
|
||||
const kind string = "cloud-monitoring-query-prometheus"
|
||||
|
||||
func init() {
|
||||
if !tools.Register(kind, newConfig) {
|
||||
panic(fmt.Sprintf("tool kind %q already registered", kind))
|
||||
}
|
||||
}
|
||||
|
||||
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
|
||||
actual := Config{Name: name}
|
||||
if err := decoder.DecodeContext(ctx, &actual); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return actual, nil
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Name string `yaml:"name" validate:"required"`
|
||||
Kind string `yaml:"kind" validate:"required"`
|
||||
Source string `yaml:"source" validate:"required"`
|
||||
Description string `yaml:"description" validate:"required"`
|
||||
AuthRequired []string `yaml:"authRequired"`
|
||||
}
|
||||
|
||||
// validate interface
|
||||
var _ tools.ToolConfig = Config{}
|
||||
|
||||
func (cfg Config) ToolConfigKind() string {
|
||||
return kind
|
||||
}
|
||||
|
||||
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
|
||||
// verify source exists
|
||||
rawS, ok := srcs[cfg.Source]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no source named %q configured", cfg.Source)
|
||||
}
|
||||
|
||||
// verify the source is compatible
|
||||
s, ok := rawS.(*cloudmonitoringsrc.Source)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid source for %q tool: source kind must be `cloudmonitoring`", kind)
|
||||
}
|
||||
|
||||
// Define the parameters internally instead of from the config file.
|
||||
allParameters := tools.Parameters{
|
||||
tools.NewStringParameterWithRequired("projectId", "The Id of the Google Cloud project.", true),
|
||||
tools.NewStringParameterWithRequired("query", "The promql query to execute.", true),
|
||||
}
|
||||
|
||||
return Tool{
|
||||
Name: cfg.Name,
|
||||
Kind: kind,
|
||||
Description: cfg.Description,
|
||||
AllParams: allParameters,
|
||||
BaseURL: s.BaseURL,
|
||||
UserAgent: s.UserAgent,
|
||||
Client: s.Client,
|
||||
manifest: tools.Manifest{Description: cfg.Description, Parameters: allParameters.Manifest()},
|
||||
mcpManifest: tools.McpManifest{Name: cfg.Name, Description: cfg.Description, InputSchema: allParameters.McpManifest()},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validate interface
|
||||
var _ tools.Tool = Tool{}
|
||||
|
||||
type Tool struct {
|
||||
Name string `yaml:"name"`
|
||||
Kind string `yaml:"kind"`
|
||||
Description string `yaml:"description"`
|
||||
AllParams tools.Parameters `yaml:"allParams"`
|
||||
BaseURL string `yaml:"baseURL"`
|
||||
UserAgent string
|
||||
Client *http.Client
|
||||
manifest tools.Manifest
|
||||
mcpManifest tools.McpManifest
|
||||
}
|
||||
|
||||
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
|
||||
paramsMap := params.AsMap()
|
||||
projectID, ok := paramsMap["projectId"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("projectId parameter not found or not a string")
|
||||
}
|
||||
query, ok := paramsMap["query"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("query parameter not found or not a string")
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/v1/projects/%s/location/global/prometheus/api/v1/query", t.BaseURL, projectID)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Add("query", query)
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
req.Header.Set("User-Agent", t.UserAgent)
|
||||
|
||||
resp, err := t.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("request failed: %s, body: %s", resp.Status, string(body))
|
||||
}
|
||||
|
||||
if len(body) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result map[string]any
|
||||
if err := json.Unmarshal(body, &result); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal json: %w, body: %s", err, string(body))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
|
||||
return tools.ParseParams(t.AllParams, data, claims)
|
||||
}
|
||||
|
||||
func (t Tool) Manifest() tools.Manifest {
|
||||
return t.manifest
|
||||
}
|
||||
|
||||
func (t Tool) McpManifest() tools.McpManifest {
|
||||
return t.mcpManifest
|
||||
}
|
||||
|
||||
func (t Tool) Authorized(verifiedAuthServices []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (t Tool) RequiresClientAuthorization() bool {
|
||||
return false
|
||||
}
|
||||
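// Illustrative note (not part of the file above): with a source BaseURL such as
// "https://monitoring.googleapis.com" and a projectId of "my-project", Invoke issues a
// GET of the form
//
//	https://monitoring.googleapis.com/v1/projects/my-project/location/global/prometheus/api/v1/query?query=up
//
// and returns the decoded Prometheus JSON response as a map[string]any. The host and
// project here are examples only; the actual BaseURL comes from the cloudmonitoring source.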
155
internal/tools/cloudmonitoring/cloudmonitoring_test.go
Normal file
@@ -0,0 +1,155 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudmonitoring_test

import (
	"strings"
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools/cloudmonitoring"
)

func TestParseFromYamlCloudMonitoring(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				example_tool:
					kind: cloud-monitoring-query-prometheus
					source: my-instance
					description: some description
			`,
			want: server.ToolConfigs{
				"example_tool": cloudmonitoring.Config{
					Name:         "example_tool",
					Kind:         "cloud-monitoring-query-prometheus",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "advanced example",
			in: `
			tools:
				example_tool:
					kind: cloud-monitoring-query-prometheus
					source: my-instance
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"example_tool": cloudmonitoring.Config{
					Name:         "example_tool",
					Kind:         "cloud-monitoring-query-prometheus",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}

func TestFailParseFromYamlCloudMonitoring(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "Invalid kind",
			in: `
			tools:
				example_tool:
					kind: invalid-kind
					source: my-instance
					description: some description
			`,
			err: `unknown tool kind: "invalid-kind"`,
		},
		{
			desc: "missing source",
			in: `
			tools:
				example_tool:
					kind: cloud-monitoring-query-prometheus
					description: some description
			`,
			err: `Key: 'Config.Source' Error:Field validation for 'Source' failed on the 'required' tag`,
		},
		{
			desc: "missing description",
			in: `
			tools:
				example_tool:
					kind: cloud-monitoring-query-prometheus
					source: my-instance
			`,
			err: `Key: 'Config.Description' Error:Field validation for 'Description' failed on the 'required' tag`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if !strings.Contains(errStr, tc.err) {
				t.Fatalf("unexpected error string: got %q, want substring %q", errStr, tc.err)
			}
		})
	}
}
@@ -87,18 +87,30 @@ func convertParamToJSON(param any) (string, error) {

// PopulateTemplateWithJSON populates a Go template with a custom `json` array formatter
func PopulateTemplateWithJSON(templateName, templateString string, data map[string]any) (string, error) {
	return PopulateTemplateWithFunc(templateName, templateString, data, template.FuncMap{
		"json": convertParamToJSON,
	})
}

// PopulateTemplate populates a Go template with no custom formatters
func PopulateTemplate(templateName, templateString string, data map[string]any) (string, error) {
	return PopulateTemplateWithFunc(templateName, templateString, data, nil)
}

// PopulateTemplateWithFunc populates a Go template with the provided functions
func PopulateTemplateWithFunc(templateName, templateString string, data map[string]any, funcMap template.FuncMap) (string, error) {
	tmpl := template.New(templateName)
	if funcMap != nil {
		tmpl = tmpl.Funcs(funcMap)
	}

	parsedTmpl, err := tmpl.Parse(templateString)
	if err != nil {
		return "", fmt.Errorf("error parsing template '%s': %w", templateName, err)
	}

	var result bytes.Buffer
	if err := parsedTmpl.Execute(&result, data); err != nil {
		return "", fmt.Errorf("error executing template '%s': %w", templateName, err)
	}
	return result.String(), nil
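// For reference, a minimal standalone sketch (assumed names, not part of the change set)
// of the same parse-then-execute pattern PopulateTemplateWithFunc now follows: attach the
// optional FuncMap before parsing, then execute against the data map.
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

func populate(name, tmplStr string, data map[string]any, funcs template.FuncMap) (string, error) {
	tmpl := template.New(name)
	if funcs != nil {
		// Funcs must be registered before Parse so the parser recognizes the names.
		tmpl = tmpl.Funcs(funcs)
	}
	parsed, err := tmpl.Parse(tmplStr)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := parsed.Execute(&buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, err := populate("greeting", "{{upper .name}}!", map[string]any{"name": "world"},
		template.FuncMap{"upper": strings.ToUpper})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Output: WORLD!
}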
297
internal/tools/common_test.go
Normal file
@@ -0,0 +1,297 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tools_test

import (
	"strings"
	"testing"
	"text/template"

	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

func TestPopulateTemplate(t *testing.T) {
	tcs := []struct {
		name           string
		templateName   string
		templateString string
		data           map[string]any
		want           string
		wantErr        bool
	}{
		{
			name:           "simple string substitution",
			templateName:   "test",
			templateString: "Hello {{.name}}!",
			data:           map[string]any{"name": "World"},
			want:           "Hello World!",
			wantErr:        false,
		},
		{
			name:           "multiple substitutions",
			templateName:   "test",
			templateString: "{{.greeting}} {{.name}}, you are {{.age}} years old",
			data:           map[string]any{"greeting": "Hello", "name": "Alice", "age": 30},
			want:           "Hello Alice, you are 30 years old",
			wantErr:        false,
		},
		{
			name:           "empty template",
			templateName:   "test",
			templateString: "",
			data:           map[string]any{},
			want:           "",
			wantErr:        false,
		},
		{
			name:           "no substitutions",
			templateName:   "test",
			templateString: "Plain text without templates",
			data:           map[string]any{},
			want:           "Plain text without templates",
			wantErr:        false,
		},
		{
			name:           "invalid template syntax",
			templateName:   "test",
			templateString: "{{.name",
			data:           map[string]any{"name": "World"},
			want:           "",
			wantErr:        true,
		},
		{
			name:           "missing field",
			templateName:   "test",
			templateString: "{{.missing}}",
			data:           map[string]any{"name": "World"},
			want:           "<no value>",
			wantErr:        false,
		},
		{
			name:           "invalid function call",
			templateName:   "test",
			templateString: "{{.name.invalid}}",
			data:           map[string]any{"name": "World"},
			want:           "",
			wantErr:        true,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tools.PopulateTemplate(tc.templateName, tc.templateString, tc.data)
			if tc.wantErr {
				if err == nil {
					t.Fatalf("expected error, got nil")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Fatalf("incorrect result (-want +got):\n%s", diff)
			}
		})
	}
}

func TestPopulateTemplateWithFunc(t *testing.T) {
	// Custom function for testing
	customFuncs := template.FuncMap{
		"upper": strings.ToUpper,
		"add": func(a, b int) int {
			return a + b
		},
	}

	tcs := []struct {
		name           string
		templateName   string
		templateString string
		data           map[string]any
		funcMap        template.FuncMap
		want           string
		wantErr        bool
	}{
		{
			name:           "with custom upper function",
			templateName:   "test",
			templateString: "{{upper .text}}",
			data:           map[string]any{"text": "hello"},
			funcMap:        customFuncs,
			want:           "HELLO",
			wantErr:        false,
		},
		{
			name:           "with custom add function",
			templateName:   "test",
			templateString: "Result: {{add .x .y}}",
			data:           map[string]any{"x": 5, "y": 3},
			funcMap:        customFuncs,
			want:           "Result: 8",
			wantErr:        false,
		},
		{
			name:           "nil funcMap",
			templateName:   "test",
			templateString: "Hello {{.name}}",
			data:           map[string]any{"name": "World"},
			funcMap:        nil,
			want:           "Hello World",
			wantErr:        false,
		},
		{
			name:           "combine custom function with regular substitution",
			templateName:   "test",
			templateString: "{{upper .greeting}} {{.name}}!",
			data:           map[string]any{"greeting": "hello", "name": "Alice"},
			funcMap:        customFuncs,
			want:           "HELLO Alice!",
			wantErr:        false,
		},
		{
			name:           "undefined function",
			templateName:   "test",
			templateString: "{{undefined .text}}",
			data:           map[string]any{"text": "hello"},
			funcMap:        nil,
			want:           "",
			wantErr:        true,
		},
		{
			name:           "wrong number of arguments",
			templateName:   "test",
			templateString: "{{upper}}",
			data:           map[string]any{},
			funcMap:        template.FuncMap{"upper": strings.ToUpper},
			want:           "",
			wantErr:        true,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tools.PopulateTemplateWithFunc(tc.templateName, tc.templateString, tc.data, tc.funcMap)
			if tc.wantErr {
				if err == nil {
					t.Fatalf("expected error, got nil")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Fatalf("incorrect result (-want +got):\n%s", diff)
			}
		})
	}
}

func TestPopulateTemplateWithJSON(t *testing.T) {
	tcs := []struct {
		name           string
		templateName   string
		templateString string
		data           map[string]any
		want           string
		wantErr        bool
	}{
		{
			name:           "json string",
			templateName:   "test",
			templateString: "Data: {{json .value}}",
			data:           map[string]any{"value": "hello"},
			want:           `Data: "hello"`,
			wantErr:        false,
		},
		{
			name:           "json number",
			templateName:   "test",
			templateString: "Number: {{json .num}}",
			data:           map[string]any{"num": 42},
			want:           "Number: 42",
			wantErr:        false,
		},
		{
			name:           "json boolean",
			templateName:   "test",
			templateString: "Bool: {{json .flag}}",
			data:           map[string]any{"flag": true},
			want:           "Bool: true",
			wantErr:        false,
		},
		{
			name:           "json array",
			templateName:   "test",
			templateString: "Array: {{json .items}}",
			data:           map[string]any{"items": []any{"a", "b", "c"}},
			want:           `Array: ["a","b","c"]`,
			wantErr:        false,
		},
		{
			name:           "json object",
			templateName:   "test",
			templateString: "Object: {{json .obj}}",
			data:           map[string]any{"obj": map[string]any{"name": "Alice", "age": 30}},
			want:           `Object: {"age":30,"name":"Alice"}`,
			wantErr:        false,
		},
		{
			name:           "json null",
			templateName:   "test",
			templateString: "Null: {{json .nullValue}}",
			data:           map[string]any{"nullValue": nil},
			want:           "Null: null",
			wantErr:        false,
		},
		{
			name:           "combine json with regular substitution",
			templateName:   "test",
			templateString: "User {{.name}} has data: {{json .data}}",
			data:           map[string]any{"name": "Bob", "data": map[string]any{"id": 123}},
			want:           `User Bob has data: {"id":123}`,
			wantErr:        false,
		},
		{
			name:           "missing field for json",
			templateName:   "test",
			templateString: "{{json .missing}}",
			data:           map[string]any{},
			want:           "null",
			wantErr:        false,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tools.PopulateTemplateWithJSON(tc.templateName, tc.templateString, tc.data)
			if tc.wantErr {
				if err == nil {
					t.Fatalf("expected error, got nil")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			if diff := cmp.Diff(tc.want, got); diff != "" {
				t.Fatalf("incorrect result (-want +got):\n%s", diff)
			}
		})
	}
}
@@ -131,7 +131,7 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
	if err := util.ValidateDocumentPath(parentPath); err != nil {
		return nil, fmt.Errorf("invalid parent document path: %w", err)
	}

	// List subcollections of the specified document
	docRef := t.Client.Doc(parentPath)
	collectionRefs, err = docRef.Collections(ctx).GetAll()
548
internal/tools/firestore/firestorequery/firestorequery.go
Normal file
@@ -0,0 +1,548 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestorequery

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	firestoreapi "cloud.google.com/go/firestore"
	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	firestoreds "github.com/googleapis/genai-toolbox/internal/sources/firestore"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/firestore/util"
)

// Constants for tool configuration
const (
	kind         = "firestore-query"
	defaultLimit = 100
)

// Firestore operators
var validOperators = map[string]bool{
	"<":                  true,
	"<=":                 true,
	">":                  true,
	">=":                 true,
	"==":                 true,
	"!=":                 true,
	"array-contains":     true,
	"array-contains-any": true,
	"in":                 true,
	"not-in":             true,
}

// Error messages
const (
	errFilterParseFailed      = "failed to parse filters: %w"
	errQueryExecutionFailed   = "failed to execute query: %w"
	errTemplateParseFailed    = "failed to parse template: %w"
	errTemplateExecFailed     = "failed to execute template: %w"
	errLimitParseFailed       = "failed to parse limit value '%s': %w"
	errSelectFieldParseFailed = "failed to parse select field: %w"
)

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// compatibleSource defines the interface for sources that can provide a Firestore client
type compatibleSource interface {
	FirestoreClient() *firestoreapi.Client
}

// validate compatible sources are still compatible
var _ compatibleSource = &firestoreds.Source{}

var compatibleSources = [...]string{firestoreds.SourceKind}

// Config represents the configuration for the Firestore query tool
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`

	// Template fields
	CollectionPath string         `yaml:"collectionPath" validate:"required"`
	Filters        string         `yaml:"filters"`      // JSON string template
	Select         []string       `yaml:"select"`       // Fields to select
	OrderBy        map[string]any `yaml:"orderBy"`      // Order by configuration
	Limit          string         `yaml:"limit"`        // Limit template (can be a number or template)
	AnalyzeQuery   bool           `yaml:"analyzeQuery"` // Analyze query (boolean, not parameterizable)

	// Parameters for template substitution
	Parameters tools.Parameters `yaml:"parameters"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of tool configuration
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize creates a new Tool instance from the configuration
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	// Set default limit if not specified
	if cfg.Limit == "" {
		cfg.Limit = fmt.Sprintf("%d", defaultLimit)
	}

	// Create MCP manifest
	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: cfg.Parameters.McpManifest(),
	}

	// finish tool setup
	t := Tool{
		Name:                   cfg.Name,
		Kind:                   kind,
		AuthRequired:           cfg.AuthRequired,
		Client:                 s.FirestoreClient(),
		CollectionPathTemplate: cfg.CollectionPath,
		FiltersTemplate:        cfg.Filters,
		SelectTemplate:         cfg.Select,
		OrderByTemplate:        cfg.OrderBy,
		LimitTemplate:          cfg.Limit,
		AnalyzeQuery:           cfg.AnalyzeQuery,
		Parameters:             cfg.Parameters,
		manifest:               tools.Manifest{Description: cfg.Description, Parameters: cfg.Parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:            mcpManifest,
	}
	return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

// Tool represents the Firestore query tool
type Tool struct {
	Name         string   `yaml:"name"`
	Kind         string   `yaml:"kind"`
	AuthRequired []string `yaml:"authRequired"`

	Client                 *firestoreapi.Client
	CollectionPathTemplate string
	FiltersTemplate        string
	SelectTemplate         []string
	OrderByTemplate        map[string]any
	LimitTemplate          string
	AnalyzeQuery           bool
	Parameters             tools.Parameters

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// SimplifiedFilter represents the simplified filter format
type SimplifiedFilter struct {
	And   []SimplifiedFilter `json:"and,omitempty"`
	Or    []SimplifiedFilter `json:"or,omitempty"`
	Field string             `json:"field,omitempty"`
	Op    string             `json:"op,omitempty"`
	Value interface{}        `json:"value,omitempty"`
}

// OrderByConfig represents ordering configuration
type OrderByConfig struct {
	Field     string `json:"field"`
	Direction string `json:"direction"`
}

// GetDirection returns the Firestore direction constant
func (o *OrderByConfig) GetDirection() firestoreapi.Direction {
	if strings.EqualFold(o.Direction, "DESCENDING") || strings.EqualFold(o.Direction, "DESC") {
		return firestoreapi.Desc
	}
	return firestoreapi.Asc
}

// QueryResult represents a document result from the query
type QueryResult struct {
	ID         string         `json:"id"`
	Path       string         `json:"path"`
	Data       map[string]any `json:"data"`
	CreateTime interface{}    `json:"createTime,omitempty"`
	UpdateTime interface{}    `json:"updateTime,omitempty"`
	ReadTime   interface{}    `json:"readTime,omitempty"`
}

// QueryResponse represents the full response including optional metrics
type QueryResponse struct {
	Documents      []QueryResult  `json:"documents"`
	ExplainMetrics map[string]any `json:"explainMetrics,omitempty"`
}

// Invoke executes the Firestore query based on the provided parameters
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	// Process collection path with template substitution
	collectionPath, err := tools.PopulateTemplate("collectionPath", t.CollectionPathTemplate, paramsMap)
	if err != nil {
		return nil, fmt.Errorf("failed to process collection path: %w", err)
	}

	// Build the query
	query, err := t.buildQuery(collectionPath, paramsMap)
	if err != nil {
		return nil, err
	}

	// Execute the query and return results
	return t.executeQuery(ctx, query)
}

// buildQuery constructs the Firestore query from parameters
func (t Tool) buildQuery(collectionPath string, params map[string]any) (*firestoreapi.Query, error) {
	collection := t.Client.Collection(collectionPath)
	query := collection.Query

	// Process and apply filters if template is provided
	if t.FiltersTemplate != "" {
		// Apply template substitution to filters
		filtersJSON, err := tools.PopulateTemplateWithJSON("filters", t.FiltersTemplate, params)
		if err != nil {
			return nil, fmt.Errorf("failed to process filters template: %w", err)
		}

		// Parse the simplified filter format
		var simplifiedFilter SimplifiedFilter
		if err := json.Unmarshal([]byte(filtersJSON), &simplifiedFilter); err != nil {
			return nil, fmt.Errorf(errFilterParseFailed, err)
		}

		// Convert simplified filter to Firestore filter
		if filter := t.convertToFirestoreFilter(simplifiedFilter); filter != nil {
			query = query.WhereEntity(filter)
		}
	}

	// Process select fields
	selectFields, err := t.processSelectFields(params)
	if err != nil {
		return nil, err
	}
	if len(selectFields) > 0 {
		query = query.Select(selectFields...)
	}

	// Process and apply ordering
	orderBy, err := t.getOrderBy(params)
	if err != nil {
		return nil, err
	}
	if orderBy != nil {
		query = query.OrderBy(orderBy.Field, orderBy.GetDirection())
	}

	// Process and apply limit
	limit, err := t.getLimit(params)
	if err != nil {
		return nil, err
	}
	query = query.Limit(limit)

	// Apply analyze options if enabled
	if t.AnalyzeQuery {
		query = query.WithRunOptions(firestoreapi.ExplainOptions{
			Analyze: true,
		})
	}

	return &query, nil
}

// convertToFirestoreFilter converts simplified filter format to Firestore EntityFilter
func (t Tool) convertToFirestoreFilter(filter SimplifiedFilter) firestoreapi.EntityFilter {
	// Handle AND filters
	if len(filter.And) > 0 {
		filters := make([]firestoreapi.EntityFilter, 0, len(filter.And))
		for _, f := range filter.And {
			if converted := t.convertToFirestoreFilter(f); converted != nil {
				filters = append(filters, converted)
			}
		}
		if len(filters) > 0 {
			return firestoreapi.AndFilter{Filters: filters}
		}
		return nil
	}

	// Handle OR filters
	if len(filter.Or) > 0 {
		filters := make([]firestoreapi.EntityFilter, 0, len(filter.Or))
		for _, f := range filter.Or {
			if converted := t.convertToFirestoreFilter(f); converted != nil {
				filters = append(filters, converted)
			}
		}
		if len(filters) > 0 {
			return firestoreapi.OrFilter{Filters: filters}
		}
		return nil
	}

	// Handle simple property filter
	if filter.Field != "" && filter.Op != "" && filter.Value != nil {
		if validOperators[filter.Op] {
			// Convert the value using the Firestore native JSON converter
			convertedValue, err := util.JSONToFirestoreValue(filter.Value, t.Client)
			if err != nil {
				// If conversion fails, use the original value
				convertedValue = filter.Value
			}

			return firestoreapi.PropertyFilter{
				Path:     filter.Field,
				Operator: filter.Op,
				Value:    convertedValue,
			}
		}
	}

	return nil
}

// processSelectFields processes the select fields with parameter substitution
func (t Tool) processSelectFields(params map[string]any) ([]string, error) {
	var selectFields []string

	// Process configured select fields with template substitution
	for _, field := range t.SelectTemplate {
		// Check if it's a template
		if strings.Contains(field, "{{") {
			processed, err := tools.PopulateTemplate("selectField", field, params)
			if err != nil {
				return nil, err
			}
			if processed != "" {
				// The processed field might be an array format [a b c] or a single value
				trimmedProcessed := strings.TrimSpace(processed)

				// Check if it's in array format [a b c]
				if strings.HasPrefix(trimmedProcessed, "[") && strings.HasSuffix(trimmedProcessed, "]") {
					// Remove brackets and split by spaces
					arrayContent := strings.TrimPrefix(trimmedProcessed, "[")
					arrayContent = strings.TrimSuffix(arrayContent, "]")
					fields := strings.Fields(arrayContent) // Fields splits by any whitespace
					for _, f := range fields {
						if f != "" {
							selectFields = append(selectFields, f)
						}
					}
				} else {
					selectFields = append(selectFields, processed)
				}
			}
		} else {
			selectFields = append(selectFields, field)
		}
	}

	return selectFields, nil
}

// getOrderBy processes the orderBy configuration with parameter substitution
func (t Tool) getOrderBy(params map[string]any) (*OrderByConfig, error) {
	if t.OrderByTemplate == nil {
		return nil, nil
	}

	orderBy := &OrderByConfig{}

	// Process field
	field, err := t.getOrderByForKey("field", params)
	if err != nil {
		return nil, err
	}
	orderBy.Field = field

	// Process direction
	direction, err := t.getOrderByForKey("direction", params)
	if err != nil {
		return nil, err
	}
	orderBy.Direction = direction

	if orderBy.Field == "" {
		return nil, nil
	}

	return orderBy, nil
}

func (t Tool) getOrderByForKey(key string, params map[string]any) (string, error) {
	value, ok := t.OrderByTemplate[key].(string)
	if !ok {
		return "", nil
	}

	processedValue, err := tools.PopulateTemplate(fmt.Sprintf("orderBy%s", key), value, params)
	if err != nil {
		return "", err
	}

	return processedValue, nil
}
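// Illustrative note (hypothetical values, not part of the original file): with a
// LimitTemplate of "{{.maxResults}}" and params {"maxResults": 25}, the getLimit helper
// below renders the template to "25" and returns 25; an empty template falls back to
// defaultLimit (100), and a non-numeric rendering is reported via errLimitParseFailed.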
// getLimit processes the limit template with parameter substitution
func (t Tool) getLimit(params map[string]any) (int, error) {
	limit := defaultLimit
	if t.LimitTemplate != "" {
		processedValue, err := tools.PopulateTemplate("limit", t.LimitTemplate, params)
		if err != nil {
			return 0, err
		}

		// Try to parse as integer
		if processedValue != "" {
			parsedLimit, err := strconv.Atoi(processedValue)
			if err != nil {
				return 0, fmt.Errorf(errLimitParseFailed, processedValue, err)
			}
			limit = parsedLimit
		}
	}
	return limit, nil
}

// executeQuery runs the query and formats the results
func (t Tool) executeQuery(ctx context.Context, query *firestoreapi.Query) (any, error) {
	docIterator := query.Documents(ctx)
	docs, err := docIterator.GetAll()
	if err != nil {
		return nil, fmt.Errorf(errQueryExecutionFailed, err)
	}

	// Convert results to structured format
	results := make([]QueryResult, len(docs))
	for i, doc := range docs {
		results[i] = QueryResult{
			ID:         doc.Ref.ID,
			Path:       doc.Ref.Path,
			Data:       doc.Data(),
			CreateTime: doc.CreateTime,
			UpdateTime: doc.UpdateTime,
			ReadTime:   doc.ReadTime,
		}
	}

	// Return with explain metrics if requested
	if t.AnalyzeQuery {
		explainMetrics, err := t.getExplainMetrics(docIterator)
		if err == nil && explainMetrics != nil {
			response := QueryResponse{
				Documents:      results,
				ExplainMetrics: explainMetrics,
			}
			return response, nil
		}
	}

	return results, nil
}

// getExplainMetrics extracts explain metrics from the query iterator
func (t Tool) getExplainMetrics(docIterator *firestoreapi.DocumentIterator) (map[string]any, error) {
	explainMetrics, err := docIterator.ExplainMetrics()
	if err != nil || explainMetrics == nil {
		return nil, err
	}

	metricsData := make(map[string]any)

	// Add plan summary if available
	if explainMetrics.PlanSummary != nil {
		planSummary := make(map[string]any)
		planSummary["indexesUsed"] = explainMetrics.PlanSummary.IndexesUsed
		metricsData["planSummary"] = planSummary
	}

	// Add execution stats if available
	if explainMetrics.ExecutionStats != nil {
		executionStats := make(map[string]any)
		executionStats["resultsReturned"] = explainMetrics.ExecutionStats.ResultsReturned
		executionStats["readOperations"] = explainMetrics.ExecutionStats.ReadOperations

		if explainMetrics.ExecutionStats.ExecutionDuration != nil {
			executionStats["executionDuration"] = explainMetrics.ExecutionStats.ExecutionDuration.String()
		}

		if explainMetrics.ExecutionStats.DebugStats != nil {
			executionStats["debugStats"] = *explainMetrics.ExecutionStats.DebugStats
		}

		metricsData["executionStats"] = executionStats
	}

	return metricsData, nil
}

// ParseParams parses and validates input parameters
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.Parameters, data, claims)
}

// Manifest returns the tool manifest
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the MCP manifest
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized based on verified auth services
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return false
}
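// For reference, a minimal standalone sketch (not part of the change set) of how a filters
// template like the ones tested below decodes into the SimplifiedFilter shape once the
// {{.param}} placeholders have been substituted. The struct mirrors the one defined in
// firestorequery.go; the JSON literal and its values are illustrative only.
package main

import (
	"encoding/json"
	"fmt"
)

type SimplifiedFilter struct {
	And   []SimplifiedFilter `json:"and,omitempty"`
	Or    []SimplifiedFilter `json:"or,omitempty"`
	Field string             `json:"field,omitempty"`
	Op    string             `json:"op,omitempty"`
	Value interface{}        `json:"value,omitempty"`
}

func main() {
	raw := `{
		"and": [
			{"field": "category", "op": "==", "value": {"stringValue": "books"}},
			{"field": "price", "op": "<=", "value": {"doubleValue": 20}}
		]
	}`
	var f SimplifiedFilter
	if err := json.Unmarshal([]byte(raw), &f); err != nil {
		panic(err)
	}
	// Prints "2 category ==": a two-clause AND filter, ready for convertToFirestoreFilter.
	fmt.Println(len(f.And), f.And[0].Field, f.And[0].Op)
}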
492
internal/tools/firestore/firestorequery/firestorequery_test.go
Normal file
@@ -0,0 +1,492 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestorequery_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorequery"
)

func TestParseFromYamlFirestoreQuery(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example with parameterized collection path",
			in: `
			tools:
				query_users_tool:
					kind: firestore-query
					source: my-firestore-instance
					description: Query users collection with parameterized path
					collectionPath: "users/{{.userId}}/documents"
					parameters:
						- name: userId
						  type: string
						  description: The user ID to query documents for
						  required: true
			`,
			want: server.ToolConfigs{
				"query_users_tool": firestorequery.Config{
					Name:           "query_users_tool",
					Kind:           "firestore-query",
					Source:         "my-firestore-instance",
					Description:    "Query users collection with parameterized path",
					CollectionPath: "users/{{.userId}}/documents",
					AuthRequired:   []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("userId", "The user ID to query documents for", true),
					},
				},
			},
		},
		{
			desc: "with parameterized filters",
			in: `
			tools:
				query_products_tool:
					kind: firestore-query
					source: prod-firestore
					description: Query products with dynamic filters
					collectionPath: "products"
					filters: |
						{
						  "and": [
						    {"field": "category", "op": "==", "value": {"stringValue": "{{.category}}"}},
						    {"field": "price", "op": "<=", "value": {"doubleValue": {{.maxPrice}}}}
						  ]
						}
					parameters:
						- name: category
						  type: string
						  description: Product category to filter by
						  required: true
						- name: maxPrice
						  type: float
						  description: Maximum price for products
						  required: true
			`,
			want: server.ToolConfigs{
				"query_products_tool": firestorequery.Config{
					Name:           "query_products_tool",
					Kind:           "firestore-query",
					Source:         "prod-firestore",
					Description:    "Query products with dynamic filters",
					CollectionPath: "products",
					Filters: `{
  "and": [
    {"field": "category", "op": "==", "value": {"stringValue": "{{.category}}"}},
    {"field": "price", "op": "<=", "value": {"doubleValue": {{.maxPrice}}}}
  ]
}
`,
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("category", "Product category to filter by", true),
						tools.NewFloatParameterWithRequired("maxPrice", "Maximum price for products", true),
					},
				},
			},
		},
		{
			desc: "with select fields and orderBy",
			in: `
			tools:
				query_orders_tool:
					kind: firestore-query
					source: orders-firestore
					description: Query orders with field selection
					collectionPath: "orders"
					select:
						- orderId
						- customerName
						- totalAmount
					orderBy:
						field: "{{.sortField}}"
						direction: "DESCENDING"
					limit: 50
					parameters:
						- name: sortField
						  type: string
						  description: Field to sort by
						  required: true
			`,
			want: server.ToolConfigs{
				"query_orders_tool": firestorequery.Config{
					Name:           "query_orders_tool",
					Kind:           "firestore-query",
					Source:         "orders-firestore",
					Description:    "Query orders with field selection",
					CollectionPath: "orders",
					Select:         []string{"orderId", "customerName", "totalAmount"},
					OrderBy: map[string]any{
						"field":     "{{.sortField}}",
						"direction": "DESCENDING",
					},
					Limit:        "50",
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("sortField", "Field to sort by", true),
					},
				},
			},
		},
		{
			desc: "with auth requirements and complex filters",
			in: `
			tools:
				secure_query_tool:
					kind: firestore-query
					source: secure-firestore
					description: Query with authentication and complex filters
					collectionPath: "{{.collection}}"
					filters: |
						{
						  "or": [
						    {
						      "and": [
						        {"field": "status", "op": "==", "value": {"stringValue": "{{.status}}"}},
						        {"field": "priority", "op": ">=", "value": {"integerValue": "{{.minPriority}}"}}
						      ]
						    },
						    {"field": "urgent", "op": "==", "value": {"booleanValue": true}}
						  ]
						}
					analyzeQuery: true
					authRequired:
						- google-auth-service
						- api-key-service
					parameters:
						- name: collection
						  type: string
						  description: Collection name to query
						  required: true
						- name: status
						  type: string
						  description: Status to filter by
						  required: true
						- name: minPriority
						  type: integer
						  description: Minimum priority level
						  default: 1
			`,
			want: server.ToolConfigs{
				"secure_query_tool": firestorequery.Config{
					Name:           "secure_query_tool",
					Kind:           "firestore-query",
					Source:         "secure-firestore",
					Description:    "Query with authentication and complex filters",
					CollectionPath: "{{.collection}}",
					Filters: `{
  "or": [
    {
      "and": [
        {"field": "status", "op": "==", "value": {"stringValue": "{{.status}}"}},
        {"field": "priority", "op": ">=", "value": {"integerValue": "{{.minPriority}}"}}
      ]
    },
    {"field": "urgent", "op": "==", "value": {"booleanValue": true}}
  ]
}
`,
					AnalyzeQuery: true,
					AuthRequired: []string{"google-auth-service", "api-key-service"},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("collection", "Collection name to query", true),
						tools.NewStringParameterWithRequired("status", "Status to filter by", true),
						tools.NewIntParameterWithDefault("minPriority", 1, "Minimum priority level"),
					},
				},
			},
		},
		{
			desc: "with Firestore native JSON value types and template parameters",
			in: `
			tools:
				query_with_typed_values:
					kind: firestore-query
					source: typed-firestore
					description: Query with Firestore native JSON value types
					collectionPath: "countries"
					filters: |
						{
						  "or": [
						    {"field": "continent", "op": "==", "value": {"stringValue": "{{.continent}}"}},
						    {
						      "and": [
						        {"field": "area", "op": ">", "value": {"integerValue": "2000000"}},
						        {"field": "area", "op": "<", "value": {"integerValue": "3000000"}},
						        {"field": "population", "op": ">=", "value": {"integerValue": "{{.minPopulation}}"}},
						        {"field": "gdp", "op": ">", "value": {"doubleValue": {{.minGdp}}}},
						        {"field": "isActive", "op": "==", "value": {"booleanValue": {{.isActive}}}},
						        {"field": "lastUpdated", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
						      ]
						    }
						  ]
						}
					parameters:
						- name: continent
						  type: string
						  description: Continent to filter by
						  required: true
						- name: minPopulation
						  type: string
						  description: Minimum population as string
						  required: true
						- name: minGdp
						  type: float
						  description: Minimum GDP value
						  required: true
						- name: isActive
						  type: boolean
						  description: Filter by active status
						  required: true
						- name: startDate
						  type: string
						  description: Start date in RFC3339 format
						  required: true
			`,
			want: server.ToolConfigs{
				"query_with_typed_values": firestorequery.Config{
					Name:           "query_with_typed_values",
					Kind:           "firestore-query",
					Source:         "typed-firestore",
					Description:    "Query with Firestore native JSON value types",
					CollectionPath: "countries",
					Filters: `{
  "or": [
    {"field": "continent", "op": "==", "value": {"stringValue": "{{.continent}}"}},
    {
      "and": [
        {"field": "area", "op": ">", "value": {"integerValue": "2000000"}},
        {"field": "area", "op": "<", "value": {"integerValue": "3000000"}},
        {"field": "population", "op": ">=", "value": {"integerValue": "{{.minPopulation}}"}},
        {"field": "gdp", "op": ">", "value": {"doubleValue": {{.minGdp}}}},
        {"field": "isActive", "op": "==", "value": {"booleanValue": {{.isActive}}}},
        {"field": "lastUpdated", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
      ]
    }
  ]
}
`,
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("continent", "Continent to filter by", true),
						tools.NewStringParameterWithRequired("minPopulation", "Minimum population as string", true),
						tools.NewFloatParameterWithRequired("minGdp", "Minimum GDP value", true),
						tools.NewBooleanParameterWithRequired("isActive", "Filter by active status", true),
						tools.NewStringParameterWithRequired("startDate", "Start date in RFC3339 format", true),
					},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}

func TestParseFromYamlMultipleQueryTools(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	in := `
	tools:
		query_user_posts:
			kind: firestore-query
			source: social-firestore
			description: Query user posts with filtering
			collectionPath: "users/{{.userId}}/posts"
			filters: |
				{
				  "and": [
				    {"field": "visibility", "op": "==", "value": {"stringValue": "{{.visibility}}"}},
				    {"field": "createdAt", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
				  ]
				}
			select:
				- title
				- content
				- likes
			orderBy:
				field: createdAt
				direction: "{{.sortOrder}}"
			limit: 20
			parameters:
				- name: userId
				  type: string
				  description: User ID whose posts to query
				  required: true
				- name: visibility
				  type: string
				  description: Post visibility (public, private, friends)
				  required: true
				- name: startDate
				  type: string
				  description: Start date for posts
				  required: true
				- name: sortOrder
				  type: string
				  description: Sort order (ASCENDING or DESCENDING)
				  default: "DESCENDING"
		query_inventory:
			kind: firestore-query
			source: inventory-firestore
			description: Query inventory items
			collectionPath: "warehouses/{{.warehouseId}}/inventory"
			filters: |
				{
				  "field": "quantity", "op": "<", "value": {"integerValue": "{{.threshold}}"}}
			parameters:
				- name: warehouseId
				  type: string
				  description: Warehouse ID to check inventory
				  required: true
				- name: threshold
				  type: integer
				  description: Quantity threshold for low stock
				  required: true
		query_transactions:
			kind: firestore-query
			source: finance-firestore
			description: Query financial transactions
			collectionPath: "accounts/{{.accountId}}/transactions"
			filters: |
				{
				  "or": [
				    {"field": "type", "op": "==", "value": {"stringValue": "{{.transactionType}}"}},
				    {"field": "amount", "op": ">", "value": {"doubleValue": {{.minAmount}}}}
				  ]
				}
			analyzeQuery: true
			authRequired:
				- finance-auth
			parameters:
				- name: accountId
				  type: string
				  description: Account ID for transactions
				  required: true
				- name: transactionType
				  type: string
				  description: Type of transaction
				  default: "all"
				- name: minAmount
				  type: float
				  description: Minimum transaction amount
				  default: 0
	`
	want := server.ToolConfigs{
		"query_user_posts": firestorequery.Config{
			Name:           "query_user_posts",
			Kind:           "firestore-query",
			Source:         "social-firestore",
			Description:    "Query user posts with filtering",
			CollectionPath: "users/{{.userId}}/posts",
			Filters: `{
  "and": [
    {"field": "visibility", "op": "==", "value": {"stringValue": "{{.visibility}}"}},
    {"field": "createdAt", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
  ]
}
`,
			Select: []string{"title", "content", "likes"},
			OrderBy: map[string]any{
				"field":     "createdAt",
				"direction": "{{.sortOrder}}",
			},
			Limit:        "20",
			AuthRequired: []string{},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("userId", "User ID whose posts to query", true),
				tools.NewStringParameterWithRequired("visibility", "Post visibility (public, private, friends)", true),
				tools.NewStringParameterWithRequired("startDate", "Start date for posts", true),
				tools.NewStringParameterWithDefault("sortOrder", "DESCENDING", "Sort order (ASCENDING or DESCENDING)"),
			},
		},
		"query_inventory": firestorequery.Config{
			Name:           "query_inventory",
			Kind:           "firestore-query",
			Source:         "inventory-firestore",
			Description:    "Query inventory items",
			CollectionPath: "warehouses/{{.warehouseId}}/inventory",
			Filters: `{
  "field": "quantity", "op": "<", "value": {"integerValue": "{{.threshold}}"}}
`,
			AuthRequired: []string{},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("warehouseId", "Warehouse ID to check inventory", true),
				tools.NewIntParameterWithRequired("threshold", "Quantity threshold for low stock", true),
			},
		},
		"query_transactions": firestorequery.Config{
			Name:           "query_transactions",
			Kind:           "firestore-query",
			Source:         "finance-firestore",
			Description:    "Query financial transactions",
			CollectionPath: "accounts/{{.accountId}}/transactions",
			Filters: `{
  "or": [
    {"field": "type", "op": "==", "value": {"stringValue": "{{.transactionType}}"}},
    {"field": "amount", "op": ">", "value": {"doubleValue": {{.minAmount}}}}
  ]
}
`,
			AnalyzeQuery: true,
			AuthRequired: []string{"finance-auth"},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("accountId", "Account ID for transactions", true),
				tools.NewStringParameterWithDefault("transactionType", "all", "Type of transaction"),
				tools.NewFloatParameterWithDefault("minAmount", 0, "Minimum transaction amount"),
			},
		},
	}

	got := struct {
		Tools server.ToolConfigs `yaml:"tools"`
	}{}
	// Parse contents
	err = yaml.UnmarshalContext(ctx, testutils.FormatYaml(in), &got)
	if err != nil {
		t.Fatalf("unable to unmarshal: %s", err)
	}
	if diff := cmp.Diff(want, got.Tools); diff != "" {
		t.Fatalf("incorrect parse: diff %v", diff)
	}
}
360
internal/tools/mysql/mysqllisttables/mysqllisttables.go
Normal file
@@ -0,0 +1,360 @@
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package mysqllisttables
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
yaml "github.com/goccy/go-yaml"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources/mysql"
|
||||
"github.com/googleapis/genai-toolbox/internal/tools"
|
||||
"github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlcommon"
|
||||
)
|
||||
|
||||
const kind string = "mysql-list-tables"
|
||||
|
||||
const listTablesStatement = `
|
||||
SELECT
|
||||
T.TABLE_SCHEMA AS schema_name,
|
||||
T.TABLE_NAME AS object_name,
|
||||
CASE
|
||||
WHEN @output_format = 'simple' THEN
|
||||
JSON_OBJECT('name', T.TABLE_NAME)
|
||||
ELSE
|
||||
CONVERT(
|
||||
JSON_OBJECT(
|
||||
'schema_name', T.TABLE_SCHEMA,
|
||||
'object_name', T.TABLE_NAME,
|
||||
'object_type', 'TABLE',
|
||||
'owner', (
|
||||
SELECT
|
||||
IFNULL(U.GRANTEE, 'N/A')
|
||||
FROM
|
||||
INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
|
||||
WHERE
|
||||
U.TABLE_SCHEMA = T.TABLE_SCHEMA
|
||||
LIMIT 1
|
||||
),
|
||||
'comment', IFNULL(T.TABLE_COMMENT, ''),
|
||||
'columns', (
|
||||
SELECT
|
||||
IFNULL(
|
||||
JSON_ARRAYAGG(
|
||||
JSON_OBJECT(
|
||||
'column_name', C.COLUMN_NAME,
|
||||
'data_type', C.COLUMN_TYPE,
|
||||
'ordinal_position', C.ORDINAL_POSITION,
|
||||
'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
|
||||
'column_default', C.COLUMN_DEFAULT,
|
||||
'column_comment', IFNULL(C.COLUMN_COMMENT, '')
|
||||
)
|
||||
),
|
||||
JSON_ARRAY()
|
||||
)
|
||||
FROM
|
||||
INFORMATION_SCHEMA.COLUMNS C
|
||||
WHERE
|
||||
C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
|
||||
ORDER BY C.ORDINAL_POSITION
|
||||
),
|
||||
'constraints', (
|
||||
SELECT
|
||||
IFNULL(
|
||||
JSON_ARRAYAGG(
|
||||
JSON_OBJECT(
|
||||
'constraint_name', TC.CONSTRAINT_NAME,
|
||||
'constraint_type',
|
||||
CASE TC.CONSTRAINT_TYPE
|
||||
WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
|
||||
WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
|
||||
WHEN 'UNIQUE' THEN 'UNIQUE'
|
||||
ELSE TC.CONSTRAINT_TYPE
|
||||
END,
|
||||
'constraint_definition', '',
|
||||
'constraint_columns', (
|
||||
SELECT
|
||||
IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
|
||||
FROM
|
||||
INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
|
||||
WHERE
|
||||
KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
|
||||
AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
|
||||
AND KCU.TABLE_NAME = TC.TABLE_NAME
|
||||
ORDER BY KCU.ORDINAL_POSITION
|
||||
),
|
||||
'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
|
||||
'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
|
||||
(SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
|
||||
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
|
||||
WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
|
||||
AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
|
||||
AND FKCU.TABLE_NAME = TC.TABLE_NAME
|
||||
AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
|
||||
ORDER BY FKCU.ORDINAL_POSITION),
|
||||
NULL
|
||||
)
|
||||
)
|
||||
),
|
||||
JSON_ARRAY()
|
||||
)
|
||||
FROM
|
||||
INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
|
||||
LEFT JOIN
|
||||
INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
|
||||
ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
|
||||
AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
|
||||
AND TC.TABLE_NAME = RC.TABLE_NAME
|
||||
WHERE
|
||||
TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
|
||||
),
|
||||
'indexes', (
|
||||
SELECT
|
||||
IFNULL(
|
||||
JSON_ARRAYAGG(
|
||||
JSON_OBJECT(
|
||||
'index_name', IndexData.INDEX_NAME,
|
||||
'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
|
||||
'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
|
||||
'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
|
||||
)
|
||||
),
|
||||
JSON_ARRAY()
|
||||
)
|
||||
FROM (
|
||||
SELECT
|
||||
S.TABLE_SCHEMA,
|
||||
S.TABLE_NAME,
|
||||
S.INDEX_NAME,
|
||||
MIN(S.NON_UNIQUE) AS NON_UNIQUE,
|
||||
JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY
|
||||
FROM
|
||||
INFORMATION_SCHEMA.STATISTICS S
|
||||
WHERE
|
||||
S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
|
||||
GROUP BY
|
||||
S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
|
||||
) AS IndexData
|
||||
ORDER BY IndexData.INDEX_NAME
|
||||
),
|
||||
'triggers', (
|
||||
SELECT
|
||||
IFNULL(
|
||||
JSON_ARRAYAGG(
|
||||
JSON_OBJECT(
|
||||
'trigger_name', TR.TRIGGER_NAME,
|
||||
'trigger_definition', TR.ACTION_STATEMENT
|
||||
)
|
||||
),
|
||||
JSON_ARRAY()
|
||||
)
|
||||
FROM
|
||||
INFORMATION_SCHEMA.TRIGGERS TR
|
||||
WHERE
|
||||
TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
|
||||
ORDER BY TR.TRIGGER_NAME
|
||||
)
|
||||
)
|
||||
USING utf8mb4)
|
||||
END AS object_details
|
||||
FROM
|
||||
INFORMATION_SCHEMA.TABLES T
|
||||
CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
|
||||
WHERE
|
||||
T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
|
||||
AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
|
||||
AND T.TABLE_TYPE = 'BASE TABLE'
|
||||
ORDER BY
|
||||
T.TABLE_SCHEMA, T.TABLE_NAME;
|
||||
`
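
// The two '?' placeholders in the statement above bind the session variables
// @table_names (a comma-separated filter; an empty string lists every table)
// and @output_format ('simple' or 'detailed'), mirroring the tool's two
// string parameters.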

func init() {
    if !tools.Register(kind, newConfig) {
        panic(fmt.Sprintf("tool kind %q already registered", kind))
    }
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
    actual := Config{Name: name}
    if err := decoder.DecodeContext(ctx, &actual); err != nil {
        return nil, err
    }
    return actual, nil
}

type compatibleSource interface {
    MySQLPool() *sql.DB
}

// validate compatible sources are still compatible
var _ compatibleSource = &cloudsqlmysql.Source{}
var _ compatibleSource = &mysql.Source{}

var compatibleSources = [...]string{cloudsqlmysql.SourceKind, mysql.SourceKind}

type Config struct {
    Name         string   `yaml:"name" validate:"required"`
    Kind         string   `yaml:"kind" validate:"required"`
    Source       string   `yaml:"source" validate:"required"`
    Description  string   `yaml:"description" validate:"required"`
    AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
    return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
    // verify source exists
    rawS, ok := srcs[cfg.Source]
    if !ok {
        return nil, fmt.Errorf("no source named %q configured", cfg.Source)
    }

    // verify the source is compatible
    s, ok := rawS.(compatibleSource)
    if !ok {
        return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
    }

    allParameters := tools.Parameters{
        tools.NewStringParameter("table_names", "Optional: A comma-separated list of table names. If empty, details for all tables will be listed."),
        tools.NewStringParameterWithDefault("output_format", "detailed", "Optional: Use 'simple' for names only or 'detailed' for full info."),
    }
    paramManifest := allParameters.Manifest()
    inputSchema := allParameters.McpManifest()

    mcpManifest := tools.McpManifest{
        Name:        cfg.Name,
        Description: cfg.Description,
        InputSchema: inputSchema,
    }

    // finish tool setup
    t := Tool{
        Name:         cfg.Name,
        Kind:         kind,
        AllParams:    allParameters,
        AuthRequired: cfg.AuthRequired,
        Pool:         s.MySQLPool(),
        manifest:     tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
        mcpManifest:  mcpManifest,
    }
    return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
    Name         string           `yaml:"name"`
    Kind         string           `yaml:"kind"`
    AuthRequired []string         `yaml:"authRequired"`
    AllParams    tools.Parameters `yaml:"allParams"`

    Pool        *sql.DB
    manifest    tools.Manifest
    mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
    paramsMap := params.AsMap()

    tableNames, ok := paramsMap["table_names"].(string)
    if !ok {
        return nil, fmt.Errorf("invalid or missing 'table_names' parameter; expected a string")
    }
    outputFormat, _ := paramsMap["output_format"].(string)
    if outputFormat != "simple" && outputFormat != "detailed" {
        return nil, fmt.Errorf("invalid value for output_format: must be 'simple' or 'detailed', but got %q", outputFormat)
    }

    results, err := t.Pool.QueryContext(ctx, listTablesStatement, tableNames, outputFormat)
    if err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }
    defer results.Close()

    cols, err := results.Columns()
    if err != nil {
        return nil, fmt.Errorf("unable to retrieve rows column name: %w", err)
    }

    // create an array of values for each column, which can be re-used to scan each row
    rawValues := make([]any, len(cols))
    values := make([]any, len(cols))
    for i := range rawValues {
        values[i] = &rawValues[i]
    }

    colTypes, err := results.ColumnTypes()
    if err != nil {
        return nil, fmt.Errorf("unable to get column types: %w", err)
    }

    var out []any
    for results.Next() {
        err := results.Scan(values...)
        if err != nil {
            return nil, fmt.Errorf("unable to parse row: %w", err)
        }
        vMap := make(map[string]any)
        for i, name := range cols {
            val := rawValues[i]
            if val == nil {
                vMap[name] = nil
                continue
            }

            vMap[name], err = mysqlcommon.ConvertToType(colTypes[i], val)
            if err != nil {
                return nil, fmt.Errorf("errors encountered when converting values: %w", err)
            }
        }
        out = append(out, vMap)
    }

    if err := results.Err(); err != nil {
        return nil, fmt.Errorf("errors encountered during row iteration: %w", err)
    }

    return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
    return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
    return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
    return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
    return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
    return false
}
75 internal/tools/mysql/mysqllisttables/mysqllisttables_test.go (new file)
@@ -0,0 +1,75 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mysqllisttables_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    mysqllisttables "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqllisttables"
)

func TestParseFromYamlMySQLListTables(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: mysql-list-tables
                    source: my-mysql-instance
                    description: some description
                    authRequired:
                        - my-google-auth-service
                        - other-auth-service
            `,
            want: server.ToolConfigs{
                "example_tool": mysqllisttables.Config{
                    Name:         "example_tool",
                    Kind:         "mysql-list-tables",
                    Source:       "my-mysql-instance",
                    Description:  "some description",
                    AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }
}
261 internal/tools/postgres/postgreslisttables/postgreslisttables.go (new file)
@@ -0,0 +1,261 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslisttables

import (
    "context"
    "fmt"

    yaml "github.com/goccy/go-yaml"
    "github.com/googleapis/genai-toolbox/internal/sources"
    "github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
    "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"
    "github.com/googleapis/genai-toolbox/internal/sources/postgres"
    "github.com/googleapis/genai-toolbox/internal/tools"
    "github.com/jackc/pgx/v5/pgxpool"
)

const kind string = "postgres-list-tables"

const listTablesStatement = `
WITH desired_relkinds AS (
    SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
),
table_info AS (
    SELECT
        t.oid AS table_oid,
        ns.nspname AS schema_name,
        t.relname AS table_name,
        pg_get_userbyid(t.relowner) AS table_owner,
        obj_description(t.oid, 'pg_class') AS table_comment,
        t.relkind AS object_kind
    FROM
        pg_class t
    JOIN
        pg_namespace ns ON ns.oid = t.relnamespace
    CROSS JOIN desired_relkinds dk
    WHERE
        t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
        AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
        AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
        AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
),
columns_info AS (
    SELECT
        att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
        att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
        pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
    FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
    JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
),
constraints_info AS (
    SELECT
        con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
        CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
        (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
        NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
        (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
    FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
),
indexes_info AS (
    SELECT
        idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
        idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
        (SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
    FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
),
triggers_info AS (
    SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
    FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
)
SELECT
    ti.schema_name,
    ti.table_name AS object_name,
    CASE
        WHEN $2 = 'simple' THEN
            -- IF format is 'simple', return basic JSON
            json_build_object('name', ti.table_name)
        ELSE
            json_build_object(
                'schema_name', ti.schema_name,
                'object_name', ti.table_name,
                'object_type', CASE ti.object_kind
                    WHEN 'r' THEN 'TABLE'
                    WHEN 'p' THEN 'PARTITIONED TABLE'
                    ELSE ti.object_kind::text -- Should not happen due to WHERE clause
                END,
                'owner', ti.table_owner,
                'comment', ti.table_comment,
                'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
                'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
                'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
                'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
            )
    END AS object_details
FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
`
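
// In the statement above, $1 carries the comma-separated table-name filter
// (an empty string selects all user tables) and $2 selects the output format
// ('simple' or 'detailed').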

func init() {
    if !tools.Register(kind, newConfig) {
        panic(fmt.Sprintf("tool kind %q already registered", kind))
    }
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
    actual := Config{Name: name}
    if err := decoder.DecodeContext(ctx, &actual); err != nil {
        return nil, err
    }
    return actual, nil
}

type compatibleSource interface {
    PostgresPool() *pgxpool.Pool
}

// validate compatible sources are still compatible
var _ compatibleSource = &alloydbpg.Source{}
var _ compatibleSource = &cloudsqlpg.Source{}
var _ compatibleSource = &postgres.Source{}

var compatibleSources = [...]string{alloydbpg.SourceKind, cloudsqlpg.SourceKind, postgres.SourceKind}

type Config struct {
    Name         string   `yaml:"name" validate:"required"`
    Kind         string   `yaml:"kind" validate:"required"`
    Source       string   `yaml:"source" validate:"required"`
    Description  string   `yaml:"description" validate:"required"`
    AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
    return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
    // verify source exists
    rawS, ok := srcs[cfg.Source]
    if !ok {
        return nil, fmt.Errorf("no source named %q configured", cfg.Source)
    }

    // verify the source is compatible
    s, ok := rawS.(compatibleSource)
    if !ok {
        return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
    }

    allParameters := tools.Parameters{
        tools.NewStringParameterWithDefault("table_names", "", "Optional: A comma-separated list of table names. If empty, details for all tables will be listed."),
        tools.NewStringParameterWithDefault("output_format", "detailed", "Optional: Use 'simple' for names only or 'detailed' for full info."),
    }
    paramManifest := allParameters.Manifest()
    inputSchema := allParameters.McpManifest()

    mcpManifest := tools.McpManifest{
        Name:        cfg.Name,
        Description: cfg.Description,
        InputSchema: inputSchema,
    }

    t := Tool{
        Name:         cfg.Name,
        Kind:         kind,
        AuthRequired: cfg.AuthRequired,
        AllParams:    allParameters,
        Pool:         s.PostgresPool(),
        manifest:     tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
        mcpManifest:  mcpManifest,
    }

    return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
    Name         string           `yaml:"name"`
    Kind         string           `yaml:"kind"`
    AuthRequired []string         `yaml:"authRequired"`
    AllParams    tools.Parameters `yaml:"allParams"`

    Pool        *pgxpool.Pool
    manifest    tools.Manifest
    mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
    paramsMap := params.AsMap()

    tableNames, ok := paramsMap["table_names"].(string)
    if !ok {
        return nil, fmt.Errorf("invalid 'table_names' parameter; expected a string")
    }
    outputFormat, _ := paramsMap["output_format"].(string)
    if outputFormat != "simple" && outputFormat != "detailed" {
        return nil, fmt.Errorf("invalid value for output_format: must be 'simple' or 'detailed', but got %q", outputFormat)
    }

    results, err := t.Pool.Query(ctx, listTablesStatement, tableNames, outputFormat)
    if err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }
    defer results.Close()

    fields := results.FieldDescriptions()
    var out []map[string]any

    for results.Next() {
        values, err := results.Values()
        if err != nil {
            return nil, fmt.Errorf("unable to parse row: %w", err)
        }
        rowMap := make(map[string]any)
        for i, field := range fields {
            rowMap[string(field.Name)] = values[i]
        }
        out = append(out, rowMap)
    }

    if err := results.Err(); err != nil {
        return nil, fmt.Errorf("error reading query results: %w", err)
    }

    return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
    return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
    return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
    return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
    return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
    return false
}
76 internal/tools/postgres/postgreslisttables/postgreslisttables_test.go (new file)
@@ -0,0 +1,76 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslisttables_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    postgreslisttables "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslisttables"
)

func TestParseFromYamlPostgresListTables(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-tables
                    source: my-postgres-instance
                    description: some description
                    authRequired:
                        - my-google-auth-service
                        - other-auth-service
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslisttables.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-tables",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }
}
@@ -19,6 +19,7 @@ import (
	"errors"
	"fmt"
	"slices"
	"strings"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
@@ -66,6 +67,14 @@ type ToolConfig interface {

type AccessToken string

func (token AccessToken) ParseBearerToken() (string, error) {
    headerParts := strings.Split(string(token), " ")
    if len(headerParts) != 2 || strings.ToLower(headerParts[0]) != "bearer" {
        return "", fmt.Errorf("authorization header must be in the format 'Bearer <token>': %w", ErrUnauthorized)
    }
    return headerParts[1], nil
}
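
// For example, AccessToken("Bearer abc123").ParseBearerToken() returns
// "abc123"; any other shape (missing scheme, extra spaces) yields an error
// wrapping ErrUnauthorized.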

type Tool interface {
    Invoke(context.Context, ParamValues, AccessToken) (any, error)
    ParseParams(map[string]any, map[string]map[string]any) (ParamValues, error)
178 internal/tools/yugabytedbsql/yugabytedbsql.go (new file)
@@ -0,0 +1,178 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yugabytedbsql

import (
    "context"
    "fmt"

    yaml "github.com/goccy/go-yaml"
    "github.com/googleapis/genai-toolbox/internal/sources"
    "github.com/googleapis/genai-toolbox/internal/sources/yugabytedb"
    "github.com/googleapis/genai-toolbox/internal/tools"
    "github.com/yugabyte/pgx/v5/pgxpool"
)

const kind string = "yugabytedb-sql"

func init() {
    if !tools.Register(kind, newConfig) {
        panic(fmt.Sprintf("tool kind %q already registered", kind))
    }
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
    actual := Config{Name: name}
    if err := decoder.DecodeContext(ctx, &actual); err != nil {
        return nil, err
    }
    return actual, nil
}

type compatibleSource interface {
    YugabyteDBPool() *pgxpool.Pool
}

var compatibleSources = [...]string{yugabytedb.SourceKind}

type Config struct {
    Name               string           `yaml:"name" validate:"required"`
    Kind               string           `yaml:"kind" validate:"required"`
    Source             string           `yaml:"source" validate:"required"`
    Description        string           `yaml:"description" validate:"required"`
    Statement          string           `yaml:"statement" validate:"required"`
    AuthRequired       []string         `yaml:"authRequired"`
    Parameters         tools.Parameters `yaml:"parameters"`
    TemplateParameters tools.Parameters `yaml:"templateParameters"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
    return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
    // verify source exists
    rawS, ok := srcs[cfg.Source]
    if !ok {
        return nil, fmt.Errorf("no source named %q configured", cfg.Source)
    }

    // verify the source is compatible
    s, ok := rawS.(compatibleSource)
    if !ok {
        return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
    }

    allParameters, paramManifest, paramMcpManifest, err := tools.ProcessParameters(cfg.TemplateParameters, cfg.Parameters)
    if err != nil {
        return nil, err
    }

    mcpManifest := tools.McpManifest{
        Name:        cfg.Name,
        Description: cfg.Description,
        InputSchema: paramMcpManifest,
    }

    // finish tool setup
    t := Tool{
        Name:               cfg.Name,
        Kind:               kind,
        Parameters:         cfg.Parameters,
        TemplateParameters: cfg.TemplateParameters,
        AllParams:          allParameters,
        Statement:          cfg.Statement,
        AuthRequired:       cfg.AuthRequired,
        Pool:               s.YugabyteDBPool(),
        manifest:           tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
        mcpManifest:        mcpManifest,
    }
    return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
    Name               string           `yaml:"name"`
    Kind               string           `yaml:"kind"`
    AuthRequired       []string         `yaml:"authRequired"`
    Parameters         tools.Parameters `yaml:"parameters"`
    TemplateParameters tools.Parameters `yaml:"templateParameters"`
    AllParams          tools.Parameters `yaml:"allParams"`

    Pool        *pgxpool.Pool
    Statement   string
    manifest    tools.Manifest
    mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
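    // Template parameters (e.g. table or column names) are substituted into
    // the statement text first; the remaining standard parameters are then
    // bound as positional arguments ($1, $2, ...) when the query runs.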
    paramsMap := params.AsMap()
    newStatement, err := tools.ResolveTemplateParams(t.TemplateParameters, t.Statement, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract template params: %w", err)
    }

    newParams, err := tools.GetParams(t.Parameters, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params: %w", err)
    }
    sliceParams := newParams.AsSlice()
    results, err := t.Pool.Query(ctx, newStatement, sliceParams...)
    if err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }

    fields := results.FieldDescriptions()

    var out []any
    for results.Next() {
        v, err := results.Values()
        if err != nil {
            return nil, fmt.Errorf("unable to parse row: %w", err)
        }
        vMap := make(map[string]any)
        for i, f := range fields {
            vMap[f.Name] = v[i]
        }
        out = append(out, vMap)
    }

    return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
    return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
    return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
    return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
    return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
    return false
}
214 internal/tools/yugabytedbsql/yugabytedbsql_test.go (new file)
@@ -0,0 +1,214 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yugabytedbsql_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    "github.com/googleapis/genai-toolbox/internal/tools"
    "github.com/googleapis/genai-toolbox/internal/tools/yugabytedbsql"
)

func TestParseFromYamlYugabyteDBSQL(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic valid config",
            in: `
            tools:
                hotel_search:
                    kind: yugabytedb-sql
                    source: yb-source
                    description: search hotels by city
                    statement: |
                        SELECT * FROM hotels WHERE city = $1;
                    authRequired:
                        - auth-service-a
                        - auth-service-b
                    parameters:
                        - name: city
                          type: string
                          description: city name
                          authServices:
                            - name: auth-service-a
                              field: user_id
                            - name: auth-service-b
                              field: user_id
            `,
            want: server.ToolConfigs{
                "hotel_search": yugabytedbsql.Config{
                    Name:         "hotel_search",
                    Kind:         "yugabytedb-sql",
                    Source:       "yb-source",
                    Description:  "search hotels by city",
                    Statement:    "SELECT * FROM hotels WHERE city = $1;\n",
                    AuthRequired: []string{"auth-service-a", "auth-service-b"},
                    Parameters: []tools.Parameter{
                        tools.NewStringParameterWithAuth("city", "city name",
                            []tools.ParamAuthService{
                                {Name: "auth-service-a", Field: "user_id"},
                                {Name: "auth-service-b", Field: "user_id"},
                            },
                        ),
                    },
                },
            },
        },
    }

    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }
}

func TestFailParseFromYamlYugabyteDBSQL(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    cases := []struct {
        desc string
        in   string
    }{
        {
            desc: "missing required field (statement)",
            in: `
            tools:
                tool1:
                    kind: yugabytedb-sql
                    source: yb-source
                    description: incomplete config
            `,
        },
        {
            desc: "unknown field (foo)",
            in: `
            tools:
                tool2:
                    kind: yugabytedb-sql
                    source: yb-source
                    description: test
                    statement: SELECT 1;
                    foo: bar
            `,
        },
    }
    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            cfg := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &cfg)
            if err == nil {
                t.Fatalf("expected error but got none")
            }
        })
    }
}

func TestParseFromYamlWithTemplateParamsYugabyteDB(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: yugabytedb-sql
                    source: my-yb-instance
                    description: some description
                    statement: |
                        SELECT * FROM SQL_STATEMENT;
                    parameters:
                        - name: name
                          type: string
                          description: some description
                    templateParameters:
                        - name: tableName
                          type: string
                          description: The table to select hotels from.
                        - name: fieldArray
                          type: array
                          description: The columns to return for the query.
                          items:
                            name: column
                            type: string
                            description: A column name that will be returned from the query.
            `,
            want: server.ToolConfigs{
                "example_tool": yugabytedbsql.Config{
                    Name:         "example_tool",
                    Kind:         "yugabytedb-sql",
                    Source:       "my-yb-instance",
                    Description:  "some description",
                    Statement:    "SELECT * FROM SQL_STATEMENT;\n",
                    AuthRequired: []string{},
                    Parameters: []tools.Parameter{
                        tools.NewStringParameter("name", "some description"),
                    },
                    TemplateParameters: []tools.Parameter{
                        tools.NewStringParameter("tableName", "The table to select hotels from."),
                        tools.NewArrayParameter("fieldArray", "The columns to return for the query.", tools.NewStringParameter("column", "A column name that will be returned from the query.")),
                    },
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }
}
@@ -23,6 +23,7 @@ import (
	"net/http"
	"os"
	"regexp"
	"sort"
	"strings"
	"testing"
	"time"
@@ -112,6 +113,12 @@ func TestBigQueryToolEndpoints(t *testing.T) {
		strings.ReplaceAll(uuid.New().String(), "-", ""),
	)

	tableNameAnalyzeContribution := fmt.Sprintf("`%s.%s.analyze_contribution_table_%s`",
		BigqueryProject,
		datasetName,
		strings.ReplaceAll(uuid.New().String(), "-", ""),
	)

	// set up data for param tool
	createParamTableStmt, insertParamTableStmt, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, paramTestParams := getBigQueryParamToolInfo(tableNameParam)
	teardownTable1 := setupBigQueryTable(t, ctx, client, createParamTableStmt, insertParamTableStmt, datasetName, tableNameParam, paramTestParams)
@@ -132,6 +139,11 @@ func TestBigQueryToolEndpoints(t *testing.T) {
	teardownTable4 := setupBigQueryTable(t, ctx, client, createForecastTableStmt, insertForecastTableStmt, datasetName, tableNameForecast, forecastTestParams)
	defer teardownTable4(t)

	// set up data for analyze contribution tool
	createAnalyzeContributionTableStmt, insertAnalyzeContributionTableStmt, analyzeContributionTestParams := getBigQueryAnalyzeContributionToolInfo(tableNameAnalyzeContribution)
	teardownTable5 := setupBigQueryTable(t, ctx, client, createAnalyzeContributionTableStmt, insertAnalyzeContributionTableStmt, datasetName, tableNameAnalyzeContribution, analyzeContributionTestParams)
	defer teardownTable5(t)

	// Write config into a file and pass it to command
	toolsFile := tests.GetToolsConfig(sourceConfig, BigqueryToolKind, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, authToolStmt)
	toolsFile = addClientAuthSourceConfig(t, toolsFile)
@@ -181,6 +193,7 @@ func TestBigQueryToolEndpoints(t *testing.T) {
	runBigQueryExecuteSqlToolInvokeTest(t, select1Want, invokeParamWant, tableNameParam, ddlWant)
	runBigQueryExecuteSqlToolInvokeDryRunTest(t, datasetName)
	runBigQueryForecastToolInvokeTest(t, tableNameForecast)
	runBigQueryAnalyzeContributionToolInvokeTest(t, tableNameAnalyzeContribution)
	runBigQueryDataTypeTests(t)
	runBigQueryListDatasetToolInvokeTest(t, datasetName)
	runBigQueryGetDatasetInfoToolInvokeTest(t, datasetName, datasetInfoWant)
@@ -189,6 +202,102 @@ func TestBigQueryToolEndpoints(t *testing.T) {
	runBigQueryConversationalAnalyticsInvokeTest(t, datasetName, tableName, dataInsightsWant)
}

func TestBigQueryToolWithDatasetRestriction(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	client, err := initBigQueryConnection(BigqueryProject)
	if err != nil {
		t.Fatalf("unable to create BigQuery client: %s", err)
	}

	// Create two datasets, one allowed, one not.
	baseName := strings.ReplaceAll(uuid.New().String(), "-", "")
	allowedDatasetName1 := fmt.Sprintf("allowed_dataset_1_%s", baseName)
	allowedDatasetName2 := fmt.Sprintf("allowed_dataset_2_%s", baseName)
	disallowedDatasetName := fmt.Sprintf("disallowed_dataset_%s", baseName)
	allowedTableName1 := "allowed_table_1"
	allowedTableName2 := "allowed_table_2"
	disallowedTableName := "disallowed_table"
	allowedForecastTableName1 := "allowed_forecast_table_1"
	allowedForecastTableName2 := "allowed_forecast_table_2"
	disallowedForecastTableName := "disallowed_forecast_table"

	// Setup allowed table
	allowedTableNameParam1 := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, allowedDatasetName1, allowedTableName1)
	createAllowedTableStmt1 := fmt.Sprintf("CREATE TABLE %s (id INT64)", allowedTableNameParam1)
	teardownAllowed1 := setupBigQueryTable(t, ctx, client, createAllowedTableStmt1, "", allowedDatasetName1, allowedTableNameParam1, nil)
	defer teardownAllowed1(t)

	allowedTableNameParam2 := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, allowedDatasetName2, allowedTableName2)
	createAllowedTableStmt2 := fmt.Sprintf("CREATE TABLE %s (id INT64)", allowedTableNameParam2)
	teardownAllowed2 := setupBigQueryTable(t, ctx, client, createAllowedTableStmt2, "", allowedDatasetName2, allowedTableNameParam2, nil)
	defer teardownAllowed2(t)

	// Setup allowed forecast table
	allowedForecastTableFullName1 := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, allowedDatasetName1, allowedForecastTableName1)
	createForecastStmt1, insertForecastStmt1, forecastParams1 := getBigQueryForecastToolInfo(allowedForecastTableFullName1)
	teardownAllowedForecast1 := setupBigQueryTable(t, ctx, client, createForecastStmt1, insertForecastStmt1, allowedDatasetName1, allowedForecastTableFullName1, forecastParams1)
	defer teardownAllowedForecast1(t)

	allowedForecastTableFullName2 := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, allowedDatasetName2, allowedForecastTableName2)
	createForecastStmt2, insertForecastStmt2, forecastParams2 := getBigQueryForecastToolInfo(allowedForecastTableFullName2)
	teardownAllowedForecast2 := setupBigQueryTable(t, ctx, client, createForecastStmt2, insertForecastStmt2, allowedDatasetName2, allowedForecastTableFullName2, forecastParams2)
	defer teardownAllowedForecast2(t)

	// Setup disallowed table
	disallowedTableNameParam := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, disallowedDatasetName, disallowedTableName)
	createDisallowedTableStmt := fmt.Sprintf("CREATE TABLE %s (id INT64)", disallowedTableNameParam)
	teardownDisallowed := setupBigQueryTable(t, ctx, client, createDisallowedTableStmt, "", disallowedDatasetName, disallowedTableNameParam, nil)
	defer teardownDisallowed(t)

	// Setup disallowed forecast table
	disallowedForecastTableFullName := fmt.Sprintf("`%s.%s.%s`", BigqueryProject, disallowedDatasetName, disallowedForecastTableName)
	createDisallowedForecastStmt, insertDisallowedForecastStmt, disallowedForecastParams := getBigQueryForecastToolInfo(disallowedForecastTableFullName)
	teardownDisallowedForecast := setupBigQueryTable(t, ctx, client, createDisallowedForecastStmt, insertDisallowedForecastStmt, disallowedDatasetName, disallowedForecastTableFullName, disallowedForecastParams)
	defer teardownDisallowedForecast(t)

	// Configure source with dataset restriction.
	sourceConfig := getBigQueryVars(t)
	sourceConfig["allowedDatasets"] = []string{allowedDatasetName1, allowedDatasetName2}

	// Configure tool
	toolsConfig := map[string]any{
		"list-table-ids-restricted": map[string]any{
			"kind":        "bigquery-list-table-ids",
			"source":      "my-instance",
			"description": "Tool to list table within a dataset",
		},
	}

	// Create config file
	config := map[string]any{
		"sources": map[string]any{
			"my-instance": sourceConfig,
		},
		"tools": toolsConfig,
	}

	// Start server
	cmd, cleanup, err := tests.StartCmd(ctx, config)
	if err != nil {
		t.Fatalf("command initialization returned an error: %s", err)
	}
	defer cleanup()

	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	out, err := testutils.WaitForString(waitCtx, regexp.MustCompile(`Server ready to serve`), cmd.Out)
	if err != nil {
		t.Logf("toolbox command logs: \n%s", out)
		t.Fatalf("toolbox didn't start successfully: %s", err)
	}

	// Run tests
	runListTableIdsWithRestriction(t, allowedDatasetName1, disallowedDatasetName, allowedTableName1, allowedForecastTableName1)
	runListTableIdsWithRestriction(t, allowedDatasetName2, disallowedDatasetName, allowedTableName2, allowedForecastTableName2)
}

// getBigQueryParamToolInfo returns statements and param for my-tool for bigquery kind
func getBigQueryParamToolInfo(tableName string) (string, string, string, string, string, string, []bigqueryapi.QueryParameter) {
	createStatement := fmt.Sprintf(`
@@ -244,7 +353,7 @@ func getBigQueryForecastToolInfo(tableName string) (string, string, []bigqueryap
	createStatement := fmt.Sprintf(`
		CREATE TABLE IF NOT EXISTS %s (ts TIMESTAMP, data FLOAT64, id STRING);`, tableName)
	insertStatement := fmt.Sprintf(`
		INSERT INTO %s (ts, data, id) VALUES
		(?, ?, ?), (?, ?, ?), (?, ?, ?),
		(?, ?, ?), (?, ?, ?), (?, ?, ?);`, tableName)
	params := []bigqueryapi.QueryParameter{
@@ -258,6 +367,26 @@ func getBigQueryForecastToolInfo(tableName string) (string, string, []bigqueryap
	return createStatement, insertStatement, params
}

// getBigQueryAnalyzeContributionToolInfo returns statements and params for the analyze-contribution tool.
func getBigQueryAnalyzeContributionToolInfo(tableName string) (string, string, []bigqueryapi.QueryParameter) {
	createStatement := fmt.Sprintf(`
		CREATE TABLE IF NOT EXISTS %s (dim1 STRING, dim2 STRING, is_test BOOL, metric FLOAT64);`, tableName)
	insertStatement := fmt.Sprintf(`
		INSERT INTO %s (dim1, dim2, is_test, metric) VALUES
		(?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?),
		(?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?);`, tableName)
	params := []bigqueryapi.QueryParameter{
		{Value: "a"}, {Value: "x"}, {Value: true}, {Value: 100.0},
		{Value: "a"}, {Value: "x"}, {Value: false}, {Value: 110.0},
		{Value: "a"}, {Value: "y"}, {Value: true}, {Value: 120.0},
		{Value: "a"}, {Value: "y"}, {Value: false}, {Value: 100.0},
		{Value: "b"}, {Value: "x"}, {Value: true}, {Value: 40.0},
		{Value: "b"}, {Value: "x"}, {Value: false}, {Value: 100.0},
		{Value: "b"}, {Value: "y"}, {Value: true}, {Value: 60.0},
		{Value: "b"}, {Value: "y"}, {Value: false}, {Value: 60.0},
	}
	return createStatement, insertStatement, params
}

// getBigQueryTmplToolStatement returns statements for template parameter test cases for bigquery kind
func getBigQueryTmplToolStatement() (string, string) {
	tmplSelectCombined := "SELECT * FROM {{.tableName}} WHERE id = ? ORDER BY id"
@@ -295,19 +424,21 @@ func setupBigQueryTable(t *testing.T, ctx context.Context, client *bigqueryapi.C
		t.Fatalf("Create table job for %s failed: %v", tableName, err)
	}

	if len(params) > 0 {
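		// Only run the insert job when seed parameters were provided; callers
		// such as the dataset-restriction test create empty tables with nil params.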
		// Insert test data
		insertQuery := client.Query(insertStatement)
		insertQuery.Parameters = params
		insertJob, err := insertQuery.Run(ctx)
		if err != nil {
			t.Fatalf("Failed to start insert job for %s: %v", tableName, err)
		}
		insertStatus, err := insertJob.Wait(ctx)
		if err != nil {
			t.Fatalf("Failed to wait for insert job for %s: %v", tableName, err)
		}
		if err := insertStatus.Err(); err != nil {
			t.Fatalf("Insert job for %s failed: %v", tableName, err)
		}
	}

	return func(t *testing.T) {
@@ -383,6 +514,24 @@ func addBigQueryPrebuiltToolsConfig(t *testing.T, config map[string]any) map[str
		"source":      "my-client-auth-source",
		"description": "Tool to forecast time series data with auth.",
	}
	tools["my-analyze-contribution-tool"] = map[string]any{
		"kind":        "bigquery-analyze-contribution",
		"source":      "my-instance",
		"description": "Tool to analyze contribution.",
	}
	tools["my-auth-analyze-contribution-tool"] = map[string]any{
		"kind":        "bigquery-analyze-contribution",
		"source":      "my-instance",
		"description": "Tool to analyze contribution with auth.",
		"authRequired": []string{
			"my-google-auth",
		},
	}
	tools["my-client-auth-analyze-contribution-tool"] = map[string]any{
		"kind":        "bigquery-analyze-contribution",
		"source":      "my-client-auth-source",
		"description": "Tool to analyze contribution with auth.",
	}
	tools["my-list-dataset-ids-tool"] = map[string]any{
		"kind":        "bigquery-list-dataset-ids",
		"source":      "my-instance",
@@ -542,6 +691,7 @@ func runBigQueryExecuteSqlToolInvokeTest(t *testing.T, select1Want, invokeParamW
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -824,6 +974,7 @@ func runBigQueryForecastToolInvokeTest(t *testing.T, tableName string) {
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	historyDataTable := strings.ReplaceAll(tableName, "`", "")
	historyDataQuery := fmt.Sprintf("SELECT ts, data, id FROM %s", tableName)
@@ -950,6 +1101,127 @@ func runBigQueryForecastToolInvokeTest(t *testing.T, tableName string) {
	}
}

func runBigQueryAnalyzeContributionToolInvokeTest(t *testing.T, tableName string) {
	idToken, err := tests.GetGoogleIdToken(tests.ClientId)
	if err != nil {
		t.Fatalf("error getting Google ID token: %s", err)
	}

	// Get access token
	accessToken, err := sources.GetIAMAccessToken(t.Context())
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	dataTable := strings.ReplaceAll(tableName, "`", "")

	invokeTcs := []struct {
		name          string
		api           string
		requestHeader map[string]string
		requestBody   io.Reader
		want          string
		isErr         bool
	}{
		{
			name:          "invoke my-analyze-contribution-tool without required params",
			api:           "http://127.0.0.1:5000/api/tool/my-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s"}`, dataTable))),
			isErr:         true,
		},
		{
			name:          "invoke my-analyze-contribution-tool with table",
			api:           "http://127.0.0.1:5000/api/tool/my-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			want:          `"relative_difference"`,
			isErr:         false,
		},
		{
			name:          "invoke my-auth-analyze-contribution-tool with auth token",
			api:           "http://127.0.0.1:5000/api/tool/my-auth-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{"my-google-auth_token": idToken},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			want:          `"relative_difference"`,
			isErr:         false,
		},
		{
			name:          "invoke my-auth-analyze-contribution-tool with invalid auth token",
			api:           "http://127.0.0.1:5000/api/tool/my-auth-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{"my-google-auth_token": "INVALID_TOKEN"},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			isErr:         true,
		},
		{
			name:          "Invoke my-client-auth-analyze-contribution-tool with auth token",
			api:           "http://127.0.0.1:5000/api/tool/my-client-auth-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{"Authorization": accessToken},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			want:          `"relative_difference"`,
			isErr:         false,
		},
		{
			name:          "Invoke my-client-auth-analyze-contribution-tool without auth token",
			api:           "http://127.0.0.1:5000/api/tool/my-client-auth-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			isErr:         true,
		},
		{
			name:          "Invoke my-client-auth-analyze-contribution-tool with invalid auth token",
			api:           "http://127.0.0.1:5000/api/tool/my-client-auth-analyze-contribution-tool/invoke",
			requestHeader: map[string]string{"Authorization": "Bearer invalid-token"},
			requestBody:   bytes.NewBuffer([]byte(fmt.Sprintf(`{"input_data": "%s", "contribution_metric": "SUM(metric)", "is_test_col": "is_test", "dimension_id_cols": ["dim1", "dim2"]}`, dataTable))),
			isErr:         true,
		},
	}
	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			// Send Tool invocation request
			req, err := http.NewRequest(http.MethodPost, tc.api, tc.requestBody)
			if err != nil {
				t.Fatalf("unable to create request: %s", err)
			}
			req.Header.Add("Content-type", "application/json")
			for k, v := range tc.requestHeader {
				req.Header.Add(k, v)
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %s", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != http.StatusOK {
				if tc.isErr {
					return
				}
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("response status code is not 200, got %d: %s", resp.StatusCode, string(bodyBytes))
			}

			// Check response body
			var body map[string]interface{}
			err = json.NewDecoder(resp.Body).Decode(&body)
			if err != nil {
				t.Fatalf("error parsing response body")
			}

			got, ok := body["result"].(string)
			if !ok {
				t.Fatalf("unable to find result in response body")
			}

			if !strings.Contains(got, tc.want) {
				t.Fatalf("expected %q to contain %q, but it did not", got, tc.want)
			}
		})
	}
}

func runBigQueryDataTypeTests(t *testing.T) {
	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -1040,6 +1312,7 @@ func runBigQueryListDatasetToolInvokeTest(t *testing.T, datasetWant string) {
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -1161,6 +1434,7 @@ func runBigQueryGetDatasetInfoToolInvokeTest(t *testing.T, datasetName, datasetI
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -1310,6 +1584,7 @@ func runBigQueryListTableIdsToolInvokeTest(t *testing.T, datasetName, tablename_
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -1459,6 +1734,7 @@ func runBigQueryGetTableInfoToolInvokeTest(t *testing.T, datasetName, tableName,
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	// Test tool invoke endpoint
	invokeTcs := []struct {
@@ -1608,6 +1884,7 @@ func runBigQueryConversationalAnalyticsInvokeTest(t *testing.T, datasetName, tab
	if err != nil {
		t.Fatalf("error getting access token from ADC: %s", err)
	}
	accessToken = "Bearer " + accessToken

	tableRefsJSON := fmt.Sprintf(`[{"projectId":"%s","datasetId":"%s","tableId":"%s"}]`, BigqueryProject, datasetName, tableName)

@@ -1724,3 +2001,86 @@ func runBigQueryConversationalAnalyticsInvokeTest(t *testing.T, datasetName, tab
		})
	}
}

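// runListTableIdsWithRestriction checks that the list-table-ids-restricted tool
// honors dataset restrictions: the allowed dataset returns its table IDs, while
// the disallowed dataset is rejected with an access-denied error.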
func runListTableIdsWithRestriction(t *testing.T, allowedDatasetName, disallowedDatasetName string, allowedTableNames ...string) {
	sort.Strings(allowedTableNames)
	var quotedNames []string
	for _, name := range allowedTableNames {
		quotedNames = append(quotedNames, fmt.Sprintf(`"%s"`, name))
	}
	wantResult := fmt.Sprintf(`[%s]`, strings.Join(quotedNames, ","))

	testCases := []struct {
		name           string
		dataset        string
		wantStatusCode int
		wantInResult   string
		wantInError    string
	}{
		{
			name:           "invoke on allowed dataset",
			dataset:        allowedDatasetName,
			wantStatusCode: http.StatusOK,
			wantInResult:   wantResult,
		},
		{
			name:           "invoke on disallowed dataset",
			dataset:        disallowedDatasetName,
			wantStatusCode: http.StatusBadRequest, // Or the specific error code returned
			wantInError:    fmt.Sprintf("access denied to dataset '%s'", disallowedDatasetName),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			body := bytes.NewBuffer([]byte(fmt.Sprintf(`{"dataset":"%s"}`, tc.dataset)))
			req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:5000/api/tool/list-table-ids-restricted/invoke", body)
			if err != nil {
				t.Fatalf("unable to create request: %s", err)
			}
			req.Header.Add("Content-type", "application/json")
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %s", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != tc.wantStatusCode {
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("unexpected status code: got %d, want %d. Body: %s", resp.StatusCode, tc.wantStatusCode, string(bodyBytes))
			}

			if tc.wantInResult != "" {
				var respBody map[string]interface{}
				if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil {
					t.Fatalf("error parsing response body: %v", err)
				}
				got, ok := respBody["result"].(string)
				if !ok {
					t.Fatalf("unable to find result in response body")
				}

				var gotSlice []string
				if err := json.Unmarshal([]byte(got), &gotSlice); err != nil {
					t.Fatalf("error unmarshalling result: %v", err)
				}
				sort.Strings(gotSlice)
				sortedGotBytes, err := json.Marshal(gotSlice)
				if err != nil {
					t.Fatalf("error marshalling sorted result: %v", err)
				}

				if string(sortedGotBytes) != tc.wantInResult {
					t.Errorf("unexpected result: got %q, want %q", string(sortedGotBytes), tc.wantInResult)
				}
			}

			if tc.wantInError != "" {
				bodyBytes, _ := io.ReadAll(resp.Body)
				if !strings.Contains(string(bodyBytes), tc.wantInError) {
					t.Errorf("unexpected error message: got %q, want to contain %q", string(bodyBytes), tc.wantInError)
				}
			}
		})
	}
}

@@ -31,6 +31,7 @@ import (
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools"
	clickhouseexecutesql "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouseexecutesql"
	clickhouselistdatabases "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouselistdatabases"
	clickhousesql "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhousesql"
	"github.com/googleapis/genai-toolbox/tests"
	"go.opentelemetry.io/otel/trace/noop"
@@ -1012,3 +1013,103 @@ func setupClickHouseSQLTable(t *testing.T, ctx context.Context, pool *sql.DB, cr
		}
	}
}

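// TestClickHouseListDatabasesTool verifies the clickhouse-list-databases tool
// against a live ClickHouse instance, using a uniquely named test database.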
func TestClickHouseListDatabasesTool(t *testing.T) {
	_ = getClickHouseVars(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	pool, err := initClickHouseConnectionPool(ClickHouseHost, ClickHousePort, ClickHouseUser, ClickHousePass, ClickHouseDatabase, ClickHouseProtocol)
	if err != nil {
		t.Fatalf("unable to create ClickHouse connection pool: %s", err)
	}
	defer pool.Close()

	// Create a test database
	testDBName := "test_list_db_" + strings.ReplaceAll(uuid.New().String(), "-", "")[:8]
	_, err = pool.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", testDBName))
	if err != nil {
		t.Fatalf("Failed to create test database: %v", err)
	}
	defer func() {
		_, _ = pool.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", testDBName))
	}()

	t.Run("ListDatabases", func(t *testing.T) {
		toolConfig := clickhouselistdatabases.Config{
			Name:        "test-list-databases",
			Kind:        "clickhouse-list-databases",
			Source:      "test-clickhouse",
			Description: "Test listing databases",
		}

		source := createMockSource(t, pool)
		sourcesMap := map[string]sources.Source{
			"test-clickhouse": source,
		}

		tool, err := toolConfig.Initialize(sourcesMap)
		if err != nil {
			t.Fatalf("Failed to initialize tool: %v", err)
		}

		params := tools.ParamValues{}

		result, err := tool.Invoke(ctx, params, "")
		if err != nil {
			t.Fatalf("Failed to list databases: %v", err)
		}

		databases, ok := result.([]map[string]any)
		if !ok {
			t.Fatalf("Expected result to be []map[string]any, got %T", result)
		}

		// Should contain at least the system/default databases and our test database
		if len(databases) < 2 {
			t.Errorf("Expected at least 2 databases, got %d", len(databases))
		}

		found := false
		foundDefault := false
		for _, db := range databases {
			if name, ok := db["name"].(string); ok {
				if name == testDBName {
					found = true
				}
				if name == "default" || name == "system" {
					foundDefault = true
				}
			}
		}

		if !found {
			t.Errorf("Test database %s not found in list", testDBName)
		}
		if !foundDefault {
			t.Errorf("Default/system database not found in list")
		}

		t.Logf("Successfully listed %d databases", len(databases))
	})

	t.Run("ListDatabasesWithInvalidSource", func(t *testing.T) {
		toolConfig := clickhouselistdatabases.Config{
			Name:        "test-invalid-source",
			Kind:        "clickhouse-list-databases",
			Source:      "non-existent-source",
			Description: "Test with invalid source",
		}

		sourcesMap := map[string]sources.Source{}

		_, err := toolConfig.Initialize(sourcesMap)
		if err == nil {
			t.Error("Expected error for non-existent source, got nil")
		} else {
			t.Logf("Got expected error for invalid source: %v", err)
		}
	})

	t.Logf("✅ clickhouse-list-databases tool tests completed successfully")
}

113 tests/cloudmonitoring/cloud_monitoring_integration_test.go Normal file
@@ -0,0 +1,113 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudmonitoring

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/cloudmonitoring"
)

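// TestTool_Invoke verifies a successful PromQL query against a mocked
// Cloud Monitoring Prometheus endpoint.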
func TestTool_Invoke(t *testing.T) {
	t.Parallel()

	// Mock the monitoring server
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/v1/projects/test-project/location/global/prometheus/api/v1/query" {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		query := r.URL.Query().Get("query")
		if query != "up" {
			http.Error(w, "bad request", http.StatusBadRequest)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintln(w, `{"status":"success","data":{"resultType":"vector","result":[]}}`)
	}))
	defer server.Close()

	// Create a new observability tool
	tool := &cloudmonitoring.Tool{
		Name:        "test-cloudmonitoring",
		Kind:        "cloud-monitoring-query-prometheus",
		Description: "Test Cloudmonitoring Tool",
		AllParams:   tools.Parameters{},
		BaseURL:     server.URL,
		Client:      &http.Client{},
	}

	// Define the test parameters
	params := tools.ParamValues{
		{Name: "projectId", Value: "test-project"},
		{Name: "query", Value: "up"},
	}

	// Invoke the tool
	result, err := tool.Invoke(context.Background(), params, "")
	if err != nil {
		t.Fatalf("Invoke() error = %v", err)
	}

	// Check the result
	expected := map[string]any{
		"status": "success",
		"data": map[string]any{
			"resultType": "vector",
			"result":     []any{},
		},
	}
	if diff := cmp.Diff(expected, result); diff != "" {
		t.Errorf("Invoke() result mismatch (-want +got):\n%s", diff)
	}
}

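// TestTool_Invoke_Error verifies that a server-side failure surfaces as an
// error from Invoke.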
func TestTool_Invoke_Error(t *testing.T) {
	t.Parallel()

	// Mock the monitoring server to return an error
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "internal server error", http.StatusInternalServerError)
	}))
	defer server.Close()

	// Create a new observability tool
	tool := &cloudmonitoring.Tool{
		Name:        "test-cloudmonitoring",
		Kind:        "cloud-monitoring-query-prometheus",
		Description: "Test Cloudmonitoring Tool",
		AllParams:   tools.Parameters{},
		BaseURL:     server.URL,
		Client:      &http.Client{},
	}

	// Define the test parameters
	params := tools.ParamValues{
		{Name: "projectId", Value: "test-project"},
		{Name: "query", Value: "up"},
	}

	// Invoke the tool
	_, err := tool.Invoke(context.Background(), params, "")
	if err == nil {
		t.Fatal("Invoke() error = nil, want error")
	}
}

114 tests/common.go
@@ -23,6 +23,11 @@ import (
	"fmt"
	"testing"

	"github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/jackc/pgx/v5/pgxpool"
)
@@ -645,3 +650,112 @@ func GetRedisValkeyToolsConfig(sourceConfig map[string]any, toolKind string) map
	}
	return toolsFile
}

// TestCloudSQLMySQL_IPTypeParsingFromYAML verifies the IPType field parsing from YAML
// for the cloud-sql-mysql source, mimicking the structure of tests in cloudsql_mysql_test.go.
func TestCloudSQLMySQL_IPTypeParsingFromYAML(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "IPType Defaulting to Public",
			in: `
			sources:
				my-mysql-instance:
					kind: cloud-sql-mysql
					project: my-project
					region: my-region
					instance: my-instance
					database: my_db
					user: my_user
					password: my_pass
			`,
			want: server.SourceConfigs{
				"my-mysql-instance": cloudsqlmysql.Config{
					Name:     "my-mysql-instance",
					Kind:     cloudsqlmysql.SourceKind,
					Project:  "my-project",
					Region:   "my-region",
					Instance: "my-instance",
					IPType:   "public", // Default value
					Database: "my_db",
					User:     "my_user",
					Password: "my_pass",
				},
			},
		},
		{
			desc: "IPType Explicit Public",
			in: `
			sources:
				my-mysql-instance:
					kind: cloud-sql-mysql
					project: my-project
					region: my-region
					instance: my-instance
					ipType: Public
					database: my_db
					user: my_user
					password: my_pass
			`,
			want: server.SourceConfigs{
				"my-mysql-instance": cloudsqlmysql.Config{
					Name:     "my-mysql-instance",
					Kind:     cloudsqlmysql.SourceKind,
					Project:  "my-project",
					Region:   "my-region",
					Instance: "my-instance",
					IPType:   "public",
					Database: "my_db",
					User:     "my_user",
					Password: "my_pass",
				},
			},
		},
		{
			desc: "IPType Explicit Private",
			in: `
			sources:
				my-mysql-instance:
					kind: cloud-sql-mysql
					project: my-project
					region: my-region
					instance: my-instance
					ipType: private
					database: my_db
					user: my_user
					password: my_pass
			`,
			want: server.SourceConfigs{
				"my-mysql-instance": cloudsqlmysql.Config{
					Name:     "my-mysql-instance",
					Kind:     cloudsqlmysql.SourceKind,
					Project:  "my-project",
					Region:   "my-region",
					Instance: "my-instance",
					IPType:   "private",
					Database: "my_db",
					User:     "my_user",
					Password: "my_pass",
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: diff (-want +got):\n%s", cmp.Diff(tc.want, got.Sources))
			}
		})
	}
}

@@ -131,6 +131,8 @@ func TestFirestoreToolEndpoints(t *testing.T) {
	// Run specific Firestore tool tests
	runFirestoreGetDocumentsTest(t, docPath1, docPath2)
	runFirestoreQueryCollectionTest(t, testCollectionName)
	runFirestoreQueryTest(t, testCollectionName)
	runFirestoreQuerySelectArrayTest(t, testCollectionName)
	runFirestoreListCollectionsTest(t, testCollectionName, testSubCollectionName, docPath1)
	runFirestoreAddDocumentsTest(t, testCollectionName)
	runFirestoreUpdateDocumentTest(t, testCollectionName, testDocID1)
@@ -562,6 +564,63 @@ func getFirestoreToolsConfig(sourceConfig map[string]any) map[string]any {
			"source":      "my-instance",
			"description": "Query a Firestore collection",
		},
		"firestore-query-param": map[string]any{
			"kind":           "firestore-query",
			"source":         "my-instance",
			"description":    "Query a Firestore collection with parameterizable filters",
			"collectionPath": "{{.collection}}",
			"filters": `{
				"field": "age", "op": "{{.operator}}", "value": {"integerValue": "{{.ageValue}}"}
			}`,
			"limit": 10,
			"parameters": []map[string]any{
				{
					"name":        "collection",
					"type":        "string",
					"description": "Collection to query",
					"required":    true,
				},
				{
					"name":        "operator",
					"type":        "string",
					"description": "Comparison operator",
					"required":    true,
				},
				{
					"name":        "ageValue",
					"type":        "string",
					"description": "Age value to compare",
					"required":    true,
				},
			},
		},
		"firestore-query-select-array": map[string]any{
			"kind":           "firestore-query",
			"source":         "my-instance",
			"description":    "Query with array-based select fields",
			"collectionPath": "{{.collection}}",
			"select":         []string{"{{.fields}}"},
			"limit":          10,
			"parameters": []map[string]any{
				{
					"name":        "collection",
					"type":        "string",
					"description": "Collection to query",
					"required":    true,
				},
				{
					"name":        "fields",
					"type":        "array",
					"description": "Fields to select",
					"required":    true,
					"items": map[string]any{
						"name":        "field",
						"type":        "string",
						"description": "field",
					},
				},
			},
		},
		"firestore-get-rules": map[string]any{
			"kind":   "firestore-get-rules",
			"source": "my-instance",
@@ -1356,6 +1415,246 @@ func runFirestoreDeleteDocumentsTest(t *testing.T, docPath string) {
	}
}

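// runFirestoreQueryTest exercises the firestore-query tool with template
// parameters substituted into the collection path and filter expression.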
func runFirestoreQueryTest(t *testing.T, collectionName string) {
	invokeTcs := []struct {
		name        string
		api         string
		requestBody io.Reader
		wantRegex   string
		isErr       bool
	}{
		{
			name: "query with parameterized filters - age greater than",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-param/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"operator": ">",
				"ageValue": "25"
			}`, collectionName))),
			wantRegex: `"name":"Alice"`,
			isErr:     false,
		},
		{
			name: "query with parameterized filters - exact name match",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-param/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"operator": "==",
				"ageValue": "25"
			}`, collectionName))),
			wantRegex: `"name":"Bob"`,
			isErr:     false,
		},
		{
			name: "query with parameterized filters - age less than or equal",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-param/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"operator": "<=",
				"ageValue": "29"
			}`, collectionName))),
			wantRegex: `"name":"Bob"`,
			isErr:     false,
		},
		{
			name:        "missing required parameter",
			api:         "http://127.0.0.1:5000/api/tool/firestore-query-param/invoke",
			requestBody: bytes.NewBuffer([]byte(`{"collection": "test", "operator": ">"}`)),
			isErr:       true,
		},
		{
			name: "query non-existent collection with parameters",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-param/invoke",
			requestBody: bytes.NewBuffer([]byte(`{
				"collection": "non-existent-collection",
				"operator": "==",
				"ageValue": "30"
			}`)),
			wantRegex: `^\[\]$`, // Empty array
			isErr:     false,
		},
	}

	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, tc.api, tc.requestBody)
			if err != nil {
				t.Fatalf("unable to create request: %s", err)
			}
			req.Header.Add("Content-type", "application/json")

			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %s", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != http.StatusOK {
				if tc.isErr {
					return
				}
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("response status code is not 200, got %d: %s", resp.StatusCode, string(bodyBytes))
			}

			var body map[string]interface{}
			err = json.NewDecoder(resp.Body).Decode(&body)
			if err != nil {
				t.Fatalf("error parsing response body: %v", err)
			}

			got, ok := body["result"].(string)
			if !ok {
				t.Fatalf("unable to find result in response body")
			}

			if tc.wantRegex != "" {
				matched, err := regexp.MatchString(tc.wantRegex, got)
				if err != nil {
					t.Fatalf("invalid regex pattern: %v", err)
				}
				if !matched {
					t.Fatalf("result does not match expected pattern.\nGot: %s\nWant pattern: %s", got, tc.wantRegex)
				}
			}
		})
	}
}

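// runFirestoreQuerySelectArrayTest checks that an array-valued select
// parameter limits the returned document fields as requested.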
func runFirestoreQuerySelectArrayTest(t *testing.T, collectionName string) {
	invokeTcs := []struct {
		name           string
		api            string
		requestBody    io.Reader
		wantRegex      string
		validateFields bool
		isErr          bool
	}{
		{
			name: "query with array select fields - single field",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-select-array/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"fields": ["name"]
			}`, collectionName))),
			wantRegex:      `"name":"`,
			validateFields: true,
			isErr:          false,
		},
		{
			name: "query with array select fields - multiple fields",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-select-array/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"fields": ["name", "age"]
			}`, collectionName))),
			wantRegex:      `"name":".*"age":`,
			validateFields: true,
			isErr:          false,
		},
		{
			name: "query with empty array select fields",
			api:  "http://127.0.0.1:5000/api/tool/firestore-query-select-array/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{
				"collection": "%s",
				"fields": []
			}`, collectionName))),
			wantRegex: `\[.*\]`, // Should return documents with all fields
			isErr:     false,
		},
		{
			name:        "missing fields parameter",
			api:         "http://127.0.0.1:5000/api/tool/firestore-query-select-array/invoke",
			requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"collection": "%s"}`, collectionName))),
			isErr:       true,
		},
	}

	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, tc.api, tc.requestBody)
			if err != nil {
				t.Fatalf("unable to create request: %s", err)
			}
			req.Header.Add("Content-type", "application/json")

			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %s", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != http.StatusOK {
				if tc.isErr {
					return
				}
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("response status code is not 200, got %d: %s", resp.StatusCode, string(bodyBytes))
			}

			var body map[string]interface{}
			err = json.NewDecoder(resp.Body).Decode(&body)
			if err != nil {
				t.Fatalf("error parsing response body: %v", err)
			}

			got, ok := body["result"].(string)
			if !ok {
				t.Fatalf("unable to find result in response body")
			}

			if tc.wantRegex != "" {
				matched, err := regexp.MatchString(tc.wantRegex, got)
				if err != nil {
					t.Fatalf("invalid regex pattern: %v", err)
				}
				if !matched {
					t.Fatalf("result does not match expected pattern.\nGot: %s\nWant pattern: %s", got, tc.wantRegex)
				}
			}

			// Additional validation for field selection
			if tc.validateFields {
				// Parse the result to check if only selected fields are present
				var results []map[string]interface{}
				err = json.Unmarshal([]byte(got), &results)
				if err != nil {
					t.Fatalf("error parsing result as JSON array: %v", err)
				}

				// For single field test, ensure only 'name' field is present in data
				if tc.name == "query with array select fields - single field" && len(results) > 0 {
					for _, result := range results {
						if data, ok := result["data"].(map[string]interface{}); ok {
							if _, hasName := data["name"]; !hasName {
								t.Fatalf("expected 'name' field in data, but not found")
							}
							// The 'age' field should not be present when only 'name' is selected
							if _, hasAge := data["age"]; hasAge {
								t.Fatalf("unexpected 'age' field in data when only 'name' was selected")
							}
						}
					}
				}

				// For multiple fields test, ensure both fields are present
				if tc.name == "query with array select fields - multiple fields" && len(results) > 0 {
					for _, result := range results {
						if data, ok := result["data"].(map[string]interface{}); ok {
							if _, hasName := data["name"]; !hasName {
								t.Fatalf("expected 'name' field in data, but not found")
							}
							if _, hasAge := data["age"]; !hasAge {
								t.Fatalf("expected 'age' field in data, but not found")
							}
						}
					}
				}
			}
		})
	}
}

func runFirestoreQueryCollectionTest(t *testing.T, collectionName string) {
	invokeTcs := []struct {
		name string
@@ -1385,7 +1684,7 @@ func runFirestoreQueryCollectionTest(t *testing.T, collectionName string) {
				"orderBy": "{\"field\": \"age\", \"direction\": \"DESCENDING\"}",
				"limit": 2
			}`, collectionName))),
			wantRegex: `"age":35.*"age":30`, // Should be ordered by age descending (Charlie=35, Alice=30, Bob=25)
			wantRegex: `"age":35.*"age":30`, // Should be ordered by age descending (Charlie=35, Alice=30)
			isErr: false,
		},
		{

@@ -15,15 +15,21 @@
package mysql

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/uuid"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/tests"
@@ -32,6 +38,7 @@ import (
var (
	MySQLSourceKind         = "mysql"
	MySQLToolKind           = "mysql-sql"
	MySQLListTablesToolKind = "mysql-list-tables"
	MySQLDatabase           = os.Getenv("MYSQL_DATABASE")
	MySQLHost               = os.Getenv("MYSQL_HOST")
	MySQLPort               = os.Getenv("MYSQL_PORT")
@@ -63,6 +70,20 @@ func getMySQLVars(t *testing.T) map[string]any {
	}
}

func addPrebuiltToolConfig(t *testing.T, config map[string]any) map[string]any {
	tools, ok := config["tools"].(map[string]any)
	if !ok {
		t.Fatalf("unable to get tools from config")
	}
	tools["list_tables"] = map[string]any{
		"kind":        MySQLListTablesToolKind,
		"source":      "my-instance",
		"description": "Lists tables in the database.",
	}
	config["tools"] = tools
	return config
}

// Copied over from mysql.go
func initMySQLConnectionPool(host, port, user, pass, dbname string) (*sql.DB, error) {
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?parseTime=true", user, pass, host, port, dbname)
@@ -108,6 +129,8 @@ func TestMySQLToolEndpoints(t *testing.T) {
	tmplSelectCombined, tmplSelectFilterCombined := tests.GetMySQLTmplToolStatement()
	toolsFile = tests.AddTemplateParamConfig(t, toolsFile, MySQLToolKind, tmplSelectCombined, tmplSelectFilterCombined, "")

	toolsFile = addPrebuiltToolConfig(t, toolsFile)

	cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
	if err != nil {
		t.Fatalf("command initialization returned an error: %s", err)
@@ -131,4 +154,182 @@ func TestMySQLToolEndpoints(t *testing.T) {
	tests.RunMCPToolCallMethod(t, mcpMyFailToolWant, mcpSelect1Want)
	tests.RunExecuteSqlToolInvokeTest(t, createTableStatement, select1Want)
	tests.RunToolInvokeWithTemplateParameters(t, tableNameTemplateParam)

	// Run specific MySQL tool tests
	runMySQLListTablesTest(t, tableNameParam, tableNameAuth)
}

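// runMySQLListTablesTest verifies the mysql-list-tables prebuilt tool for
// detailed and simple output formats, multiple tables, and non-existent tables.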
func runMySQLListTablesTest(t *testing.T, tableNameParam, tableNameAuth string) {
	type tableInfo struct {
		ObjectName    string `json:"object_name"`
		SchemaName    string `json:"schema_name"`
		ObjectDetails string `json:"object_details"`
	}

	type column struct {
		DataType        string `json:"data_type"`
		ColumnName      string `json:"column_name"`
		ColumnComment   string `json:"column_comment"`
		ColumnDefault   any    `json:"column_default"`
		IsNotNullable   int    `json:"is_not_nullable"`
		OrdinalPosition int    `json:"ordinal_position"`
	}

	type objectDetails struct {
		Owner       any      `json:"owner"`
		Columns     []column `json:"columns"`
		Comment     string   `json:"comment"`
		Indexes     []any    `json:"indexes"`
		Triggers    []any    `json:"triggers"`
		Constraints []any    `json:"constraints"`
		ObjectName  string   `json:"object_name"`
		ObjectType  string   `json:"object_type"`
		SchemaName  string   `json:"schema_name"`
	}

	paramTableWant := objectDetails{
		ObjectName: tableNameParam,
		SchemaName: MySQLDatabase,
		ObjectType: "TABLE",
		Columns: []column{
			{DataType: "int", ColumnName: "id", IsNotNullable: 1, OrdinalPosition: 1},
			{DataType: "varchar(255)", ColumnName: "name", OrdinalPosition: 2},
		},
		Indexes:     []any{map[string]any{"index_columns": []any{"id"}, "index_name": "PRIMARY", "is_primary": float64(1), "is_unique": float64(1)}},
		Triggers:    []any{},
		Constraints: []any{map[string]any{"constraint_columns": []any{"id"}, "constraint_name": "PRIMARY", "constraint_type": "PRIMARY KEY", "foreign_key_referenced_columns": any(nil), "foreign_key_referenced_table": any(nil), "constraint_definition": ""}},
	}

	authTableWant := objectDetails{
		ObjectName: tableNameAuth,
		SchemaName: MySQLDatabase,
		ObjectType: "TABLE",
		Columns: []column{
			{DataType: "int", ColumnName: "id", IsNotNullable: 1, OrdinalPosition: 1},
			{DataType: "varchar(255)", ColumnName: "name", OrdinalPosition: 2},
			{DataType: "varchar(255)", ColumnName: "email", OrdinalPosition: 3},
		},
		Indexes:     []any{map[string]any{"index_columns": []any{"id"}, "index_name": "PRIMARY", "is_primary": float64(1), "is_unique": float64(1)}},
		Triggers:    []any{},
		Constraints: []any{map[string]any{"constraint_columns": []any{"id"}, "constraint_name": "PRIMARY", "constraint_type": "PRIMARY KEY", "foreign_key_referenced_columns": any(nil), "foreign_key_referenced_table": any(nil), "constraint_definition": ""}},
	}

	invokeTcs := []struct {
		name           string
		requestBody    io.Reader
		wantStatusCode int
		want           any
		isSimple       bool
	}{
		{
			name:           "invoke list_tables detailed output",
			requestBody:    bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s"}`, tableNameAuth)),
			wantStatusCode: http.StatusOK,
			want:           []objectDetails{authTableWant},
		},
		{
			name:           "invoke list_tables simple output",
			requestBody:    bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s", "output_format": "simple"}`, tableNameAuth)),
			wantStatusCode: http.StatusOK,
			want:           []map[string]any{{"name": tableNameAuth}},
			isSimple:       true,
		},
		{
			name:           "invoke list_tables with multiple table names",
			requestBody:    bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s,%s"}`, tableNameParam, tableNameAuth)),
			wantStatusCode: http.StatusOK,
			want:           []objectDetails{authTableWant, paramTableWant},
		},
		{
			name:           "invoke list_tables with one existing and one non-existent table",
			requestBody:    bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s,non_existent_table"}`, tableNameAuth)),
			wantStatusCode: http.StatusOK,
			want:           []objectDetails{authTableWant},
		},
		{
			name:           "invoke list_tables with non-existent table",
			requestBody:    bytes.NewBufferString(`{"table_names": "non_existent_table"}`),
			wantStatusCode: http.StatusOK,
			want:           nil,
		},
	}
	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			const api = "http://127.0.0.1:5000/api/tool/list_tables/invoke"
			req, err := http.NewRequest(http.MethodPost, api, tc.requestBody)
			if err != nil {
				t.Fatalf("unable to create request: %v", err)
			}
			req.Header.Add("Content-type", "application/json")

			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %v", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != tc.wantStatusCode {
				body, _ := io.ReadAll(resp.Body)
				t.Fatalf("wrong status code: got %d, want %d, body: %s", resp.StatusCode, tc.wantStatusCode, string(body))
			}
			if tc.wantStatusCode != http.StatusOK {
				return
			}

			var bodyWrapper struct {
				Result json.RawMessage `json:"result"`
			}
			if err := json.NewDecoder(resp.Body).Decode(&bodyWrapper); err != nil {
				t.Fatalf("error decoding response wrapper: %v", err)
			}

			var resultString string
			if err := json.Unmarshal(bodyWrapper.Result, &resultString); err != nil {
				resultString = string(bodyWrapper.Result)
			}

			var got any
			if tc.isSimple {
				var tables []tableInfo
				if err := json.Unmarshal([]byte(resultString), &tables); err != nil {
					t.Fatalf("failed to unmarshal outer JSON array into []tableInfo: %v", err)
				}
				var details []map[string]any
				for _, table := range tables {
					var d map[string]any
					if err := json.Unmarshal([]byte(table.ObjectDetails), &d); err != nil {
						t.Fatalf("failed to unmarshal nested ObjectDetails string: %v", err)
					}
					details = append(details, d)
				}
				got = details
			} else {
				if resultString == "null" {
					got = nil
				} else {
					var tables []tableInfo
					if err := json.Unmarshal([]byte(resultString), &tables); err != nil {
						t.Fatalf("failed to unmarshal outer JSON array into []tableInfo: %v", err)
					}
					var details []objectDetails
					for _, table := range tables {
						var d objectDetails
						if err := json.Unmarshal([]byte(table.ObjectDetails), &d); err != nil {
							t.Fatalf("failed to unmarshal nested ObjectDetails string: %v", err)
						}
						details = append(details, d)
					}
					got = details
				}
			}

			opts := []cmp.Option{
				cmpopts.SortSlices(func(a, b objectDetails) bool { return a.ObjectName < b.ObjectName }),
				cmpopts.SortSlices(func(a, b column) bool { return a.ColumnName < b.ColumnName }),
				cmpopts.SortSlices(func(a, b map[string]any) bool { return a["name"].(string) < b["name"].(string) }),
			}

			if diff := cmp.Diff(tc.want, got, opts...); diff != "" {
				t.Errorf("unexpected result (-want +got):\n%s", diff)
			}
		})
	}
}

@@ -15,11 +15,17 @@
package postgres

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"reflect"
	"regexp"
	"sort"
	"strings"
	"testing"
	"time"
@@ -31,13 +37,14 @@ import (
)

var (
	PostgresSourceKind = "postgres"
	PostgresToolKind   = "postgres-sql"
	PostgresDatabase   = os.Getenv("POSTGRES_DATABASE")
	PostgresHost       = os.Getenv("POSTGRES_HOST")
	PostgresPort       = os.Getenv("POSTGRES_PORT")
	PostgresUser       = os.Getenv("POSTGRES_USER")
	PostgresPass       = os.Getenv("POSTGRES_PASS")
	PostgresSourceKind         = "postgres"
	PostgresToolKind           = "postgres-sql"
	PostgresListTablesToolKind = "postgres-list-tables"
	PostgresDatabase           = os.Getenv("POSTGRES_DATABASE")
	PostgresHost               = os.Getenv("POSTGRES_HOST")
	PostgresPort               = os.Getenv("POSTGRES_PORT")
	PostgresUser               = os.Getenv("POSTGRES_USER")
	PostgresPass               = os.Getenv("POSTGRES_PASS")
)

func getPostgresVars(t *testing.T) map[string]any {
@@ -64,6 +71,20 @@ func getPostgresVars(t *testing.T) map[string]any {
	}
}

func addPrebuiltToolConfig(t *testing.T, config map[string]any) map[string]any {
	tools, ok := config["tools"].(map[string]any)
	if !ok {
		t.Fatalf("unable to get tools from config")
	}
	tools["list_tables"] = map[string]any{
		"kind":        PostgresListTablesToolKind,
		"source":      "my-instance",
		"description": "Lists tables in the database.",
	}
	config["tools"] = tools
	return config
}

// Copied over from postgres.go
func initPostgresConnectionPool(host, port, user, pass, dbname string) (*pgxpool.Pool, error) {
	// urlExample := "postgres://username:password@localhost:5432/database_name"
@@ -114,6 +135,8 @@ func TestPostgres(t *testing.T) {
	tmplSelectCombined, tmplSelectFilterCombined := tests.GetPostgresSQLTmplToolStatement()
	toolsFile = tests.AddTemplateParamConfig(t, toolsFile, PostgresToolKind, tmplSelectCombined, tmplSelectFilterCombined, "")

	toolsFile = addPrebuiltToolConfig(t, toolsFile)

	cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
	if err != nil {
		t.Fatalf("command initialization returned an error: %s", err)
@@ -137,4 +160,165 @@ func TestPostgres(t *testing.T) {
	tests.RunMCPToolCallMethod(t, mcpMyFailToolWant, mcpSelect1Want)
	tests.RunExecuteSqlToolInvokeTest(t, createTableStatement, select1Want)
	tests.RunToolInvokeWithTemplateParameters(t, tableNameTemplateParam)

	// Run specific Postgres tool tests
	runPostgresListTablesTest(t, tableNameParam, tableNameAuth)
}

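// runPostgresListTablesTest verifies the postgres-list-tables prebuilt tool,
// including invalid output formats and malformed table_names parameters.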
func runPostgresListTablesTest(t *testing.T, tableNameParam, tableNameAuth string) {
	// TableNameParam columns to construct want
	paramTableColumns := fmt.Sprintf(`[
		{"data_type": "integer", "column_name": "id", "column_default": "nextval('%s_id_seq'::regclass)", "is_not_nullable": true, "ordinal_position": 1, "column_comment": null},
		{"data_type": "text", "column_name": "name", "column_default": null, "is_not_nullable": false, "ordinal_position": 2, "column_comment": null}
	]`, tableNameParam)

	// TableNameAuth columns to construct want
	authTableColumns := fmt.Sprintf(`[
		{"data_type": "integer", "column_name": "id", "column_default": "nextval('%s_id_seq'::regclass)", "is_not_nullable": true, "ordinal_position": 1, "column_comment": null},
		{"data_type": "text", "column_name": "name", "column_default": null, "is_not_nullable": false, "ordinal_position": 2, "column_comment": null},
		{"data_type": "text", "column_name": "email", "column_default": null, "is_not_nullable": false, "ordinal_position": 3, "column_comment": null}
	]`, tableNameAuth)

	const (
		// Template to construct detailed output want
		detailedObjectTemplate = `{
			"object_name": "%[1]s", "schema_name": "public",
			"object_details": {
				"owner": "%[3]s", "comment": null,
				"indexes": [{"is_primary": true, "is_unique": true, "index_name": "%[1]s_pkey", "index_method": "btree", "index_columns": ["id"], "index_definition": "CREATE UNIQUE INDEX %[1]s_pkey ON public.%[1]s USING btree (id)"}],
				"triggers": [], "columns": %[2]s, "object_name": "%[1]s", "object_type": "TABLE", "schema_name": "public",
				"constraints": [{"constraint_name": "%[1]s_pkey", "constraint_type": "PRIMARY KEY", "constraint_columns": ["id"], "constraint_definition": "PRIMARY KEY (id)", "foreign_key_referenced_table": null, "foreign_key_referenced_columns": null}]
			}
		}`

		// Template to construct simple output want
		simpleObjectTemplate = `{"object_name":"%s", "schema_name":"public", "object_details":{"name":"%s"}}`
	)

	// Helper to build json for detailed want
	getDetailedWant := func(tableName, columnJSON string) string {
		return fmt.Sprintf(detailedObjectTemplate, tableName, columnJSON, PostgresUser)
	}

	// Helper to build template for simple want
	getSimpleWant := func(tableName string) string {
		return fmt.Sprintf(simpleObjectTemplate, tableName, tableName)
	}

	invokeTcs := []struct {
		name           string
		api            string
		requestBody    io.Reader
		wantStatusCode int
		want           string
	}{
		{
			name:           "invoke list_tables detailed output",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(fmt.Sprintf(`{"table_names": "%s"}`, tableNameAuth))),
			wantStatusCode: http.StatusOK,
			want:           fmt.Sprintf("[%s]", getDetailedWant(tableNameAuth, authTableColumns)),
		},
		{
			name:           "invoke list_tables simple output",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(fmt.Sprintf(`{"table_names": "%s", "output_format": "simple"}`, tableNameAuth))),
			wantStatusCode: http.StatusOK,
			want:           fmt.Sprintf("[%s]", getSimpleWant(tableNameAuth)),
		},
		{
			name:           "invoke list_tables with invalid output format",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(`{"table_names": "", "output_format": "abcd"}`)),
			wantStatusCode: http.StatusBadRequest,
		},
		{
			name:           "invoke list_tables with malformed table_names parameter",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(`{"table_names": 12345, "output_format": "detailed"}`)),
			wantStatusCode: http.StatusBadRequest,
		},
		{
			name:           "invoke list_tables with multiple table names",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(fmt.Sprintf(`{"table_names": "%s,%s"}`, tableNameParam, tableNameAuth))),
			wantStatusCode: http.StatusOK,
			want:           fmt.Sprintf("[%s,%s]", getDetailedWant(tableNameAuth, authTableColumns), getDetailedWant(tableNameParam, paramTableColumns)),
		},
		{
			name:           "invoke list_tables with non-existent table",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(`{"table_names": "non_existent_table"}`)),
			wantStatusCode: http.StatusOK,
			want:           `null`,
		},
		{
			name:           "invoke list_tables with one existing and one non-existent table",
			api:            "http://127.0.0.1:5000/api/tool/list_tables/invoke",
			requestBody:    bytes.NewBuffer([]byte(fmt.Sprintf(`{"table_names": "%s,non_existent_table"}`, tableNameParam))),
			wantStatusCode: http.StatusOK,
			want:           fmt.Sprintf("[%s]", getDetailedWant(tableNameParam, paramTableColumns)),
		},
	}
	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, tc.api, tc.requestBody)
			if err != nil {
				t.Fatalf("unable to create request: %s", err)
			}
			req.Header.Add("Content-type", "application/json")
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("unable to send request: %s", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode != tc.wantStatusCode {
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("unexpected status code: got %d, want %d: %s", resp.StatusCode, tc.wantStatusCode, string(bodyBytes))
			}

			if tc.wantStatusCode == http.StatusOK {
				var bodyWrapper map[string]json.RawMessage
				respBytes, err := io.ReadAll(resp.Body)
				if err != nil {
					t.Fatalf("error reading response body: %s", err)
				}

				if err := json.Unmarshal(respBytes, &bodyWrapper); err != nil {
					t.Fatalf("error parsing response wrapper: %s, body: %s", err, string(respBytes))
				}

				resultJSON, ok := bodyWrapper["result"]
				if !ok {
					t.Fatal("unable to find 'result' in response body")
				}

				var resultString string
				if err := json.Unmarshal(resultJSON, &resultString); err != nil {
					t.Fatalf("'result' is not a JSON-encoded string: %s", err)
				}

				var got, want []any

				if err := json.Unmarshal([]byte(resultString), &got); err != nil {
					t.Fatalf("failed to unmarshal actual result string: %v", err)
				}
				if err := json.Unmarshal([]byte(tc.want), &want); err != nil {
					t.Fatalf("failed to unmarshal expected want string: %v", err)
				}

				sort.SliceStable(got, func(i, j int) bool {
					return fmt.Sprintf("%v", got[i]) < fmt.Sprintf("%v", got[j])
				})
				sort.SliceStable(want, func(i, j int) bool {
					return fmt.Sprintf("%v", want[i]) < fmt.Sprintf("%v", want[j])
				})

				if !reflect.DeepEqual(got, want) {
					t.Errorf("Unexpected result: got %#v, want: %#v", got, want)
				}
			}
		})
	}
}
|
||||
if err != nil {
|
||||
t.Fatalf("error getting access token from ADC: %s", err)
|
||||
}
|
||||
accessToken = "Bearer " + accessToken
|
||||
|
||||
// Test tool invoke endpoint
|
||||
invokeTcs := []struct {
|
||||
@@ -841,6 +842,7 @@ func RunMCPToolCallMethod(t *testing.T, myFailToolWant, select1Want string, opti
|
||||
if err != nil {
|
||||
t.Fatalf("error getting access token from ADC: %s", err)
|
||||
}
|
||||
accessToken = "Bearer " + accessToken
|
||||
|
||||
idToken, err := GetGoogleIdToken(ClientId)
|
||||
if err != nil {
|
||||
|
||||
157 tests/yugabytedb/yugabytedb_integration_test.go Normal file
@@ -0,0 +1,157 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yugabytedb

import (
	"context"
	"fmt"
	"os"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/tests"
	"github.com/yugabyte/pgx/v5/pgxpool"
)

var (
	YBDB_SOURCE_KIND = "yugabytedb"
	YBDB_TOOL_KIND   = "yugabytedb-sql"
	YBDB_DATABASE    = os.Getenv("YUGABYTEDB_DATABASE")
	YBDB_HOST        = os.Getenv("YUGABYTEDB_HOST")
	YBDB_PORT        = os.Getenv("YUGABYTEDB_PORT")
	YBDB_USER        = os.Getenv("YUGABYTEDB_USER")
	YBDB_PASS        = os.Getenv("YUGABYTEDB_PASS")
	YBDB_LB          = os.Getenv("YUGABYTEDB_LOADBALANCE")
)

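// getYBVars fails the test if a required YugabyteDB environment variable is
// missing, defaults the load-balance flag, and returns the source config map.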
func getYBVars(t *testing.T) map[string]any {
	switch "" {
	case YBDB_DATABASE:
		t.Fatal("'YUGABYTEDB_DATABASE' not set")
	case YBDB_HOST:
		t.Fatal("'YUGABYTEDB_HOST' not set")
	case YBDB_PORT:
		t.Fatal("'YUGABYTEDB_PORT' not set")
	case YBDB_USER:
		t.Fatal("'YUGABYTEDB_USER' not set")
	case YBDB_PASS:
		t.Fatal("'YUGABYTEDB_PASS' not set")
	case YBDB_LB:
		fmt.Println("YUGABYTEDB_LOADBALANCE not set. Setting default value: false")
		YBDB_LB = "false"
	}

	return map[string]any{
		"kind":        YBDB_SOURCE_KIND,
		"host":        YBDB_HOST,
		"port":        YBDB_PORT,
		"database":    YBDB_DATABASE,
		"user":        YBDB_USER,
		"password":    YBDB_PASS,
		"loadBalance": YBDB_LB,
	}
}

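// initYBConnectionPool builds a pgxpool connection to YugabyteDB; the
// load_balance DSN option enables the YugabyteDB smart driver's
// cluster-aware load balancing.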
func initYBConnectionPool(host, port, user, pass, dbname, loadBalance string) (*pgxpool.Pool, error) {
	dsn := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?load_balance=%s", user, pass, host, port, dbname, loadBalance)
	pool, err := pgxpool.New(context.Background(), dsn)
	if err != nil {
		return nil, fmt.Errorf("unable to create YugabyteDB connection pool: %w", err)
	}
	return pool, nil
}

// SetupYugabyteDBSQLTable creates a table compatible with the yugabytedb-sql
// tool, inserts test data, and returns a teardown function.
func SetupYugabyteDBSQLTable(t *testing.T, ctx context.Context, pool *pgxpool.Pool, create_statement, insert_statement, tableName string, params []any) func(*testing.T) {
	err := pool.Ping(ctx)
	if err != nil {
		t.Fatalf("unable to connect to test database: %s", err)
	}

	// Create table
	_, err = pool.Exec(ctx, create_statement)
	if err != nil {
		t.Fatalf("unable to create test table %s: %s", tableName, err)
	}

	// Insert test data
	_, err = pool.Exec(ctx, insert_statement, params...)
	if err != nil {
		t.Fatalf("unable to insert test data: %s", err)
	}

	return func(t *testing.T) {
		// tear down test
		_, err = pool.Exec(ctx, fmt.Sprintf("DROP TABLE %s;", tableName))
		if err != nil {
			t.Errorf("Teardown failed: %s", err)
		}
	}
}

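// TestYugabyteDB runs the shared tool endpoint suite against a YugabyteDB
// source, reusing the PostgreSQL-compatible statements and expected results.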
func TestYugabyteDB(t *testing.T) {
	sourceConfig := getYBVars(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	var args []string

	pool, err := initYBConnectionPool(YBDB_HOST, YBDB_PORT, YBDB_USER, YBDB_PASS, YBDB_DATABASE, YBDB_LB)
	if err != nil {
		t.Fatalf("unable to create YugabyteDB connection pool: %s", err)
	}

	tableNameParam := "param_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")
	tableNameAuth := "auth_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")
	tableNameTemplateParam := "template_param_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")

	createParamTableStmt, insertParamTableStmt, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, paramTestParams := tests.GetPostgresSQLParamToolInfo(tableNameParam)
	teardownTable1 := SetupYugabyteDBSQLTable(t, ctx, pool, createParamTableStmt, insertParamTableStmt, tableNameParam, paramTestParams)
	defer teardownTable1(t)

	createAuthTableStmt, insertAuthTableStmt, authToolStmt, authTestParams := tests.GetPostgresSQLAuthToolInfo(tableNameAuth)
	teardownTable2 := SetupYugabyteDBSQLTable(t, ctx, pool, createAuthTableStmt, insertAuthTableStmt, tableNameAuth, authTestParams)
	defer teardownTable2(t)

	toolsFile := tests.GetToolsConfig(sourceConfig, YBDB_TOOL_KIND, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, authToolStmt)
	tmplSelectCombined, tmplSelectFilterCombined := tests.GetPostgresSQLTmplToolStatement()
	toolsFile = tests.AddTemplateParamConfig(t, toolsFile, YBDB_TOOL_KIND, tmplSelectCombined, tmplSelectFilterCombined, "")

	cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
	if err != nil {
		t.Fatalf("command initialization returned an error: %s", err)
	}
	defer cleanup()

	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	out, err := testutils.WaitForString(waitCtx, regexp.MustCompile(`Server ready to serve`), cmd.Out)
	if err != nil {
		t.Logf("toolbox command logs: \n%s", out)
		t.Fatalf("toolbox didn't start successfully: %s", err)
	}

	select1Want, mcpMyFailToolWant, _, mcpSelect1Want := tests.GetPostgresWants()

	tests.RunToolGetTest(t)
	tests.RunToolInvokeTest(t, select1Want)
	tests.RunMCPToolCallMethod(t, mcpMyFailToolWant, mcpSelect1Want)
	tests.RunToolInvokeWithTemplateParameters(t, tableNameTemplateParam)
}