Mirror of https://github.com/googleapis/genai-toolbox.git
Synced 2026-01-11 16:38:15 -05:00

Compare commits (53 commits): cloud-sql-... vs cloud-sql-...
Commit SHAs (author and date columns were empty in the source view):

9e39f566f0, e6a6c615d5, a1bc04477b, b17652309d, 15b628d2d2,
d30249961b, 677254e6d9, f2d9e3b579, da246610e1, fd00fef5b7,
e4957af011, aa5486b8b2, 61b9041f1a, 4babc4e11b, 2036c8efd2,
fdea0d1555, 22287c4e53, 971001400f, 56b6574fc2, 7d384dc28f,
60b26608dd, 681c2b4f3a, caba2ef829, 2fda200066, fe2999a691,
3a6b51752f, 77919c7d8e, 01712284b4, c181dabc91, 93c1b30fce,
3a8a65ceaa, 236be89961, 3aef2bb7be, ce736defb0, 02aa376df8,
7f97442045, c761dfa5aa, 38eae15e16, d4a9eb0ce2, 5530b08f34,
e515d9254f, d661f5343f, 6c8460b0e5, 71f360d315, 33beb7187d,
4f46782927, bf6831fdbe, bd195d2fe2, ec0d3a6eb3, 81d239b053,
0cd3f16f87, aa3972470f, 36d79ef147
@@ -62,6 +62,28 @@ steps:
          postgressql \
          postgresexecutesql

  - id: "alloydb"
    name: golang:1
    waitFor: ["compile-test-binary"]
    entrypoint: /bin/bash
    env:
      - "GOPATH=/gopath"
      - "ALLOYDB_PROJECT=$PROJECT_ID"
      - "ALLOYDB_CLUSTER=$_ALLOYDB_POSTGRES_CLUSTER"
      - "ALLOYDB_INSTANCE=$_ALLOYDB_POSTGRES_INSTANCE"
      - "ALLOYDB_REGION=$_REGION"
    secretEnv: ["ALLOYDB_POSTGRES_USER"]
    volumes:
      - name: "go"
        path: "/gopath"
    args:
      - -c
      - |
        .ci/test_with_coverage.sh \
          "AlloyDB" \
          alloydb \
          alloydb

  - id: "alloydb-pg"
    name: golang:1
    waitFor: ["compile-test-binary"]

.github/labels.yaml (vendored): 4 lines changed

@@ -88,6 +88,10 @@
    color: 8befd7
    description: 'Status: reviewer is awaiting feedback or responses from the author before proceeding.'

  - name: 'release candidate'
    color: 32CD32
    description: 'Use label to signal PR should be included in the next release.'

# Product Labels
  - name: 'product: bigquery'
    color: 5065c7

.github/release-please.yml (vendored): 1 line changed

@@ -36,4 +36,5 @@ extraFiles: [
  "docs/en/how-to/connect-ide/mssql_mcp.md",
  "docs/en/how-to/connect-ide/postgres_mcp.md",
  "docs/en/how-to/connect-ide/neo4j_mcp.md",
  "docs/en/how-to/connect-ide/sqlite_mcp.md",
]
@@ -89,6 +89,9 @@ implementation](https://github.com/googleapis/genai-toolbox/blob/main/internal/s

### Adding a New Tool

> [!NOTE]
> Please follow the tool naming convention detailed [here](./DEVELOPER.md#tool-naming-conventions).

We recommend looking at an [example tool
implementation](https://github.com/googleapis/genai-toolbox/tree/main/internal/tools/postgres/postgressql).

DEVELOPER.md: 41 lines changed

@@ -44,6 +44,47 @@ Before you begin, ensure you have the following:
   curl http://127.0.0.1:5000
   ```

### Tool Naming Conventions

This section details the purpose and conventions for MCP Toolbox's tool naming
properties: **tool name** and **tool kind**.

```
cancel_hotel:            <- tool name
  kind: postgres-sql     <- tool kind
  source: my_pg_source
```

#### Tool Name

The tool name is the identifier used by a Large Language Model (LLM) to invoke
a specific tool.

* Custom tools: The user can define any name they want. The guidelines below
  do not apply.
* Pre-built tools: The tool name is predefined and cannot be changed. It
  should follow the guidelines.

The following guidelines apply to tool names:

* Should use underscores over hyphens (e.g., `list_collections` instead of
  `list-collections`).
* Should not include the product name (e.g., `list_collections` instead of
  `firestore_list_collections`).
* Superficial changes (e.g., changing a tool name) are NOT considered breaking.
* Non-superficial changes (e.g., adding new parameters to a function) MAY be
  considered breaking until they can be validated through extensive testing to
  ensure they do not negatively impact agent performance.

#### Tool Kind

The tool kind serves as a category or type that a user can assign to a tool.

The following guidelines apply to tool kinds:

* Should use hyphens over underscores (e.g., `firestore-list-collections`
  instead of `firestore_list_collections`).
* Should include the product name (e.g., `firestore-list-collections` over
  `list-collections`).
* Changes to tool kind are breaking changes and should be avoided. A
  conforming example is sketched below.
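
For instance, a hypothetical custom tool entry that follows both conventions at once; the tool, kind, and source names here are illustrative rather than taken from a shipped configuration:

```
list_hotels:             <- tool name: underscores, no product prefix
  kind: postgres-sql     <- tool kind: hyphens, product-specific
  source: my_pg_source
```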

## Testing

### Infrastructure

cmd/root.go: 26 lines changed

@@ -42,7 +42,14 @@ import (
	"github.com/googleapis/genai-toolbox/internal/util"

	// Import tool packages for side effect of registration
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetcluster"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetinstance"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetuser"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistclusters"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistinstances"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistusers"
	_ "github.com/googleapis/genai-toolbox/internal/tools/alloydbainl"
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryanalyzecontribution"
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryconversationalanalytics"
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryforecast"

@@ -53,8 +60,18 @@ import (
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigquerysql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/bigtable"
	_ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouseexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhouselistdatabases"
	_ "github.com/googleapis/genai-toolbox/internal/tools/clickhouse/clickhousesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudmonitoring"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqlcreatedatabase"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqlcreateusers"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqlgetinstances"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqllistdatabases"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqllistinstances"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsql/cloudsqlwaitforoperation"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsqlmssql/cloudsqlmssqlcreateinstance"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsqlmysql/cloudsqlmysqlcreateinstance"
	_ "github.com/googleapis/genai-toolbox/internal/tools/cloudsqlpg/cloudsqlpgcreateinstances"
	_ "github.com/googleapis/genai-toolbox/internal/tools/couchbase"
	_ "github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexlookupentry"
	_ "github.com/googleapis/genai-toolbox/internal/tools/dataplex/dataplexsearchaspecttypes"

@@ -99,6 +116,7 @@ import (
	_ "github.com/googleapis/genai-toolbox/internal/tools/mssql/mssqlexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/mssql/mssqlsql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqllisttables"
	_ "github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlsql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jcypher"
	_ "github.com/googleapis/genai-toolbox/internal/tools/neo4j/neo4jexecutecypher"

@@ -106,11 +124,14 @@ import (
	_ "github.com/googleapis/genai-toolbox/internal/tools/oceanbase/oceanbaseexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/oceanbase/oceanbasesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgresexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslisttables"
	_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgressql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/redis"
	_ "github.com/googleapis/genai-toolbox/internal/tools/spanner/spannerexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/spanner/spannerlisttables"
	_ "github.com/googleapis/genai-toolbox/internal/tools/spanner/spannersql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/sqlitesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/sqlite/sqliteexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/sqlite/sqlitesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/tidb/tidbexecutesql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/tidb/tidbsql"
	_ "github.com/googleapis/genai-toolbox/internal/tools/trino/trinoexecutesql"

@@ -122,10 +143,13 @@ import (

	"github.com/spf13/cobra"

	_ "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	_ "github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
	_ "github.com/googleapis/genai-toolbox/internal/sources/bigquery"
	_ "github.com/googleapis/genai-toolbox/internal/sources/bigtable"
	_ "github.com/googleapis/genai-toolbox/internal/sources/clickhouse"
	_ "github.com/googleapis/genai-toolbox/internal/sources/cloudmonitoring"
	_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqladmin"
	_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmssql"
	_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
	_ "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"

cmd/root_test.go: 116 lines changed

@@ -1234,8 +1234,11 @@ func TestPrebuiltTools(t *testing.T) {
	bigquery_config, _ := prebuiltconfigs.Get("bigquery")
	clickhouse_config, _ := prebuiltconfigs.Get("clickhouse")
	cloudsqlpg_config, _ := prebuiltconfigs.Get("cloud-sql-postgres")
	cloudsqlpg_admin_config, _ := prebuiltconfigs.Get("cloud-sql-postgres-admin")
	cloudsqlmysql_config, _ := prebuiltconfigs.Get("cloud-sql-mysql")
	cloudsqlmysql_admin_config, _ := prebuiltconfigs.Get("cloud-sql-mysql-admin")
	cloudsqlmssql_config, _ := prebuiltconfigs.Get("cloud-sql-mssql")
	cloudsqlmssql_admin_config, _ := prebuiltconfigs.Get("cloud-sql-mssql-admin")
	dataplex_config, _ := prebuiltconfigs.Get("dataplex")
	firestoreconfig, _ := prebuiltconfigs.Get("firestore")
	mysql_config, _ := prebuiltconfigs.Get("mysql")

@@ -1244,6 +1247,7 @@ func TestPrebuiltTools(t *testing.T) {
	postgresconfig, _ := prebuiltconfigs.Get("postgres")
	spanner_config, _ := prebuiltconfigs.Get("spanner")
	spannerpg_config, _ := prebuiltconfigs.Get("spanner-postgres")
	sqlite_config, _ := prebuiltconfigs.Get("sqlite")
	neo4jconfig, _ := prebuiltconfigs.Get("neo4j")

	// Set environment variables

@@ -1319,6 +1323,8 @@ func TestPrebuiltTools(t *testing.T) {
	t.Setenv("LOOKER_CLIENT_SECRET", "your_looker_client_secret")
	t.Setenv("LOOKER_VERIFY_SSL", "true")

	t.Setenv("SQLITE_DATABASE", "test.db")

	t.Setenv("NEO4J_URI", "bolt://localhost:7687")
	t.Setenv("NEO4J_DATABASE", "neo4j")
	t.Setenv("NEO4J_USERNAME", "your_neo4j_user")

@@ -1337,9 +1343,39 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "alloydb postgres admin prebuilt tools",
		in:          alloydb_admin_config,
		wantToolset: server.ToolsetConfigs{
			"alloydb-postgres-admin-tools": tools.ToolsetConfig{
				Name:      "alloydb-postgres-admin-tools",
				ToolNames: []string{"alloydb-create-cluster", "alloydb-operations-get", "alloydb-create-instance", "alloydb-list-clusters", "alloydb-list-instances", "alloydb-list-users", "alloydb-create-user"},
			"alloydb_postgres_admin_tools": tools.ToolsetConfig{
				Name:      "alloydb_postgres_admin_tools",
				ToolNames: []string{"create_cluster", "wait_for_operation", "create_instance", "list_clusters", "list_instances", "list_users", "create_user", "get_cluster", "get_instance", "get_user"},
			},
		},
	},
	{
		name:        "cloudsql pg admin prebuilt tools",
		in:          cloudsqlpg_admin_config,
		wantToolset: server.ToolsetConfigs{
			"cloud_sql_postgres_admin_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_postgres_admin_tools",
				ToolNames: []string{"create_instance", "get_instance", "list_instances", "create_database", "list_databases", "create_user", "wait_for_operation"},
			},
		},
	},
	{
		name:        "cloudsql mysql admin prebuilt tools",
		in:          cloudsqlmysql_admin_config,
		wantToolset: server.ToolsetConfigs{
			"cloud_sql_mysql_admin_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_mysql_admin_tools",
				ToolNames: []string{"create_instance", "get_instance", "list_instances", "create_database", "list_databases", "create_user", "wait_for_operation"},
			},
		},
	},
	{
		name:        "cloudsql mssql admin prebuilt tools",
		in:          cloudsqlmssql_admin_config,
		wantToolset: server.ToolsetConfigs{
			"cloud_sql_mssql_admin_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_mssql_admin_tools",
				ToolNames: []string{"create_instance", "get_instance", "list_instances", "create_database", "list_databases", "create_user", "wait_for_operation"},
			},
		},
	},

@@ -1347,8 +1383,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "alloydb prebuilt tools",
		in:          alloydb_config,
		wantToolset: server.ToolsetConfigs{
			"alloydb-postgres-database-tools": tools.ToolsetConfig{
				Name: "alloydb-postgres-database-tools",
			"alloydb_postgres_database_tools": tools.ToolsetConfig{
				Name:      "alloydb_postgres_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1357,9 +1393,9 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "bigquery prebuilt tools",
		in:          bigquery_config,
		wantToolset: server.ToolsetConfigs{
			"bigquery-database-tools": tools.ToolsetConfig{
				Name:      "bigquery-database-tools",
				ToolNames: []string{"ask_data_insights", "execute_sql", "forecast", "get_dataset_info", "get_table_info", "list_dataset_ids", "list_table_ids"},
			"bigquery_database_tools": tools.ToolsetConfig{
				Name:      "bigquery_database_tools",
				ToolNames: []string{"analyze_contribution", "ask_data_insights", "execute_sql", "forecast", "get_dataset_info", "get_table_info", "list_dataset_ids", "list_table_ids"},
			},
		},
	},

@@ -1367,9 +1403,9 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "clickhouse prebuilt tools",
		in:          clickhouse_config,
		wantToolset: server.ToolsetConfigs{
			"clickhouse-database-tools": tools.ToolsetConfig{
				Name:      "clickhouse-database-tools",
				ToolNames: []string{"execute_sql"},
			"clickhouse_database_tools": tools.ToolsetConfig{
				Name:      "clickhouse_database_tools",
				ToolNames: []string{"execute_sql", "list_databases"},
			},
		},
	},

@@ -1377,8 +1413,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "cloudsqlpg prebuilt tools",
		in:          cloudsqlpg_config,
		wantToolset: server.ToolsetConfigs{
			"cloud-sql-postgres-database-tools": tools.ToolsetConfig{
				Name: "cloud-sql-postgres-database-tools",
			"cloud_sql_postgres_database_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_postgres_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1387,8 +1423,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "cloudsqlmysql prebuilt tools",
		in:          cloudsqlmysql_config,
		wantToolset: server.ToolsetConfigs{
			"cloud-sql-mysql-database-tools": tools.ToolsetConfig{
				Name: "cloud-sql-mysql-database-tools",
			"cloud_sql_mysql_database_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_mysql_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1397,8 +1433,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "cloudsqlmssql prebuilt tools",
		in:          cloudsqlmssql_config,
		wantToolset: server.ToolsetConfigs{
			"cloud-sql-mssql-database-tools": tools.ToolsetConfig{
				Name: "cloud-sql-mssql-database-tools",
			"cloud_sql_mssql_database_tools": tools.ToolsetConfig{
				Name:      "cloud_sql_mssql_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1407,9 +1443,9 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "dataplex prebuilt tools",
		in:          dataplex_config,
		wantToolset: server.ToolsetConfigs{
			"dataplex-tools": tools.ToolsetConfig{
				Name:      "dataplex-tools",
				ToolNames: []string{"dataplex_search_entries", "dataplex_lookup_entry", "dataplex_search_aspect_types"},
			"dataplex_tools": tools.ToolsetConfig{
				Name:      "dataplex_tools",
				ToolNames: []string{"search_entries", "lookup_entry", "search_aspect_types"},
			},
		},
	},

@@ -1417,9 +1453,9 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "firestore prebuilt tools",
		in:          firestoreconfig,
		wantToolset: server.ToolsetConfigs{
			"firestore-database-tools": tools.ToolsetConfig{
				Name:      "firestore-database-tools",
				ToolNames: []string{"firestore-get-documents", "firestore-add-documents", "firestore-update-document", "firestore-list-collections", "firestore-delete-documents", "firestore-query-collection", "firestore-get-rules", "firestore-validate-rules"},
			"firestore_database_tools": tools.ToolsetConfig{
				Name:      "firestore_database_tools",
				ToolNames: []string{"get_documents", "add_documents", "update_document", "list_collections", "delete_documents", "query_collection", "get_rules", "validate_rules"},
			},
		},
	},

@@ -1427,8 +1463,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "mysql prebuilt tools",
		in:          mysql_config,
		wantToolset: server.ToolsetConfigs{
			"mysql-database-tools": tools.ToolsetConfig{
				Name: "mysql-database-tools",
			"mysql_database_tools": tools.ToolsetConfig{
				Name:      "mysql_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1437,8 +1473,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "mssql prebuilt tools",
		in:          mssql_config,
		wantToolset: server.ToolsetConfigs{
			"mssql-database-tools": tools.ToolsetConfig{
				Name: "mssql-database-tools",
			"mssql_database_tools": tools.ToolsetConfig{
				Name:      "mssql_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1447,8 +1483,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "looker prebuilt tools",
		in:          looker_config,
		wantToolset: server.ToolsetConfigs{
			"looker-tools": tools.ToolsetConfig{
				Name: "looker-tools",
			"looker_tools": tools.ToolsetConfig{
				Name:      "looker_tools",
				ToolNames: []string{"get_models", "get_explores", "get_dimensions", "get_measures", "get_filters", "get_parameters", "query", "query_sql", "query_url", "get_looks", "run_look", "make_look", "get_dashboards", "make_dashboard", "add_dashboard_element"},
			},
		},

@@ -1457,8 +1493,8 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "postgres prebuilt tools",
		in:          postgresconfig,
		wantToolset: server.ToolsetConfigs{
			"postgres-database-tools": tools.ToolsetConfig{
				Name: "postgres-database-tools",
			"postgres_database_tools": tools.ToolsetConfig{
				Name:      "postgres_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},

@@ -1477,18 +1513,28 @@ func TestPrebuiltTools(t *testing.T) {
		name:        "spanner pg prebuilt tools",
		in:          spannerpg_config,
		wantToolset: server.ToolsetConfigs{
			"spanner-postgres-database-tools": tools.ToolsetConfig{
				Name: "spanner-postgres-database-tools",
			"spanner_postgres_database_tools": tools.ToolsetConfig{
				Name:      "spanner_postgres_database_tools",
				ToolNames: []string{"execute_sql", "execute_sql_dql", "list_tables"},
			},
		},
	},
	{
		name:        "sqlite prebuilt tools",
		in:          sqlite_config,
		wantToolset: server.ToolsetConfigs{
			"sqlite_database_tools": tools.ToolsetConfig{
				Name:      "sqlite_database_tools",
				ToolNames: []string{"execute_sql", "list_tables"},
			},
		},
	},
	{
		name:        "neo4j prebuilt tools",
		in:          neo4jconfig,
		wantToolset: server.ToolsetConfigs{
			"neo4j-database-tools": tools.ToolsetConfig{
				Name: "neo4j-database-tools",
			"neo4j_database_tools": tools.ToolsetConfig{
				Name:      "neo4j_database_tools",
				ToolNames: []string{"execute_cypher", "get_schema"},
			},
		},
@@ -17,6 +17,11 @@ This guide assumes you have already done the following:
   your preferred virtual environment tool for managing dependencies, e.g. [venv][install-venv]).
1. Installed [PostgreSQL 16+ and the `psql` client][install-postgres].

[install-python]: https://wiki.python.org/moin/BeginnersGuide/Download
[install-pip]: https://pip.pypa.io/en/stable/installation/
[install-venv]: https://packaging.python.org/en/latest/tutorials/installing-packages/#creating-virtual-environments
[install-postgres]: https://www.postgresql.org/download/

### Cloud Setup (Optional)

{{< regionInclude "quickstart/shared/cloud_setup.md" "cloud_setup" >}}
@@ -4,7 +4,7 @@ go 1.24.6

require (
	github.com/googleapis/mcp-toolbox-sdk-go v0.3.0
	google.golang.org/genai v1.21.0
	google.golang.org/genai v1.24.0
)

require (

docs/en/how-to/connect-ide/sqlite_mcp.md: new file (255 lines)

@@ -0,0 +1,255 @@
---
title: SQLite using MCP
type: docs
weight: 2
description: "Connect your IDE to SQLite using Toolbox."
---

[Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) is an open protocol for connecting Large Language Models (LLMs) to data sources like SQLite. This guide covers how to use [MCP Toolbox for Databases][toolbox] to connect the following developer assistant tools to a SQLite instance:

* [Cursor][cursor]
* [Windsurf][windsurf] (Codium)
* [Visual Studio Code][vscode] (Copilot)
* [Cline][cline] (VS Code extension)
* [Claude desktop][claudedesktop]
* [Claude code][claudecode]
* [Gemini CLI][geminicli]
* [Gemini Code Assist][geminicodeassist]

[toolbox]: https://github.com/googleapis/genai-toolbox
[cursor]: #configure-your-mcp-client
[windsurf]: #configure-your-mcp-client
[vscode]: #configure-your-mcp-client
[cline]: #configure-your-mcp-client
[claudedesktop]: #configure-your-mcp-client
[claudecode]: #configure-your-mcp-client
[geminicli]: #configure-your-mcp-client
[geminicodeassist]: #configure-your-mcp-client

## Set up the database
1. [Create or select a SQLite database file.](https://www.sqlite.org/download.html)
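
   If you don't have a database file yet, one quick option is the `sqlite3` command-line shell; the file name and schema below are only an illustration:

   ```bash
   # Creates sample.db on first use and adds a small example table.
   sqlite3 sample.db "CREATE TABLE hotels(id INTEGER PRIMARY KEY, name TEXT, location TEXT);"
   sqlite3 sample.db "INSERT INTO hotels(name, location) VALUES ('Hotel Basel', 'Basel'), ('Hotel Bern', 'Bern');"
   ```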

## Install MCP Toolbox

1. Download the latest version of Toolbox as a binary. Select the [correct binary](https://github.com/googleapis/genai-toolbox/releases) corresponding to your OS and CPU architecture. You are required to use Toolbox version v0.10.0+:

   <!-- {x-release-please-start-version} -->
   {{< tabpane persist=header >}}
   {{< tab header="linux/amd64" lang="bash" >}}
   curl -O https://storage.googleapis.com/genai-toolbox/v1.0.0/linux/amd64/toolbox
   {{< /tab >}}

   {{< tab header="darwin/arm64" lang="bash" >}}
   curl -O https://storage.googleapis.com/genai-toolbox/v1.0.0/darwin/arm64/toolbox
   {{< /tab >}}

   {{< tab header="darwin/amd64" lang="bash" >}}
   curl -O https://storage.googleapis.com/genai-toolbox/v1.0.0/darwin/amd64/toolbox
   {{< /tab >}}

   {{< tab header="windows/amd64" lang="bash" >}}
   curl -O https://storage.googleapis.com/genai-toolbox/v1.0.0/windows/amd64/toolbox.exe
   {{< /tab >}}
   {{< /tabpane >}}
   <!-- {x-release-please-end} -->

1. Make the binary executable:

   ```bash
   chmod +x toolbox
   ```

1. Verify the installation:

   ```bash
   ./toolbox --version
   ```
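
1. (Optional) Smoke-test the prebuilt SQLite configuration from the shell. This sketch reuses the same flags and environment variable that the client configurations below pass to the binary, with `./sample.db` as an assumed path:

   ```bash
   # Starts the SQLite MCP server over stdio; press Ctrl+C to stop it.
   SQLITE_DATABASE=./sample.db ./toolbox --prebuilt sqlite --stdio
   ```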

## Configure your MCP Client

{{< tabpane text=true >}}
{{% tab header="Claude code" lang="en" %}}

1. Install [Claude Code](https://docs.anthropic.com/en/docs/agents-and-tools/claude-code/overview).
1. Create a `.mcp.json` file in your project root if it doesn't exist.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```

1. Restart Claude code to apply the new configuration.
{{% /tab %}}
{{% tab header="Claude desktop" lang="en" %}}

1. Open [Claude desktop](https://claude.ai/download) and navigate to Settings.
1. Under the Developer tab, tap Edit Config to open the configuration file.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```

1. Restart Claude desktop.
1. From the new chat screen, you should see a hammer (MCP) icon appear with the new MCP server available.
{{% /tab %}}
{{% tab header="Cline" lang="en" %}}

1. Open the [Cline](https://github.com/cline/cline) extension in VS Code and tap the **MCP Servers** icon.
1. Tap Configure MCP Servers to open the configuration file.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```

1. You should see a green active status after the server is successfully connected.
{{% /tab %}}
{{% tab header="Cursor" lang="en" %}}

1. Create a `.cursor` directory in your project root if it doesn't exist.
1. Create a `.cursor/mcp.json` file if it doesn't exist and open it.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```

1. Open [Cursor](https://www.cursor.com/) and navigate to **Settings > Cursor Settings > MCP**. You should see a green active status after the server is successfully connected.
{{% /tab %}}
{{% tab header="Visual Studio Code (Copilot)" lang="en" %}}

1. Open [VS Code](https://code.visualstudio.com/docs/copilot/overview) and create a `.vscode` directory in your project root if it doesn't exist.
1. Create a `.vscode/mcp.json` file if it doesn't exist and open it.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "servers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```
{{% /tab %}}
{{% tab header="Windsurf" lang="en" %}}

1. Open [Windsurf](https://docs.codeium.com/windsurf) and navigate to the Cascade assistant.
1. Tap on the hammer (MCP) icon, then Configure to open the configuration file.
1. Add the following configuration, replace the environment variables with your values, and save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```
{{% /tab %}}
{{% tab header="Gemini CLI" lang="en" %}}

1. Install the [Gemini CLI](https://github.com/google-gemini/gemini-cli?tab=readme-ov-file#quickstart).
1. In your working directory, create a folder named `.gemini`. Within it, create a `settings.json` file.
1. Add the following configuration, replace the environment variables with your values, and then save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```
{{% /tab %}}
{{% tab header="Gemini Code Assist" lang="en" %}}

1. Install the [Gemini Code Assist](https://marketplace.visualstudio.com/items?itemName=Google.geminicodeassist) extension in Visual Studio Code.
1. Enable Agent Mode in Gemini Code Assist chat.
1. In your working directory, create a folder named `.gemini`. Within it, create a `settings.json` file.
1. Add the following configuration, replace the environment variables with your values, and then save:

   ```json
   {
     "mcpServers": {
       "sqlite": {
         "command": "./PATH/TO/toolbox",
         "args": ["--prebuilt", "sqlite", "--stdio"],
         "env": {
           "SQLITE_DATABASE": "./sample.db"
         }
       }
     }
   }
   ```
{{% /tab %}}
{{< /tabpane >}}

## Use Tools

Your AI tool is now connected to SQLite using MCP. Try asking your AI assistant to list tables, create a table, or define and execute other SQL statements.

The following tools are available to the LLM:

1. **list_tables**: lists tables and descriptions
1. **execute_sql**: execute any SQL statement

{{< notice note >}}
Prebuilt tools are pre-1.0, so expect some tool changes between versions. LLMs will adapt to the tools available, so this shouldn't affect most users.
{{< /notice >}}
@@ -57,6 +57,7 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* **BigQuery Data Editor** (`roles/bigquery.dataEditor`) to create or modify datasets and tables.
* **Gemini for Google Cloud** (`roles/cloudaicompanion.user`) to use the conversational analytics API.
* **Tools:**
  * `analyze_contribution`: Use this tool to perform contribution analysis, also called key driver analysis.
  * `ask_data_insights`: Use this tool to perform data analysis, get insights, or answer complex questions about the contents of specific BigQuery tables. For more information on required roles, API setup, and IAM configuration, see the setup and authentication section of the [Conversational Analytics API documentation](https://cloud.google.com/gemini/docs/conversational-analytics-api/overview).
  * `execute_sql`: Executes a SQL statement.
  * `forecast`: Use this tool to forecast time series data.

@@ -75,6 +76,8 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `CLOUD_SQL_MYSQL_DATABASE`: The name of the database to connect to.
* `CLOUD_SQL_MYSQL_USER`: The database username.
* `CLOUD_SQL_MYSQL_PASSWORD`: The password for the database user.
* `CLOUD_SQL_MYSQL_IP_TYPE`: The IP type, i.e. "Public" or "Private" (default: "Public").
* **Permissions:**
  * **Cloud SQL Client** (`roles/cloudsql.client`) to connect to the instance.
  * Database-level permissions (e.g., `SELECT`, `INSERT`) are required to execute queries.

@@ -267,6 +270,17 @@ See guides, [Connect from your IDE](../how-to/connect-ide/_index.md), for detail
* `execute_sql_dql`: Executes a DQL SQL query using the PostgreSQL interface for Spanner.
* `list_tables`: Lists tables in the database.

## SQLite

* `--prebuilt` value: `sqlite`
* **Environment Variables:**
  * `SQLITE_DATABASE`: The path to the SQLite database file (e.g., `./sample.db`).
* **Permissions:**
  * File system read/write permissions for the specified database file.
* **Tools:**
  * `execute_sql`: Executes a SQL query.
  * `list_tables`: Lists tables in the database.

## Neo4j

* `--prebuilt` value: `neo4j`

docs/en/resources/sources/alloydb-admin.md: new file (36 lines)

@@ -0,0 +1,36 @@
---
title: "AlloyDB Admin"
linkTitle: "AlloyDB Admin"
type: docs
weight: 2
description: >
  The "alloydb-admin" source provides a client for the AlloyDB API.
aliases:
- /resources/sources/alloydb-admin
---

## About

The `alloydb-admin` source provides a client to interact with the [Google AlloyDB API](https://cloud.google.com/alloydb/docs/reference/rest). This allows tools to perform administrative tasks on AlloyDB resources, such as managing clusters, instances, and users.

Authentication can be handled in two ways:
1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-alloydb-admin:
    kind: alloydb-admin

  my-oauth-alloydb-admin:
    kind: alloydb-admin
    useClientOAuth: true
```

## Reference

| **field**      | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind           | string   | true         | Must be "alloydb-admin". |
| useClientOAuth | boolean  | false        | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |
@@ -33,6 +33,9 @@ cluster][alloydb-free-trial].
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in AlloyDB Postgres.

- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
  List tables in an AlloyDB for PostgreSQL database.

### Pre-built Configurations

- [AlloyDB using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/alloydb_pg_mcp/)
@@ -36,6 +36,9 @@ avoiding full table scans or complex filters.

## Available Tools

- [`bigquery-analyze-contribution`](../tools/bigquery/bigquery-analyze-contribution.md)
  Performs contribution analysis, also called key driver analysis, in BigQuery.

- [`bigquery-conversational-analytics`](../tools/bigquery/bigquery-conversational-analytics.md)
  Allows conversational interaction with a BigQuery source.
@@ -91,15 +94,11 @@ allows Toolbox to make queries to [BigQuery][bigquery-docs] on behalf of the
client or the end-user.

When using this on-behalf-of authentication, you must ensure that the
identity used has been granted the correct IAM permissions. Currently,
this option is only supported by the following BigQuery tools:
identity used has been granted the correct IAM permissions.

- [`bigquery-sql`](../tools/bigquery/bigquery-sql.md)
  Run SQL queries directly against BigQuery datasets.

[iam-overview]: https://cloud.google.com/bigquery/docs/access-control
[adc]: https://cloud.google.com/docs/authentication#adc
[set-adc]: https://cloud.google.com/docs/authentication/provide-credentials-adc
[iam-overview]: <https://cloud.google.com/bigquery/docs/access-control>
[adc]: <https://cloud.google.com/docs/authentication#adc>
[set-adc]: <https://cloud.google.com/docs/authentication/provide-credentials-adc>

## Example
@@ -111,6 +110,9 @@ sources:
    kind: "bigquery"
    project: "my-project-id"
    # location: "US" # Optional: Specifies the location for query jobs.
    # allowedDatasets: # Optional: Restricts tool access to a specific list of datasets.
    #   - "my_dataset_1"
    #   - "other_project.my_dataset_2"
```

Initialize a BigQuery source that uses the client's access token:

@@ -122,13 +124,17 @@ sources:
    project: "my-project-id"
    useClientOAuth: true
    # location: "US" # Optional: Specifies the location for query jobs.
    # allowedDatasets: # Optional: Restricts tool access to a specific list of datasets.
    #   - "my_dataset_1"
    #   - "other_project.my_dataset_2"
```

## Reference

| **field**      | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind           | string   | true         | Must be "bigquery". |
| project        | string   | true         | Id of the Google Cloud project to use for billing and as the default project for BigQuery resources. |
| location       | string   | false        | Specifies the location (e.g., 'us', 'asia-northeast1') in which to run the query job. This location must match the location of any tables referenced in the query. Defaults to the table's location or 'US' if the location cannot be determined. [Learn More](https://cloud.google.com/bigquery/docs/locations) |
| useClientOAuth | bool     | false        | If true, forwards the client's OAuth access token from the "Authorization" header to downstream queries. |

| **field**       | **type** | **required** | **description** |
|-----------------|:--------:|:------------:|-----------------|
| kind            | string   | true         | Must be "bigquery". |
| project         | string   | true         | Id of the Google Cloud project to use for billing and as the default project for BigQuery resources. |
| location        | string   | false        | Specifies the location (e.g., 'us', 'asia-northeast1') in which to run the query job. This location must match the location of any tables referenced in the query. Defaults to the table's location or 'US' if the location cannot be determined. [Learn More](https://cloud.google.com/bigquery/docs/locations) |
| allowedDatasets | []string | false        | An optional list of dataset IDs that tools using this source are allowed to access. If provided, any tool operation attempting to access a dataset not in this list will be rejected. To enforce this, two types of operations are also disallowed: 1) Dataset-level operations (e.g., `CREATE SCHEMA`), and 2) operations where table access cannot be statically analyzed (e.g., `EXECUTE IMMEDIATE`, `CREATE PROCEDURE`). If a single dataset is provided, it will be treated as the default for prebuilt tools. |
| useClientOAuth  | bool     | false        | If true, forwards the client's OAuth access token from the "Authorization" header to downstream queries. |

docs/en/resources/sources/cloud-monitoring.md: new file (36 lines)

@@ -0,0 +1,36 @@
---
title: "Cloud Monitoring"
type: docs
weight: 1
description: >
  A "cloud-monitoring" source provides a client for the Cloud Monitoring API.
aliases:
- /resources/sources/cloud-monitoring
---

## About

The `cloud-monitoring` source provides a client to interact with the [Google Cloud Monitoring API](https://cloud.google.com/monitoring/api). This allows tools to access the Cloud Monitoring Metrics Explorer and run PromQL queries.

Authentication can be handled in two ways:
1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-cloud-monitoring:
    kind: cloud-monitoring

  my-oauth-cloud-monitoring:
    kind: cloud-monitoring
    useClientOAuth: true
```

## Reference

| **field**      | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind           | string   | true         | Must be "cloud-monitoring". |
| useClientOAuth | boolean  | false        | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |

docs/en/resources/sources/cloud-sql-admin.md: new file (36 lines)

@@ -0,0 +1,36 @@
---
title: "Cloud SQL Admin"
type: docs
weight: 1
description: >
  A "cloud-sql-admin" source provides a client for the Cloud SQL Admin API.
aliases:
- /resources/sources/cloud-sql-admin
---

## About

The `cloud-sql-admin` source provides a client to interact with the [Google Cloud SQL Admin API](https://cloud.google.com/sql/docs/mysql/admin-api/v1). This allows tools to perform administrative tasks on Cloud SQL instances, such as creating users and databases.

Authentication can be handled in two ways:
1. **Application Default Credentials (ADC):** By default, the source uses ADC to authenticate with the API.
2. **Client-side OAuth:** If `useClientOAuth` is set to `true`, the source will expect an OAuth 2.0 access token to be provided by the client (e.g., a web browser) for each request.

## Example

```yaml
sources:
  my-cloud-sql-admin:
    kind: cloud-sql-admin

  my-oauth-cloud-sql-admin:
    kind: cloud-sql-admin
    useClientOAuth: true
```

## Reference

| **field**      | **type** | **required** | **description** |
|----------------|:--------:|:------------:|-----------------|
| kind           | string   | true         | Must be "cloud-sql-admin". |
| useClientOAuth | boolean  | false        | If true, the source will use client-side OAuth for authorization. Otherwise, it will use Application Default Credentials. Defaults to `false`. |
@@ -28,6 +28,9 @@ to a database by following these instructions][csql-mysql-quickstart].
- [`mysql-execute-sql`](../tools/mysql/mysql-execute-sql.md)
  Run parameterized SQL queries in Cloud SQL for MySQL.

- [`mysql-list-tables`](../tools/mysql/mysql-list-tables.md)
  List tables in a Cloud SQL for MySQL database.

### Pre-built Configurations

- [Cloud SQL for MySQL using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/cloud_sql_mysql_mcp/)

@@ -28,6 +28,9 @@ to a database by following these instructions][csql-pg-quickstart].
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in PostgreSQL.

- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
  List tables in a PostgreSQL database.

### Pre-built Configurations

- [Cloud SQL for Postgres using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/cloud_sql_pg_mcp/)
@@ -22,6 +22,9 @@ reliability, performance, and ease of use.
- [`mysql-execute-sql`](../tools/mysql/mysql-execute-sql.md)
  Run parameterized SQL queries in MySQL.

- [`mysql-list-tables`](../tools/mysql/mysql-list-tables.md)
  List tables in a MySQL database.

## Requirements

### Database User

@@ -23,6 +23,9 @@ reputation for reliability, feature robustness, and performance.
- [`postgres-execute-sql`](../tools/postgres/postgres-execute-sql.md)
  Run parameterized SQL statements in PostgreSQL.

- [`postgres-list-tables`](../tools/postgres/postgres-list-tables.md)
  List tables in a PostgreSQL database.

### Pre-built Configurations

- [PostgreSQL using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/postgres_mcp/)
@@ -26,6 +26,14 @@ SQLite has the following notable characteristics:

- [`sqlite-sql`](../tools/sqlite/sqlite-sql.md)
  Run SQL queries against a local SQLite database.

- [`sqlite-execute-sql`](../tools/sqlite/sqlite-execute-sql.md)
  Run parameterized SQL statements in SQLite.

### Pre-built Configurations

- [SQLite using MCP](../../how-to/connect-ide/sqlite_mcp.md)
  Connect your IDE to SQLite using Toolbox.

## Requirements

docs/en/resources/tools/alloydb/_index.md: new file (7 lines)

@@ -0,0 +1,7 @@
---
title: "AlloyDB for PostgreSQL"
type: docs
weight: 1
description: >
  Tools for AlloyDB for PostgreSQL.
---

docs/en/resources/tools/alloydb/alloydb-get-cluster.md: new file (37 lines)

@@ -0,0 +1,37 @@
---
title: "alloydb-get-cluster"
type: docs
weight: 1
description: >
  The "alloydb-get-cluster" tool retrieves details for a specific AlloyDB cluster.
aliases:
- /resources/tools/alloydb-get-cluster
---

## About

The `alloydb-get-cluster` tool retrieves detailed information for a single, specified AlloyDB cluster. It is compatible with the [alloydb-admin](../../sources/alloydb-admin.md) source. The tool takes the following input parameters:

| Parameter    | Type   | Description                                        | Required |
| :----------- | :----- | :------------------------------------------------- | :------- |
| `projectId`  | string | The GCP project ID to get the cluster for.         | Yes      |
| `locationId` | string | The location of the cluster (e.g., 'us-central1'). | Yes      |
| `clusterId`  | string | The ID of the cluster to retrieve.                 | Yes      |

> **Note**
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source, which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  get_specific_cluster:
    kind: alloydb-get-cluster
    source: my-alloydb-admin-source
    description: Use this tool to retrieve details for a specific AlloyDB cluster.
```

## Reference

| **field**   | **type** | **required** | **description** |
|-------------|:--------:|:------------:|-----------------|
| kind        | string   | true         | Must be alloydb-get-cluster. |
| source      | string   | true         | The name of an `alloydb-admin` source. |
| description | string   | true         | Description of the tool that is passed to the agent. |

docs/en/resources/tools/alloydb/alloydb-get-instance.md: new file (38 lines)

@@ -0,0 +1,38 @@
---
title: "alloydb-get-instance"
type: docs
weight: 1
description: >
  The "alloydb-get-instance" tool retrieves details for a specific AlloyDB instance.
aliases:
- /resources/tools/alloydb-get-instance
---

## About

The `alloydb-get-instance` tool retrieves detailed information for a single, specified AlloyDB instance. It is compatible with the [alloydb-admin](../../sources/alloydb-admin.md) source. The tool takes the following input parameters:

| Parameter  | Type   | Description                                         | Required |
| :--------- | :----- | :-------------------------------------------------- | :------- |
| `project`  | string | The GCP project ID to get the instance for.         | Yes      |
| `location` | string | The location of the instance (e.g., 'us-central1'). | Yes      |
| `cluster`  | string | The ID of the cluster.                              | Yes      |
| `instance` | string | The ID of the instance to retrieve.                 | Yes      |

> **Note**
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source, which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  get_specific_instance:
    kind: alloydb-get-instance
    source: my-alloydb-admin-source
    description: Use this tool to retrieve details for a specific AlloyDB instance.
```

## Reference

| **field**   | **type** | **required** | **description** |
|-------------|:--------:|:------------:|-----------------|
| kind        | string   | true         | Must be alloydb-get-instance. |
| source      | string   | true         | The name of an `alloydb-admin` source. |
| description | string   | true         | Description of the tool that is passed to the agent. |

docs/en/resources/tools/alloydb/alloydb-get-user.md: new file (38 lines)

@@ -0,0 +1,38 @@
---
title: "alloydb-get-user"
type: docs
weight: 1
description: >
  The "alloydb-get-user" tool retrieves details for a specific AlloyDB user.
aliases:
- /resources/tools/alloydb-get-user
---

## About

The `alloydb-get-user` tool retrieves detailed information for a single, specified AlloyDB user. It is compatible with the [alloydb-admin](../../sources/alloydb-admin.md) source. The tool takes the following input parameters:

| Parameter  | Type   | Description                                        | Required |
| :--------- | :----- | :------------------------------------------------- | :------- |
| `project`  | string | The GCP project ID to get the user for.            | Yes      |
| `location` | string | The location of the cluster (e.g., 'us-central1'). | Yes      |
| `cluster`  | string | The ID of the cluster to retrieve the user from.   | Yes      |
| `user`     | string | The ID of the user to retrieve.                    | Yes      |

> **Note**
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source, which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  get_specific_user:
    kind: alloydb-get-user
    source: my-alloydb-admin-source
    description: Use this tool to retrieve details for a specific AlloyDB user.
```

## Reference

| **field**   | **type** | **required** | **description** |
|-------------|:--------:|:------------:|-----------------|
| kind        | string   | true         | Must be alloydb-get-user. |
| source      | string   | true         | The name of an `alloydb-admin` source. |
| description | string   | true         | Description of the tool that is passed to the agent. |

docs/en/resources/tools/alloydb/alloydb-list-clusters.md: new file (38 lines)

@@ -0,0 +1,38 @@
|
||||
---
|
||||
title: "alloydb-list-clusters"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "alloydb-list-clusters" tool lists the AlloyDB clusters in a given project and location.
|
||||
aliases:
|
||||
- /resources/tools/alloydb-list-clusters
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `alloydb-list-clusters` tool retrieves AlloyDB cluster information for all or specified locations in a given project. It is compatible with [alloydb-admin](../../sources/alloydb-admin.md) source.
|
||||
|
||||
`alloydb-list-clusters` tool lists the detailed information of AlloyDB cluster(cluster name, state, configuration, etc) for a given project and location. The tool takes the following input parameters:
|
||||
|
||||
| Parameter | Type | Description | Required |
|
||||
| :--------- | :----- | :--------------------------------------------------------------------------------------- | :------- |
|
||||
| `projectId` | string | The GCP project ID to list clusters for. | Yes |
|
||||
| `locationId` | string | The location to list clusters in (e.g., 'us-central1'). Use `-` for all locations. Default: `-`.| No |
|
||||
> **Note**
|
||||
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  list_clusters:
    kind: alloydb-list-clusters
    source: alloydb-admin-source
    description: Use this tool to list all AlloyDB clusters in a given project and location.
```

## Reference

| **field**   | **type** | **required** | **description**                                      |
|-------------|:--------:|:------------:|------------------------------------------------------|
| kind        | string   | true         | Must be alloydb-list-clusters.                       |
| source      | string   | true         | The name of an `alloydb-admin` source.               |
| description | string   | true         | Description of the tool that is passed to the agent. |

39
docs/en/resources/tools/alloydb/alloydb-list-instances.md
Normal file
@@ -0,0 +1,39 @@
---
title: "alloydb-list-instances"
type: docs
weight: 1
description: >
  The "alloydb-list-instances" tool lists the AlloyDB instances for a given project, cluster and location.
aliases:
- /resources/tools/alloydb-list-instances
---

## About

The `alloydb-list-instances` tool retrieves AlloyDB instance information for all or specified clusters and locations in a given project. It is compatible with the [alloydb-admin](../../sources/alloydb-admin.md) source.

The tool lists detailed information about each AlloyDB instance (instance name, type, IP address, state, configuration, etc.) for a given project, cluster and location. It takes the following input parameters:

| Parameter    | Type   | Description                                                                                                 | Required |
| :----------- | :----- | :----------------------------------------------------------------------------------------------------------- | :------- |
| `projectId`  | string | The GCP project ID to list instances for.                                                                     | Yes      |
| `clusterId`  | string | The ID of the cluster to list instances from. Use `-` to get results for all clusters. Default: `-`.          | No       |
| `locationId` | string | The location of the cluster (e.g., 'us-central1'). Use `-` to get results for all locations. Default: `-`.    | No       |

> **Note**
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source, which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  list_instances:
    kind: alloydb-list-instances
    source: alloydb-admin-source
    description: Use this tool to list all AlloyDB instances for a given project, cluster and location.
```

## Reference

| **field**   | **type** | **required** | **description**                                      |
|-------------|:--------:|:------------:|------------------------------------------------------|
| kind        | string   | true         | Must be alloydb-list-instances.                      |
| source      | string   | true         | The name of an `alloydb-admin` source.               |
| description | string   | true         | Description of the tool that is passed to the agent. |

38
docs/en/resources/tools/alloydb/alloydb-list-users.md
Normal file
@@ -0,0 +1,38 @@
---
title: "alloydb-list-users"
type: docs
weight: 1
description: >
  The "alloydb-list-users" tool lists all database users within an AlloyDB cluster.
aliases:
- /resources/tools/alloydb-list-users
---

## About

The `alloydb-list-users` tool lists all database users within an AlloyDB cluster. It is compatible with the [alloydb-admin](../../sources/alloydb-admin.md) source.

The tool takes the following input parameters:

| Parameter    | Type   | Description                                        | Required |
| :----------- | :----- | :------------------------------------------------- | :------- |
| `projectId`  | string | The GCP project ID to list users for.              | Yes      |
| `clusterId`  | string | The ID of the cluster to list users from.          | Yes      |
| `locationId` | string | The location of the cluster (e.g., 'us-central1'). | Yes      |

> **Note**
> This tool authenticates using the credentials configured in its [alloydb-admin](../../sources/alloydb-admin.md) source, which can be either [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) or client-side OAuth.

## Example

```yaml
tools:
  list_users:
    kind: alloydb-list-users
    source: alloydb-admin-source
    description: Use this tool to list all database users within an AlloyDB cluster.
```

## Reference

| **field**   | **type** | **required** | **description**                                      |
|-------------|:--------:|:------------:|------------------------------------------------------|
| kind        | string   | true         | Must be alloydb-list-users.                          |
| source      | string   | true         | The name of an `alloydb-admin` source.               |
| description | string   | true         | Description of the tool that is passed to the agent. |

@@ -0,0 +1,52 @@
---
title: "bigquery-analyze-contribution"
type: docs
weight: 1
description: >
  A "bigquery-analyze-contribution" tool performs contribution analysis in BigQuery.
aliases:
- /resources/tools/bigquery-analyze-contribution
---

## About

A `bigquery-analyze-contribution` tool performs contribution analysis in BigQuery by creating a temporary `CONTRIBUTION_ANALYSIS` model and then querying it with `ML.GET_INSIGHTS` to find top contributors for a given metric.

It's compatible with the following sources:

- [bigquery](../../sources/bigquery.md)

`bigquery-analyze-contribution` takes the following parameters (the sketch after the list illustrates the statements the tool runs):

- **input_data** (string, required): The data that contains the test and control data to analyze. This can be a fully qualified BigQuery table ID (e.g., `my-project.my_dataset.my_table`) or a SQL query that returns the data.
- **contribution_metric** (string, required): The name of the column that contains the metric to analyze. This can be `SUM(metric_column_name)`, `SUM(numerator_metric_column_name)/SUM(denominator_metric_column_name)` or `SUM(metric_sum_column_name)/COUNT(DISTINCT categorical_column_name)`, depending on the type of metric to analyze.
- **is_test_col** (string, required): The name of the column that identifies whether a row is in the test or control group. The column must contain boolean values.
- **dimension_id_cols** (array of strings, optional): An array of column names that uniquely identify each dimension.
- **top_k_insights_by_apriori_support** (integer, optional): The number of top insights to return, ranked by apriori support. Defaults to `30`.
- **pruning_method** (string, optional): The method to use for pruning redundant insights. Can be `'NO_PRUNING'` or `'PRUNE_REDUNDANT_INSIGHTS'`. Defaults to `'PRUNE_REDUNDANT_INSIGHTS'`.
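
For intuition, here is a minimal sketch of the two statements the tool conceptually issues, expressed as a single multi-statement BigQuery job in Go. The dataset, model, and column names follow the public contribution-analysis tutorial and are illustrative only; the real tool manages a temporary model itself rather than creating a named one:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Step 1 builds the contribution-analysis model over the input data;
	// step 2 asks it for insights. All names here are illustrative.
	script := `
CREATE OR REPLACE MODEL bqml_tutorial.ca_model
  OPTIONS (
    MODEL_TYPE = 'CONTRIBUTION_ANALYSIS',
    CONTRIBUTION_METRIC = 'SUM(total_sales)',
    DIMENSION_ID_COLS = ['store_name', 'city'],
    IS_TEST_COL = 'is_test',
    TOP_K_INSIGHTS_BY_APRIORI_SUPPORT = 30
  ) AS
SELECT * FROM bqml_tutorial.iowa_liquor_sales_sum_data;

SELECT * FROM ML.GET_INSIGHTS(MODEL bqml_tutorial.ca_model);`

	it, err := client.Query(script).Read(ctx)
	if err != nil {
		panic(err)
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(row)
	}
}
```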
## Example

```yaml
tools:
  contribution_analyzer:
    kind: bigquery-analyze-contribution
    source: my-bigquery-source
    description: Use this tool to run contribution analysis on a dataset in BigQuery.
```

## Sample Prompt

You can prepare a sample table by following
<https://cloud.google.com/bigquery/docs/get-contribution-analysis-insights>,
then use the following sample prompts to call this tool:

- What drives the changes in sales in the table `bqml_tutorial.iowa_liquor_sales_sum_data`? Use the project id myproject.
- Analyze the contribution for the `total_sales` metric in the table `bqml_tutorial.iowa_liquor_sales_sum_data`. The test group is identified by the `is_test` column. The dimensions are `store_name`, `city`, `vendor_name`, `category_name` and `item_description`.

## Reference

| **field**   | **type** | **required** | **description**                                    |
|-------------|:--------:|:------------:|----------------------------------------------------|
| kind        | string   | true         | Must be "bigquery-analyze-contribution".           |
| source      | string   | true         | Name of the source the tool should execute on.     |
| description | string   | true         | Description of the tool that is passed to the LLM. |

@@ -15,10 +15,19 @@ It's compatible with the following sources:

- [bigquery](../../sources/bigquery.md)

`bigquery-list-table-ids` takes a required `dataset` parameter to specify the dataset
from which to list table IDs. It also optionally accepts a `project` parameter to
define the Google Cloud project ID. If the `project` parameter is not provided, the
tool defaults to using the project defined in the source configuration.
`bigquery-list-table-ids` accepts the following parameters:
- **`dataset`** (required): Specifies the dataset from which to list table IDs.
- **`project`** (optional): Defines the Google Cloud project ID. If not provided,
  the tool defaults to the project from the source configuration.

The tool's behavior regarding these parameters is influenced by the
`allowedDatasets` restriction on the `bigquery` source (a sketch of this check
follows the list):
- **Without `allowedDatasets` restriction:** The tool can list tables from any
  dataset specified by the `dataset` and `project` parameters.
- **With `allowedDatasets` restriction:** Before listing tables, the tool verifies
  that the requested dataset is in the allowed list. If it is not, the request is
  denied. If only one dataset is specified in the `allowedDatasets` list, it
  will be used as the default value for the `dataset` parameter.
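
A minimal sketch of that check, with a hypothetical `resolveDataset` helper; the actual source implementation may differ:

```go
package main

import "fmt"

// resolveDataset mirrors the allowlist behavior described above: with no
// allowedDatasets configured, any requested dataset is permitted; otherwise
// the request must name a dataset from the list. A single-entry allowlist
// also serves as the default when the dataset parameter is omitted.
func resolveDataset(allowedDatasets []string, requested string) (string, error) {
	if len(allowedDatasets) == 0 {
		return requested, nil // no restriction configured
	}
	if requested == "" && len(allowedDatasets) == 1 {
		return allowedDatasets[0], nil // fall back to the only allowed dataset
	}
	for _, d := range allowedDatasets {
		if d == requested {
			return requested, nil
		}
	}
	return "", fmt.Errorf("dataset %q is not in the allowed list", requested)
}

func main() {
	ds, err := resolveDataset([]string{"analytics"}, "")
	fmt.Println(ds, err) // prints: analytics <nil>
}
```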

## Example

@@ -0,0 +1,53 @@
---
title: "clickhouse-list-databases"
type: docs
weight: 3
description: >
  A "clickhouse-list-databases" tool lists all databases in a ClickHouse instance.
aliases:
- /resources/tools/clickhouse-list-databases
---

## About

A `clickhouse-list-databases` tool lists all available databases in a
ClickHouse instance. It's compatible with the [clickhouse](../../sources/clickhouse.md) source.

This tool executes the `SHOW DATABASES` command and returns a list of all
databases accessible to the configured user, making it useful for database
discovery and exploration tasks.

## Example

```yaml
tools:
  list_clickhouse_databases:
    kind: clickhouse-list-databases
    source: my-clickhouse-instance
    description: List all available databases in the ClickHouse instance
```

## Return Value

The tool returns an array of objects, where each object contains:

- `name`: The name of the database

Example response:

```json
[
  {"name": "default"},
  {"name": "system"},
  {"name": "analytics"},
  {"name": "user_data"}
]
```

## Reference

| **field**    | **type**           | **required** | **description**                                       |
|--------------|:------------------:|:------------:|-------------------------------------------------------|
| kind         | string             | true         | Must be "clickhouse-list-databases".                  |
| source       | string             | true         | Name of the ClickHouse source to list databases from. |
| description  | string             | true         | Description of the tool that is passed to the LLM.    |
| authRequired | array of string    | false        | Authentication services required to use this tool.    |
| parameters   | array of Parameter | false        | Parameters for the tool (typically not used).         |

@@ -3,5 +3,5 @@ title: "Cloud SQL"
type: docs
weight: 1
description: >
  Tools that work with Cloud SQL Control Plane.
  Tools that work with Cloud SQL.
---

39
docs/en/resources/tools/cloudsql/cloudsqlcreatedatabase.md
Normal file
@@ -0,0 +1,39 @@
---
title: cloud-sql-create-database
type: docs
weight: 10
description: >
  Create a new database in a Cloud SQL instance.
---

The `cloud-sql-create-database` tool creates a new database in a specified Cloud SQL instance.

{{< notice info >}}
This tool uses a `source` of kind `cloud-sql-admin`.
{{< /notice >}}

## Example

```yaml
tools:
  create-cloud-sql-database:
    kind: cloud-sql-create-database
    source: my-cloud-sql-admin-source
    description: "Creates a new database in a Cloud SQL instance."
```

## Reference

| **field**   | **type** | **required** | **description**                                  |
| ----------- | :------: | :----------: | ------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-create-database".              |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use.  |
| description | string   | false        | A description of the tool.                        |

## Input Parameters

| **parameter** | **type** | **required** | **description**                                                     |
| ------------- | :------: | :----------: | ------------------------------------------------------------------- |
| project       | string   | true         | The project ID.                                                      |
| instance      | string   | true         | The ID of the instance where the database will be created.           |
| name          | string   | true         | The name for the new database. Must be unique within the instance.   |

31
docs/en/resources/tools/cloudsql/cloudsqlcreateusers.md
Normal file
@@ -0,0 +1,31 @@
---
title: cloud-sql-create-users
type: docs
weight: 10
description: >
  Create a new user in a Cloud SQL instance.
---

The `cloud-sql-create-users` tool creates a new user in a specified Cloud SQL instance. It can create both built-in and IAM users.

{{< notice info >}}
This tool uses a `source` of kind `cloud-sql-admin`.
{{< /notice >}}
## Example

```yaml
tools:
  create-cloud-sql-user:
    kind: cloud-sql-create-users
    source: my-cloud-sql-admin-source
    description: "Creates a new user in a Cloud SQL instance. Both built-in and IAM users are supported. IAM users require an email account as the user name. IAM is the more secure and recommended way to manage users. The agent should always ask the user what type of user they want to create. For more information, see https://cloud.google.com/sql/docs/postgres/add-manage-iam-users"
```

## Reference

| **field**   | **type** | **required** | **description**                                  |
| ----------- | :------: | :----------: | ------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-create-users".                 |
| description | string   | false        | A description of the tool.                        |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use.  |

31
docs/en/resources/tools/cloudsql/cloudsqlgetinstances.md
Normal file
@@ -0,0 +1,31 @@
---
title: "cloud-sql-get-instance"
type: docs
weight: 10
description: >
  Get a Cloud SQL instance resource.
---

The `cloud-sql-get-instance` tool retrieves a Cloud SQL instance resource using the Cloud SQL Admin API.

{{< notice info >}}
This tool uses a `source` of kind `cloud-sql-admin`.
{{< /notice >}}
## Example

```yaml
tools:
  get-sql-instance:
    kind: cloud-sql-get-instance
    source: my-cloud-sql-admin-source
    description: "Gets a particular Cloud SQL instance."
```

## Reference

| **field**   | **type** | **required** | **description**                                  |
| ----------- | :------: | :----------: | ------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-get-instance".                 |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use.  |
| description | string   | false        | A description of the tool.                        |

47
docs/en/resources/tools/cloudsql/cloudsqllistdatabases.md
Normal file
@@ -0,0 +1,47 @@
---
title: cloud-sql-list-databases
type: docs
weight: 1
description: List Cloud SQL databases in an instance.
---

The `cloud-sql-list-databases` tool lists all Cloud SQL databases in a specified
Google Cloud project and instance.

{{< notice info >}}
This tool uses the `cloud-sql-admin` source.
{{< /notice >}}

## Configuration

Here is an example of how to configure the `cloud-sql-list-databases` tool in your
`tools.yaml` file:

```yaml
sources:
  my-cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  list_my_databases:
    kind: cloud-sql-list-databases
    source: my-cloud-sql-admin-source
    description: Use this tool to list all Cloud SQL databases in an instance.
```

## Parameters

The `cloud-sql-list-databases` tool has two required parameters:

| **field** | **type** | **required** | **description**              |
| --------- | :------: | :----------: | ---------------------------- |
| project   | string   | true         | The Google Cloud project ID. |
| instance  | string   | true         | The Cloud SQL instance ID.   |

## Reference

| **field**   | **type** | **required** | **description**                                                 |
| ----------- | :------: | :----------: | --------------------------------------------------------------- |
| kind        | string   | true         | Must be "cloud-sql-list-databases".                              |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use for this tool.   |
| description | string   | false        | Description of the tool that is passed to the agent.             |

46
docs/en/resources/tools/cloudsql/cloudsqllistinstances.md
Normal file
@@ -0,0 +1,46 @@
---
title: Cloud SQL List Instances
type: docs
weight: 1
description: "List Cloud SQL instances in a project."
---

The `cloud-sql-list-instances` tool lists all Cloud SQL instances in a specified
Google Cloud project.

{{< notice info >}}
This tool uses the `cloud-sql-admin` source, which automatically handles authentication on behalf of the user.
{{< /notice >}}

## Configuration

Here is an example of how to configure the `cloud-sql-list-instances` tool in your
`tools.yaml` file:

```yaml
sources:
  my-cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  list_my_instances:
    kind: cloud-sql-list-instances
    source: my-cloud-sql-admin-source
    description: Use this tool to list all Cloud SQL instances in a project.
```

## Parameters

The `cloud-sql-list-instances` tool has one required parameter:

| **field** | **type** | **required** | **description**              |
| --------- | :------: | :----------: | ---------------------------- |
| project   | string   | true         | The Google Cloud project ID. |

## Reference

| **field**   | **type** | **required** | **description**                                                 |
| ----------- | :------: | :----------: | --------------------------------------------------------------- |
| kind        | string   | true         | Must be "cloud-sql-list-instances".                              |
| description | string   | false        | Description of the tool that is passed to the agent.             |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use for this tool.   |

@@ -0,0 +1,42 @@
---
title: cloud-sql-mssql-create-instance
type: docs
weight: 10
description: "Create a Cloud SQL for SQL Server instance."
---

The `cloud-sql-mssql-create-instance` tool creates a Cloud SQL for SQL Server instance using the Cloud SQL Admin API.

{{< notice info >}}
This tool uses a `source` of kind `cloud-sql-admin`.
{{< /notice >}}
## Example

```yaml
tools:
  create-sql-instance:
    kind: cloud-sql-mssql-create-instance
    source: cloud-sql-admin-source
    description: "Creates a SQL Server instance using `Production` and `Development` presets. For the `Development` template, it chooses a 2 vCPU, 8 GiB RAM (`db-custom-2-8192`) configuration with Non-HA/zonal availability. For the `Production` template, it chooses a 4 vCPU, 26 GiB RAM (`db-custom-4-26624`) configuration with HA/regional availability. The Enterprise edition is used in both cases. The default database version is `SQLSERVER_2022_STANDARD`. The agent should ask the user if they want to use a different version."
```

## Reference

### Tool Configuration

| **field**   | **type** | **required** | **description**                                  |
| ----------- | :------: | :----------: | ------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-mssql-create-instance".        |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use.  |
| description | string   | false        | A description of the tool.                        |

### Tool Inputs

| **parameter**   | **type** | **required** | **description**                                                                                                                                           |
| --------------- | :------: | :----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| project         | string   | true         | The project ID.                                                                                                                                              |
| name            | string   | true         | The name of the instance.                                                                                                                                    |
| databaseVersion | string   | false        | The database version for SQL Server. If not specified, defaults to the latest available version (e.g., SQLSERVER_2022_STANDARD).                            |
| rootPassword    | string   | true         | The root password for the instance.                                                                                                                         |
| editionPreset   | string   | false        | The edition of the instance. Can be `Production` or `Development`. This determines the default machine type and availability. Defaults to `Development`.    |
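
To make the preset behavior concrete, here is a minimal sketch of the mapping the example description implies. The struct and function names are hypothetical; only the machine-type strings quoted above come from the docs:

```go
package main

import "fmt"

// instanceSettings captures the preset-derived defaults described above.
type instanceSettings struct {
	Tier         string // Cloud SQL machine type
	Availability string // "ZONAL" or "REGIONAL"
}

// settingsForPreset is a hypothetical helper mirroring the documented presets.
func settingsForPreset(preset string) instanceSettings {
	switch preset {
	case "Production":
		// 4 vCPU, 26 GiB RAM, HA/regional availability.
		return instanceSettings{Tier: "db-custom-4-26624", Availability: "REGIONAL"}
	default: // "Development" is the documented default preset.
		// 2 vCPU, 8 GiB RAM, non-HA/zonal availability.
		return instanceSettings{Tier: "db-custom-2-8192", Availability: "ZONAL"}
	}
}

func main() {
	fmt.Printf("%+v\n", settingsForPreset("Production"))
}
```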

@@ -0,0 +1,48 @@
---
title: cloud-sql-mysql-create-instance
type: docs
weight: 2
description: "Create a Cloud SQL for MySQL instance."
---

The `cloud-sql-mysql-create-instance` tool creates a new Cloud SQL for MySQL instance in a specified Google Cloud project.

{{< notice info >}}
This tool uses the `cloud-sql-admin` source.
{{< /notice >}}

## Configuration

Here is an example of how to configure the `cloud-sql-mysql-create-instance` tool in your `tools.yaml` file:

```yaml
sources:
  my-cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  create_my_mysql_instance:
    kind: cloud-sql-mysql-create-instance
    source: my-cloud-sql-admin-source
    description: "Creates a MySQL instance using `Production` and `Development` presets. For the `Development` template, it chooses a 2 vCPU, 16 GiB RAM, 100 GiB SSD configuration with Non-HA/zonal availability. For the `Production` template, it chooses an 8 vCPU, 64 GiB RAM, 250 GiB SSD configuration with HA/regional availability. The Enterprise Plus edition is used in both cases. The default database version is `MYSQL_8_4`. The agent should ask the user if they want to use a different version."
```

## Parameters

The `cloud-sql-mysql-create-instance` tool has the following parameters:

| **field**       | **type** | **required** | **description**                                                                                                  |
| --------------- | :------: | :----------: | ----------------------------------------------------------------------------------------------------------------- |
| project         | string   | true         | The Google Cloud project ID.                                                                                        |
| name            | string   | true         | The name of the instance to create.                                                                                 |
| databaseVersion | string   | false        | The database version for MySQL. If not specified, defaults to the latest available version (e.g., `MYSQL_8_4`).     |
| rootPassword    | string   | true         | The root password for the instance.                                                                                 |
| editionPreset   | string   | false        | The edition of the instance. Can be `Production` or `Development`. Defaults to `Development`.                       |

## Reference

| **field**   | **type** | **required** | **description**                                                 |
| ----------- | :------: | :----------: | --------------------------------------------------------------- |
| kind        | string   | true         | Must be `cloud-sql-mysql-create-instance`.                       |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use for this tool.   |
| description | string   | false        | A description of the tool that is passed to the agent.           |

@@ -0,0 +1,42 @@
---
title: cloud-sql-postgres-create-instance
type: docs
weight: 10
description: Create a Cloud SQL for PostgreSQL instance.
---

The `cloud-sql-postgres-create-instance` tool creates a Cloud SQL for PostgreSQL instance using the Cloud SQL Admin API.

{{< notice info >}}
This tool uses a `source` of kind `cloud-sql-admin`.
{{< /notice >}}
## Example

```yaml
tools:
  create-sql-instance:
    kind: cloud-sql-postgres-create-instance
    source: cloud-sql-admin-source
    description: "Creates a Postgres instance using `Production` and `Development` presets. For the `Development` template, it chooses a 2 vCPU, 16 GiB RAM, 100 GiB SSD configuration with Non-HA/zonal availability. For the `Production` template, it chooses an 8 vCPU, 64 GiB RAM, 250 GiB SSD configuration with HA/regional availability. The Enterprise Plus edition is used in both cases. The default database version is `POSTGRES_17`. The agent should ask the user if they want to use a different version."
```

## Reference

### Tool Configuration

| **field**   | **type** | **required** | **description**                                  |
| ----------- | :------: | :----------: | ------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-postgres-create-instance".     |
| source      | string   | true         | The name of the `cloud-sql-admin` source to use.  |
| description | string   | false        | A description of the tool.                        |

### Tool Inputs

| **parameter**   | **type** | **required** | **description**                                                                                                                                           |
| --------------- | :------: | :----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| project         | string   | true         | The project ID.                                                                                                                                              |
| name            | string   | true         | The name of the instance.                                                                                                                                    |
| databaseVersion | string   | false        | The database version for Postgres. If not specified, defaults to the latest available version (e.g., POSTGRES_17).                                          |
| rootPassword    | string   | true         | The root password for the instance.                                                                                                                         |
| editionPreset   | string   | false        | The edition of the instance. Can be `Production` or `Development`. This determines the default machine type and availability. Defaults to `Development`.    |

@@ -11,10 +11,6 @@ long-running Cloud SQL operation to complete. It does this by polling the Cloud
SQL Admin API operation status endpoint until the operation is finished, using
exponential backoff.

{{< notice info >}}
This tool is intended for developer assistant workflows with human-in-the-loop
and shouldn't be used for production agents.
{{< /notice >}}

## Example

@@ -22,7 +18,7 @@ and shouldn't be used for production agents.
tools:
  cloudsql-operations-get:
    kind: cloud-sql-wait-for-operation
    source: some-http-source
    source: my-cloud-sql-source
    description: "This will poll on operations API until the operation is done. For checking operation status we need projectId and operationId. Once instance is created give follow up steps on how to use the variables to bring data plane MCP server up in local and remote setup."
    delay: 1s
    maxDelay: 4m

@@ -35,9 +31,9 @@ tools:
| **field**   | **type** | **required** | **description**                                                                                                    |
| ----------- | :------: | :----------: | ------------------------------------------------------------------------------------------------------------------ |
| kind        | string   | true         | Must be "cloud-sql-wait-for-operation".                                                                              |
| source      | string   | true         | The name of an `http` source to use for authentication.                                                              |
| description | string   | true         | A description of the tool.                                                                                           |
| delay       | duration | false        | The initial delay between polling requests (e.g., `3s`). Defaults to 3 seconds.                                      |
| maxDelay    | duration | false        | The maximum delay between polling requests (e.g., `4m`). Defaults to 4 minutes.                                      |
| multiplier  | float    | false        | The multiplier for the polling delay. The delay is multiplied by this value after each request. Defaults to 2.0.     |
| maxRetries  | int      | false        | The maximum number of polling attempts before giving up. Defaults to 10.                                             |
| source      | string   | true         | The name of a `cloud-sql-admin` source to use for authentication.                                                    |
| description | string   | false        | A description of the tool.                                                                                           |
| delay       | duration | false        | The initial delay between polling requests (e.g., `3s`). Defaults to 3 seconds.                                      |
| maxDelay    | duration | false        | The maximum delay between polling requests (e.g., `4m`). Defaults to 4 minutes.                                      |
| multiplier  | float    | false        | The multiplier for the polling delay. The delay is multiplied by this value after each request. Defaults to 2.0.     |
| maxRetries  | int      | false        | The maximum number of polling attempts before giving up. Defaults to 10.                                             |
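
For intuition, the polling schedule these fields describe can be sketched as follows; `pollDelays` is a hypothetical helper, not the tool's actual code:

```go
package main

import (
	"fmt"
	"time"
)

// pollDelays reproduces the backoff schedule described by the table above:
// start at delay, multiply after each attempt, cap at maxDelay, and stop
// after maxRetries attempts.
func pollDelays(delay, maxDelay time.Duration, multiplier float64, maxRetries int) []time.Duration {
	var out []time.Duration
	d := delay
	for i := 0; i < maxRetries; i++ {
		out = append(out, d)
		d = time.Duration(float64(d) * multiplier)
		if d > maxDelay {
			d = maxDelay
		}
	}
	return out
}

func main() {
	// Defaults from the reference table: 3s initial delay, 4m cap,
	// 2.0 multiplier, 10 attempts.
	fmt.Println(pollDelays(3*time.Second, 4*time.Minute, 2.0, 10))
	// Output: [3s 6s 12s 24s 48s 1m36s 3m12s 4m0s 4m0s 4m0s]
}
```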
@@ -29,6 +29,10 @@ It's compatible with the following sources:
7. an optional `limit`
8. an optional `tz`

Starting in Looker v25.18, these queries can be identified in Looker's
System Activity. In the History explore, use the field API Client Name
to find MCP Toolbox queries.

## Example

```yaml

@@ -29,6 +29,10 @@ It's compatible with the following sources:
7. an optional `limit`
8. an optional `tz`

Starting in Looker v25.18, these queries can be identified in Looker's
System Activity. In the History explore, use the field API Client Name
to find MCP Toolbox queries.

## Example

```yaml

43
docs/en/resources/tools/mysql/mysql-list-tables.md
Normal file
@@ -0,0 +1,43 @@
---
title: "mysql-list-tables"
type: docs
weight: 1
description: >
  The "mysql-list-tables" tool lists schema information for all or specified tables in a MySQL database.
aliases:
- /resources/tools/mysql-list-tables
---

## About

The `mysql-list-tables` tool retrieves schema information for all or specified tables in a MySQL database. It is compatible with any of the following sources:

- [cloud-sql-mysql](../../sources/cloud-sql-mysql.md)
- [mysql](../../sources/mysql.md)

`mysql-list-tables` lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). It filters by a comma-separated list of table names; if names are omitted, it lists all tables in user schemas. The output format can be set to `simple`, which returns only the table names, or `detailed`, which is the default.

The tool takes the following input parameters:

| Parameter       | Type   | Description                                                                                                                                                      | Required |
| :-------------- | :----- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
| `table_names`   | string | Filters by a comma-separated list of names. By default, it lists all tables in user schemas. Default: `""`.                                                       | No       |
| `output_format` | string | Indicates the output format of the table schema. `simple` will return only the table names, `detailed` will return the full table information. Default: `detailed`. | No       |
## Example

```yaml
tools:
  mysql_list_tables:
    kind: mysql-list-tables
    source: mysql-source
    description: Use this tool to retrieve schema information for all or specified tables. Output format can be simple (only table names) or detailed.
```

## Reference

| **field**   | **type** | **required** | **description**                                      |
|-------------|:--------:|:------------:|------------------------------------------------------|
| kind        | string   | true         | Must be "mysql-list-tables".                         |
| source      | string   | true         | Name of the source the SQL should execute on.        |
| description | string   | true         | Description of the tool that is passed to the agent. |

39
docs/en/resources/tools/postgres/postgres-list-tables.md
Normal file
@@ -0,0 +1,39 @@
---
title: "postgres-list-tables"
type: docs
weight: 1
description: >
  The "postgres-list-tables" tool lists schema information for all or specified tables in a Postgres database.
aliases:
- /resources/tools/postgres-list-tables
---

## About

The `postgres-list-tables` tool retrieves schema information for all or specified tables in a Postgres database. It's compatible with any of the following sources:

- [alloydb-postgres](../../sources/alloydb-pg.md)
- [cloud-sql-postgres](../../sources/cloud-sql-pg.md)
- [postgres](../../sources/postgres.md)

`postgres-list-tables` lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). The tool takes the following input parameters:

* `table_names` (optional): Filters by a comma-separated list of names. By default, it lists all tables in user schemas.
* `output_format` (optional): Indicates the output format of the table schema. `simple` will return only the table names, `detailed` will return the full table information. Default: `detailed`.

## Example

```yaml
tools:
  postgres_list_tables:
    kind: postgres-list-tables
    source: postgres-source
    description: Use this tool to retrieve schema information for all or specified tables. Output format can be simple (only table names) or detailed.
```

## Reference

| **field**   | **type** | **required** | **description**                                      |
|-------------|:--------:|:------------:|------------------------------------------------------|
| kind        | string   | true         | Must be "postgres-list-tables".                      |
| source      | string   | true         | Name of the source the SQL should execute on.        |
| description | string   | true         | Description of the tool that is passed to the agent. |

208
docs/en/resources/tools/spanner/spanner-list-tables.md
Normal file
@@ -0,0 +1,208 @@
---
title: "spanner-list-tables"
type: docs
weight: 3
description: >
  A "spanner-list-tables" tool retrieves schema information about tables in a
  Google Cloud Spanner database.
---

## About

A `spanner-list-tables` tool retrieves comprehensive schema information about
tables in a Cloud Spanner database. It automatically adapts to the database
dialect (GoogleSQL or PostgreSQL) and returns detailed metadata including
columns, constraints, and indexes. It's compatible with:

- [spanner](../../sources/spanner.md)

This tool is read-only and executes pre-defined SQL queries against the
`INFORMATION_SCHEMA` tables to gather metadata. The tool automatically detects
the database dialect from the source configuration and uses the appropriate SQL
syntax.
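
As a rough illustration of that dialect switch, here is a minimal sketch with simplified queries; the tool's real statements gather far more metadata:

```go
package main

import "fmt"

// tableQueryForDialect picks an INFORMATION_SCHEMA query appropriate to the
// database dialect, roughly as described above. The queries are simplified:
// GoogleSQL databases keep user tables in the unnamed ('') schema, while
// PostgreSQL-dialect databases use the 'public' schema.
func tableQueryForDialect(dialect string) string {
	switch dialect {
	case "postgresql":
		return `SELECT table_name FROM information_schema.tables
		        WHERE table_schema = 'public'`
	default: // googlesql
		return `SELECT table_name FROM information_schema.tables
		        WHERE table_schema = ''`
	}
}

func main() {
	fmt.Println(tableQueryForDialect("googlesql"))
}
```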

## Features

- **Automatic Dialect Detection**: Adapts queries based on whether the database
  uses GoogleSQL or PostgreSQL dialect
- **Comprehensive Schema Information**: Returns columns, data types, constraints,
  indexes, and table relationships
- **Flexible Filtering**: Can list all tables or filter by specific table names
- **Output Format Options**: Choose between simple (table names only) or detailed
  (full schema information) output

## Example

### Basic Usage - List All Tables

```yaml
sources:
  my-spanner-db:
    kind: spanner
    project: ${SPANNER_PROJECT}
    instance: ${SPANNER_INSTANCE}
    database: ${SPANNER_DATABASE}
    dialect: googlesql # or postgresql

tools:
  list_all_tables:
    kind: spanner-list-tables
    source: my-spanner-db
    description: Lists all tables with their complete schema information
```

### List Specific Tables

```yaml
tools:
  list_specific_tables:
    kind: spanner-list-tables
    source: my-spanner-db
    description: |
      Lists schema information for specific tables.
      Example usage:
      {
        "table_names": "users,orders,products",
        "output_format": "detailed"
      }
```

## Parameters

The tool accepts two optional parameters:

| **parameter** | **type** | **default** | **description**                                                                                        |
|---------------|:--------:|:-----------:|---------------------------------------------------------------------------------------------------------|
| table_names   | string   | ""          | Comma-separated list of table names to filter. If empty, lists all tables in user-accessible schemas     |
| output_format | string   | "detailed"  | Output format: "simple" returns only table names, "detailed" returns full schema information             |

## Output Format

### Simple Format

When `output_format` is set to "simple", the tool returns a minimal JSON structure:

```json
[
  {
    "schema_name": "public",
    "object_name": "users",
    "object_details": "{\"name\":\"users\"}"
  },
  {
    "schema_name": "public",
    "object_name": "orders",
    "object_details": "{\"name\":\"orders\"}"
  }
]
```

### Detailed Format

When `output_format` is set to "detailed" (default), the tool returns comprehensive schema information:

```json
[
  {
    "schema_name": "public",
    "object_name": "users",
    "object_details": "{
      \"schema_name\": \"public\",
      \"object_name\": \"users\",
      \"object_type\": \"BASE TABLE\",
      \"columns\": [
        {
          \"column_name\": \"id\",
          \"data_type\": \"INT64\",
          \"ordinal_position\": 1,
          \"is_not_nullable\": true,
          \"column_default\": null
        },
        {
          \"column_name\": \"email\",
          \"data_type\": \"STRING(255)\",
          \"ordinal_position\": 2,
          \"is_not_nullable\": true,
          \"column_default\": null
        }
      ],
      \"constraints\": [
        {
          \"constraint_name\": \"PK_users\",
          \"constraint_type\": \"PRIMARY KEY\",
          \"constraint_definition\": \"PRIMARY KEY (id)\",
          \"constraint_columns\": [\"id\"],
          \"foreign_key_referenced_table\": null,
          \"foreign_key_referenced_columns\": []
        }
      ],
      \"indexes\": [
        {
          \"index_name\": \"idx_users_email\",
          \"index_type\": \"INDEX\",
          \"is_unique\": true,
          \"is_null_filtered\": false,
          \"interleaved_in_table\": null,
          \"index_key_columns\": [
            {\"column_name\": \"email\", \"ordering\": \"ASC\"}
          ],
          \"storing_columns\": []
        }
      ]
    }"
  }
]
```
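
One practical detail visible above: `object_details` is itself a JSON-encoded string, so consumers decode twice. Here is a minimal sketch, with hypothetical struct names:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// row mirrors one element of the tool's response array.
type row struct {
	SchemaName    string `json:"schema_name"`
	ObjectName    string `json:"object_name"`
	ObjectDetails string `json:"object_details"` // nested JSON as a string
}

func main() {
	raw := `[{"schema_name":"public","object_name":"users","object_details":"{\"name\":\"users\"}"}]`

	var rows []row
	if err := json.Unmarshal([]byte(raw), &rows); err != nil {
		panic(err)
	}

	// Second decode: the details field is a JSON document in its own right.
	var details map[string]any
	if err := json.Unmarshal([]byte(rows[0].ObjectDetails), &details); err != nil {
		panic(err)
	}
	fmt.Println(details["name"]) // prints: users
}
```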

## Use Cases

1. **Database Documentation**: Generate comprehensive documentation of your
   database schema
2. **Schema Validation**: Verify that expected tables and columns exist
3. **Migration Planning**: Understand the current schema before making changes
4. **Development Tools**: Build tools that need to understand database structure
5. **Audit and Compliance**: Track schema changes and ensure compliance with
   data governance policies
## Example with Agent Integration

```yaml
sources:
  spanner-db:
    kind: spanner
    project: my-project
    instance: my-instance
    database: my-database
    dialect: googlesql

tools:
  schema_inspector:
    kind: spanner-list-tables
    source: spanner-db
    description: |
      Use this tool to inspect database schema information.
      You can:
      - List all tables by leaving table_names empty
      - Get specific table schemas by providing comma-separated table names
      - Choose between simple (names only) or detailed (full schema) output

      Examples:
      1. List all tables with details: {"output_format": "detailed"}
      2. Get specific tables: {"table_names": "users,orders", "output_format": "detailed"}
      3. Just get table names: {"output_format": "simple"}
```

## Reference

| **field**    | **type** | **required** | **description**                                     |
|--------------|:--------:|:------------:|------------------------------------------------------|
| kind         | string   | true         | Must be "spanner-list-tables"                         |
| source       | string   | true         | Name of the Spanner source to query                   |
| description  | string   | false        | Description of the tool that is passed to the LLM     |
| authRequired | string[] | false        | List of auth services required to invoke this tool    |
## Notes

- This tool is read-only and does not modify any data
- The tool automatically handles both GoogleSQL and PostgreSQL dialects
- Large databases with many tables may take longer to query

39
docs/en/resources/tools/sqlite/sqlite-execute-sql.md
Normal file
@@ -0,0 +1,39 @@
---
title: "sqlite-execute-sql"
type: docs
weight: 1
description: >
  A "sqlite-execute-sql" tool executes a single SQL statement against a SQLite database.
aliases:
- /resources/tools/sqlite-execute-sql
---

## About

A `sqlite-execute-sql` tool executes a single SQL statement against a SQLite database.
It's compatible with any of the following sources:

- [sqlite](../../sources/sqlite.md)

This tool is designed for direct execution of SQL statements. It takes a single `sql` input parameter and runs the SQL statement against the configured SQLite `source`.

> **Note:** This tool is intended for developer assistant workflows with
> human-in-the-loop and shouldn't be used for production agents.

## Example

```yaml
tools:
  execute_sql_tool:
    kind: sqlite-execute-sql
    source: my-sqlite-db
    description: Use this tool to execute a SQL statement.
```

## Reference

| **field**   | **type** | **required** | **description**                                    |
|-------------|:--------:|:------------:|----------------------------------------------------|
| kind        | string   | true         | Must be "sqlite-execute-sql".                      |
| source      | string   | true         | Name of the source the SQL should execute on.      |
| description | string   | true         | Description of the tool that is passed to the LLM. |

45
go.mod
@@ -9,10 +9,10 @@ require (
	cloud.google.com/go/bigquery v1.70.0
	cloud.google.com/go/bigtable v1.39.0
	cloud.google.com/go/cloudsqlconn v1.18.1
	cloud.google.com/go/dataplex v1.26.0
	cloud.google.com/go/dataplex v1.27.0
	cloud.google.com/go/firestore v1.18.0
	cloud.google.com/go/spanner v1.84.1
	github.com/ClickHouse/clickhouse-go/v2 v2.40.1
	cloud.google.com/go/spanner v1.85.1
	github.com/ClickHouse/clickhouse-go/v2 v2.40.3
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.29.0
	github.com/cenkalti/backoff/v5 v5.0.3
@@ -30,11 +30,11 @@ require (
	github.com/google/uuid v1.6.0
	github.com/jackc/pgx/v5 v5.7.6
	github.com/json-iterator/go v1.1.12
	github.com/looker-open-source/sdk-codegen/go v0.25.10
	github.com/looker-open-source/sdk-codegen/go v0.25.11
	github.com/microsoft/go-mssqldb v1.9.3
	github.com/nakagami/firebirdsql v0.9.15
	github.com/neo4j/neo4j-go-driver/v5 v5.28.3
	github.com/redis/go-redis/v9 v9.13.0
	github.com/redis/go-redis/v9 v9.14.0
	github.com/spf13/cobra v1.9.1
	github.com/thlib/go-timezone-local v0.0.7
	github.com/trinodb/trino-go-client v0.328.0
@@ -42,21 +42,21 @@ require (
	github.com/yugabyte/pgx/v5 v5.5.3-yb-5
	go.mongodb.org/mongo-driver v1.17.4
	go.opentelemetry.io/contrib/propagators/autoprop v0.62.0
	go.opentelemetry.io/otel v1.37.0
	go.opentelemetry.io/otel v1.38.0
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0
	go.opentelemetry.io/otel/metric v1.37.0
	go.opentelemetry.io/otel/metric v1.38.0
	go.opentelemetry.io/otel/sdk v1.37.0
	go.opentelemetry.io/otel/sdk/metric v1.37.0
	go.opentelemetry.io/otel/trace v1.37.0
	go.opentelemetry.io/otel/trace v1.38.0
	golang.org/x/oauth2 v0.31.0
	google.golang.org/api v0.248.0
	google.golang.org/api v0.249.0
	google.golang.org/genproto v0.0.0-20250826171959-ef028d996bc1
	modernc.org/sqlite v1.38.2
	modernc.org/sqlite v1.39.0
)

require (
	github.com/ClickHouse/ch-go v0.67.0 // indirect
	github.com/ClickHouse/ch-go v0.68.0 // indirect
	github.com/andybalholm/brotli v1.2.0 // indirect
	github.com/andybalholm/cascadia v1.3.3 // indirect
	github.com/go-faster/city v1.0.1 // indirect
@@ -65,8 +65,6 @@ require (
	github.com/segmentio/asm v1.2.0 // indirect
	github.com/shopspring/decimal v1.4.0 // indirect
	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
	gonum.org/v1/gonum v0.16.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

require (
@@ -82,7 +80,7 @@ require (
	cloud.google.com/go/trace v1.11.6 // indirect
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
	github.com/PuerkitoBio/goquery v1.10.3 // indirect
	github.com/ajg/form v1.5.1 // indirect
@@ -100,7 +98,7 @@ require (
	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
	github.com/go-jose/go-jose/v4 v4.1.1 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
@@ -164,18 +162,19 @@ require (
	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/crypto v0.41.0 // indirect
	golang.org/x/mod v0.26.0 // indirect
	golang.org/x/net v0.43.0 // indirect
	golang.org/x/sync v0.16.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.42.0 // indirect
	golang.org/x/mod v0.27.0 // indirect
	golang.org/x/net v0.44.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.36.0 // indirect
	golang.org/x/text v0.29.0 // indirect
	golang.org/x/time v0.12.0 // indirect
	golang.org/x/tools v0.35.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect
	google.golang.org/grpc v1.74.2 // indirect
	google.golang.org/grpc v1.75.0 // indirect
	google.golang.org/protobuf v1.36.8 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	modernc.org/libc v1.66.3 // indirect

96
go.sum
@@ -236,8 +236,8 @@ cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX
cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
cloud.google.com/go/dataplex v1.26.0 h1:nu8/KrLR5v62L1lApGNgm61Oq+xaa2bS9rgc1csjqE0=
cloud.google.com/go/dataplex v1.26.0/go.mod h1:12R9nlLUzxOscbb2HgoYnkGNibmv4sXEVMXxrdw2a90=
cloud.google.com/go/dataplex v1.27.0 h1:k6gf5DnX7YHD/hilShxVP9ExmGrEWZFjfBZ7rHt4JlM=
cloud.google.com/go/dataplex v1.27.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA=
cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
@@ -544,8 +544,8 @@ cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+
cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
cloud.google.com/go/spanner v1.84.1 h1:ShH4Y3YeDtmHa55dFiSS3YtQ0dmCuP0okfAoHp/d68w=
cloud.google.com/go/spanner v1.84.1/go.mod h1:3GMEIjOcXINJSvb42H3M6TdlGCDzaCFpiiNQpjHPlCM=
cloud.google.com/go/spanner v1.85.1 h1:cJx1ZD//C2QIfFQl8hSTn4twL8amAXtnayyflRIjj40=
cloud.google.com/go/spanner v1.85.1/go.mod h1:bbwCXbM+zljwSPLZ44wZOdzcdmy89hbUGmM/r9sD0ws=
cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
@@ -655,14 +655,14 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/ch-go v0.67.0 h1:18MQF6vZHj+4/hTRaK7JbS/TIzn4I55wC+QzO24uiqc=
github.com/ClickHouse/ch-go v0.67.0/go.mod h1:2MSAeyVmgt+9a2k2SQPPG1b4qbTPzdGDpf1+bcHh+18=
github.com/ClickHouse/clickhouse-go/v2 v2.40.1 h1:PbwsHBgqXRydU7jKULD1C8CHmifczffvQqmFvltM2W4=
github.com/ClickHouse/clickhouse-go/v2 v2.40.1/go.mod h1:GDzSBLVhladVm8V01aEB36IoBOVLLICfyeuiIp/8Ezc=
github.com/ClickHouse/ch-go v0.68.0 h1:zd2VD8l2aVYnXFRyhTyKCrxvhSz1AaY4wBUXu/f0GiU=
github.com/ClickHouse/ch-go v0.68.0/go.mod h1:C89Fsm7oyck9hr6rRo5gqqiVtaIY6AjdD0WFMyNRQ5s=
github.com/ClickHouse/clickhouse-go/v2 v2.40.3 h1:46jB4kKwVDUOnECpStKMVXxvR0Cg9zeV9vdbPjtn6po=
github.com/ClickHouse/clickhouse-go/v2 v2.40.3/go.mod h1:qO0HwvjCnTB4BPL/k6EE3l4d9f/uF+aoimAhJX70eKA=
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 h1:2afWGsMzkIcN8Qm4mgPJKZWyroE5QBszMiDMYEBrnfw=
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.29.0 h1:YVtMlmfRUTaWs3+1acwMBp7rBUo6zrxl6Kn13/R9YW4=
@@ -802,8 +802,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwenxRM7/rLu8=
github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -866,8 +866,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-goquery/goquery v1.0.1 h1:kpchVA1LdOFWdRpkDPESVdlb1JQI6ixsJ5MiNUITO7U=
github.com/go-goquery/goquery v1.0.1/go.mod h1:W5s8OWbqWf6lG0LkXWBeh7U1Y/X5XTI0Br65MHF8uJk=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
@@ -895,7 +895,6 @@ github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
@@ -1121,8 +1120,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/looker-open-source/sdk-codegen/go v0.25.10 h1:ltBbwkwZrQEHEIKrE5QbF+EtBlweKN0RZpQR0w2GIqo=
github.com/looker-open-source/sdk-codegen/go v0.25.10/go.mod h1:YM/IYSsTPk7I54j4l6PduNJYgXyOShuaMi7mD6xic8E=
github.com/looker-open-source/sdk-codegen/go v0.25.11 h1:IPxG3eTqz8ICd1ImqffEKQVd8G9/IAbjH7dg4IhiQtU=
github.com/looker-open-source/sdk-codegen/go v0.25.11/go.mod h1:Br1ntSiruDJ/4nYNjpYyWyCbqJ7+GQceWbIgn0hYims=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
@@ -1195,8 +1194,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/redis/go-redis/v9 v9.13.0 h1:PpmlVykE0ODh8P43U0HqC+2NXHXwG+GUtQyz+MPKGRg=
|
||||
github.com/redis/go-redis/v9 v9.13.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||
github.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE=
|
||||
github.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
@@ -1242,8 +1241,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/thlib/go-timezone-local v0.0.7 h1:fX8zd3aJydqLlTs/TrROrIIdztzsdFV23OzOQx31jII=
|
||||
github.com/thlib/go-timezone-local v0.0.7/go.mod h1:/Tnicc6m/lsJE0irFMA0LfIwTBo4QP7A8IfyIv4zZKI=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
@@ -1317,22 +1316,22 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.37.0 h1:pW+qDVo0jB0rLsNeaP85xL
|
||||
go.opentelemetry.io/contrib/propagators/jaeger v1.37.0/go.mod h1:x7bd+t034hxLTve1hF9Yn9qQJlO/pP8H5pWIt7+gsFM=
|
||||
go.opentelemetry.io/contrib/propagators/ot v1.37.0 h1:tVjnBF6EiTDMXoq2Xuc2vK0I7MTbEs05II/0j9mMK+E=
|
||||
go.opentelemetry.io/contrib/propagators/ot v1.37.0/go.mod h1:MQjyNXtxAC8PGN9gzPtO4GY5zuP+RI3XX53uWbCTvEQ=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 h1:9PgnL3QNlj10uGxExowIDIZu66aVBwWhXmbOp1pa6RA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0/go.mod h1:0ineDcLELf6JmKfuo0wvvhAVMuxWFYvkTin2iV4ydPQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
@@ -1348,6 +1347,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
@@ -1363,8 +1364,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1427,8 +1428,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1492,8 +1493,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1545,8 +1546,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1632,8 +1633,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -1668,8 +1669,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1744,8 +1745,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1823,8 +1824,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
|
||||
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
|
||||
google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
|
||||
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
|
||||
google.golang.org/api v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y=
|
||||
google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k=
|
||||
google.golang.org/api v0.249.0 h1:0VrsWAKzIZi058aeq+I86uIXbNhm9GxSHpbmZ92a38w=
|
||||
google.golang.org/api v0.249.0/go.mod h1:dGk9qyI0UYPwO/cjt2q06LG/EhUpwZGdAbYF14wHHrQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -2012,8 +2013,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
|
||||
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
|
||||
google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@@ -2039,7 +2040,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -2108,8 +2108,8 @@ modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
|
||||
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
|
||||
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
|
||||
modernc.org/sqlite v1.39.0 h1:6bwu9Ooim0yVYA7IZn9demiQk/Ejp0BtTjBWFLymSeY=
|
||||
modernc.org/sqlite v1.39.0/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
|
||||
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
|
||||
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
|
||||
@@ -22,11 +22,18 @@ import (

var expectedToolSources = []string{
"alloydb-postgres-admin",
"alloydb-postgres-observability",
"alloydb-postgres",
"bigquery",
"clickhouse",
"cloud-sql-mssql-admin",
"cloud-sql-mssql-observability",
"cloud-sql-mssql",
"cloud-sql-mysql-admin",
"cloud-sql-mysql-observability",
"cloud-sql-mysql",
"cloud-sql-postgres-admin",
"cloud-sql-postgres-observability",
"cloud-sql-postgres",
"dataplex",
"firestore",
@@ -38,6 +45,7 @@ var expectedToolSources = []string{
"postgres",
"spanner-postgres",
"spanner",
"sqlite",
}

func TestGetPrebuiltSources(t *testing.T) {
@@ -85,11 +93,18 @@ func TestLoadPrebuiltToolYAMLs(t *testing.T) {

func TestGetPrebuiltTool(t *testing.T) {
alloydb_admin_config, _ := Get("alloydb-postgres-admin")
alloydb_observability_config, _ := Get("alloydb-postgres-observability")
alloydb_config, _ := Get("alloydb-postgres")
bigquery_config, _ := Get("bigquery")
clickhouse_config, _ := Get("clickhouse")
cloudsqlpg_observability_config, _ := Get("cloud-sql-postgres-observability")
cloudsqlpg_config, _ := Get("cloud-sql-postgres")
cloudsqlpg_admin_config, _ := Get("cloud-sql-postgres-admin")
cloudsqlmysql_admin_config, _ := Get("cloud-sql-mysql-admin")
cloudsqlmssql_admin_config, _ := Get("cloud-sql-mssql-admin")
cloudsqlmysql_observability_config, _ := Get("cloud-sql-mysql-observability")
cloudsqlmysql_config, _ := Get("cloud-sql-mysql")
cloudsqlmssql_observability_config, _ := Get("cloud-sql-mssql-observability")
cloudsqlmssql_config, _ := Get("cloud-sql-mssql")
dataplex_config, _ := Get("dataplex")
firestoreconfig, _ := Get("firestore")
@@ -99,6 +114,7 @@ func TestGetPrebuiltTool(t *testing.T) {
postgresconfig, _ := Get("postgres")
spanner_config, _ := Get("spanner")
spannerpg_config, _ := Get("spanner-postgres")
sqlite_config, _ := Get("sqlite")
neo4jconfig, _ := Get("neo4j")
if len(alloydb_admin_config) <= 0 {
t.Fatalf("unexpected error: could not fetch alloydb prebuilt tools yaml")
@@ -106,18 +122,39 @@ func TestGetPrebuiltTool(t *testing.T) {
if len(alloydb_config) <= 0 {
t.Fatalf("unexpected error: could not fetch alloydb prebuilt tools yaml")
}
if len(alloydb_observability_config) <= 0 {
t.Fatalf("unexpected error: could not fetch alloydb-observability prebuilt tools yaml")
}
if len(bigquery_config) <= 0 {
t.Fatalf("unexpected error: could not fetch bigquery prebuilt tools yaml")
}
if len(clickhouse_config) <= 0 {
t.Fatalf("unexpected error: could not fetch clickhouse prebuilt tools yaml")
}
if len(cloudsqlpg_observability_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql pg observability prebuilt tools yaml")
}
if len(cloudsqlpg_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql pg prebuilt tools yaml")
}
if len(cloudsqlpg_admin_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql pg admin prebuilt tools yaml")
}
if len(cloudsqlmysql_admin_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mysql admin prebuilt tools yaml")
}
if len(cloudsqlmysql_observability_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mysql observability prebuilt tools yaml")
}
if len(cloudsqlmysql_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mysql prebuilt tools yaml")
}
if len(cloudsqlmssql_observability_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mssql observability prebuilt tools yaml")
}
if len(cloudsqlmssql_admin_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mssql admin prebuilt tools yaml")
}
if len(cloudsqlmssql_config) <= 0 {
t.Fatalf("unexpected error: could not fetch cloud sql mssql prebuilt tools yaml")
}
@@ -145,6 +182,9 @@ func TestGetPrebuiltTool(t *testing.T) {
if len(spannerpg_config) <= 0 {
t.Fatalf("unexpected error: could not fetch spanner pg prebuilt tools yaml")
}
if len(sqlite_config) <= 0 {
t.Fatalf("unexpected error: could not fetch sqlite prebuilt tools yaml")
}
if len(neo4jconfig) <= 0 {
t.Fatalf("unexpected error: could not fetch neo4j prebuilt tools yaml")
}

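The per-config fetch-and-check blocks above are highly repetitive, and every expected source name already appears in expectedToolSources. A minimal table-driven sketch of the same assertions, assuming Get returns ([]byte, error) as the `config, _ := Get(...)` calls suggest (the loop itself is an illustration, not the repository's code):

```go
// TestGetPrebuiltToolTableDriven is a sketch of the same checks as
// TestGetPrebuiltTool, driven by the expectedToolSources slice above.
func TestGetPrebuiltToolTableDriven(t *testing.T) {
	for _, name := range expectedToolSources {
		config, err := Get(name)
		if err != nil {
			t.Fatalf("unexpected error fetching %q prebuilt tools yaml: %v", name, err)
		}
		if len(config) == 0 {
			t.Fatalf("unexpected error: could not fetch %q prebuilt tools yaml", name)
		}
	}
}
```

A table-driven loop would also keep the failure messages consistent, which the hand-written blocks do not (the alloydb and alloydb-admin checks print the same message).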
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
alloydb-api-source:
kind: http
@@ -5,8 +19,10 @@ sources:
headers:
Authorization: Bearer ${API_KEY}
Content-Type: application/json
alloydb-admin-source:
kind: alloydb-admin
tools:
alloydb-create-cluster:
create_cluster:
kind: http
source: alloydb-api-source
method: POST
@@ -48,14 +64,14 @@ tools:
- name: user
type: string
description: "The name for the initial superuser. If not provided, it defaults to 'postgres'. The initial database will always be named 'postgres'."
alloydb-operations-get:
wait_for_operation:
kind: alloydb-wait-for-operation
description: "This will poll on the operations API until the operation is done. For checking operation status we need projectId, locationId and operationId. Once the instance is created, give follow-up steps on how to use the variables to bring the data plane MCP server up in local and remote setups."
delay: 1s
maxDelay: 4m
multiplier: 2
maxRetries: 10
alloydb-create-instance:
create_instance:
kind: http
source: alloydb-api-source
method: POST
@@ -114,56 +130,19 @@ tools:
type: string
description: "The full resource name of the primary cluster for a SECONDARY instance. Required only if instanceType is SECONDARY. Otherwise don't ask"
default: ""
alloydb-list-clusters:
kind: http
source: alloydb-api-source
method: GET
path: /v1/projects/{{.projectId}}/locations/{{.locationId}}/clusters
list_clusters:
kind: alloydb-list-clusters
source: alloydb-admin-source
description: "Lists all AlloyDB clusters in a given project and location."
pathParams:
- name: projectId
type: string
description: "The GCP project ID to list clusters for."
- name: locationId
type: string
description: "The location to list clusters in (e.g., 'us-central1'). Use '-' to list clusters across all locations."
default: "-"
alloydb-list-instances:
kind: http
source: alloydb-api-source
method: GET
path: /v1/projects/{{.projectId}}/locations/{{.locationId}}/clusters/{{.clusterId}}/instances
list_instances:
kind: alloydb-list-instances
source: alloydb-admin-source
description: "Lists all AlloyDB instances within a specific cluster."
pathParams:
- name: projectId
type: string
description: "The GCP project ID."
- name: locationId
type: string
description: "The location of the cluster (e.g., 'us-central1'). Use '-' to get results for all regions."
default: "-"
- name: clusterId
type: string
description: "The ID of the cluster to list instances from. Use '-' to get results for all clusters."
default: "-"
alloydb-list-users:
kind: http
source: alloydb-api-source
method: GET
path: /v1/projects/{{.projectId}}/locations/{{.locationId}}/clusters/{{.clusterId}}/users
list_users:
kind: alloydb-list-users
source: alloydb-admin-source
description: "Lists all database users within a specific AlloyDB cluster."
pathParams:
- name: projectId
type: string
description: "The GCP project ID."
- name: locationId
type: string
description: "The location of the cluster (e.g., 'us-central1')."
default: us-central1
- name: clusterId
type: string
description: "The ID of the cluster to list users from."
alloydb-create-user:
create_user:
kind: http
source: alloydb-api-source
method: POST
@@ -210,13 +189,28 @@ tools:
type: string
description: "The type of user to create. Valid values are: USER_TYPE_UNSPECIFIED, ALLOYDB_BUILT_IN, ALLOYDB_IAM_USER."
default: "ALLOYDB_BUILT_IN"
get_cluster:
kind: alloydb-get-cluster
source: alloydb-admin-source
description: "Retrieves details of a specific AlloyDB cluster."
get_instance:
kind: alloydb-get-instance
source: alloydb-admin-source
description: "Retrieves details of a specific AlloyDB instance."
get_user:
kind: alloydb-get-user
source: alloydb-admin-source
description: "Retrieves details of a specific AlloyDB user."

toolsets:
alloydb-postgres-admin-tools:
- alloydb-create-cluster
- alloydb-operations-get
- alloydb-create-instance
- alloydb-list-clusters
- alloydb-list-instances
- alloydb-list-users
- alloydb-create-user
alloydb_postgres_admin_tools:
- create_cluster
- wait_for_operation
- create_instance
- list_clusters
- list_instances
- list_users
- create_user
- get_cluster
- get_instance
- get_user

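The wait_for_operation settings above describe an exponential backoff poller. A minimal sketch of the implied delay schedule, assuming each retry multiplies the previous delay by multiplier and caps it at maxDelay (the tool's exact retry semantics are not shown in this diff):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// delay: 1s, multiplier: 2, maxDelay: 4m, maxRetries: 10,
	// mirroring the wait_for_operation configuration above.
	delay, maxDelay := 1*time.Second, 4*time.Minute
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %2d: wait %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```

Under these assumptions the poller waits 1s, 2s, 4s, and so on, hitting the 4m cap from the ninth attempt, for roughly 12 minutes of polling in total before giving up.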
@@ -0,0 +1,121 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
cloud-monitoring-source:
kind: cloud-monitoring
tools:
get_system_metrics:
kind: cloud-monitoring-query-prometheus
source: cloud-monitoring-source
description: |
Fetches system-level Cloud Monitoring data (timeseries metrics) for an AlloyDB cluster or instance.
To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

Generate the PromQL `query` for AlloyDB system metrics using the provided metrics and rules. Get labels like `cluster_id` and `instance_id` from the user's intent.

Defaults:
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

PromQL Query Examples:
1. Basic Time Series: `avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m])`
2. Top K: `topk(30, avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m]))`
3. Mean: `avg(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="my-instance","cluster_id"="my-cluster"}[5m]))`
4. Minimum: `min(min_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
5. Maximum: `max(max_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
6. Sum: `sum(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
7. Count streams: `count(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
8. Percentile with groupby on instanceid, clusterid: `quantile by ("instance_id","cluster_id")(0.99,avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","cluster_id"="my-cluster","instance_id"="my-instance"}[5m]))`

Available Metrics List: metricname. description. monitored resource. labels
1. `alloydb.googleapis.com/instance/cpu/average_utilization`: The percentage of CPU being used on an instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
2. `alloydb.googleapis.com/instance/cpu/maximum_utilization`: Maximum CPU utilization across all currently serving nodes of the instance from 0 to 100. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
3. `alloydb.googleapis.com/cluster/storage/usage`: The total AlloyDB storage in bytes across the entire cluster. `alloydb.googleapis.com/Cluster`. `cluster_id`.
4. `alloydb.googleapis.com/instance/postgres/replication/replicas`: The number of read replicas connected to the primary instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `state`, `replica_instance_id`.
5. `alloydb.googleapis.com/instance/postgres/replication/maximum_lag`: The maximum replication time lag calculated across all serving read replicas of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `replica_instance_id`.
6. `alloydb.googleapis.com/instance/memory/min_available_memory`: The minimum available memory across all currently serving nodes of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
7. `alloydb.googleapis.com/instance/postgres/instances`: The number of nodes in the instance, along with their status, which can be either up or down. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `status`.
8. `alloydb.googleapis.com/database/postgresql/tuples`: Number of tuples (rows) by state per database in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`, `state`.
9. `alloydb.googleapis.com/database/postgresql/temp_bytes_written_for_top_databases`: The total amount of data (in bytes) written to temporary files by the queries per database for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
10. `alloydb.googleapis.com/database/postgresql/temp_files_written_for_top_databases`: The number of temporary files used for writing data per database while performing internal algorithms like join, sort etc for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
11. `alloydb.googleapis.com/database/postgresql/inserted_tuples_count_for_top_databases`: The total number of rows inserted per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
12. `alloydb.googleapis.com/database/postgresql/updated_tuples_count_for_top_databases`: The total number of rows updated per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
13. `alloydb.googleapis.com/database/postgresql/deleted_tuples_count_for_top_databases`: The total number of rows deleted per db for top 500 dbs as a result of the queries in the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
14. `alloydb.googleapis.com/database/postgresql/backends_for_top_databases`: The current number of connections per database to the instance for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
15. `alloydb.googleapis.com/instance/postgresql/backends_by_state`: The current number of connections to the instance grouped by the state like idle, active, idle_in_transaction, idle_in_transaction_aborted, disabled, and fastpath_function_call. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `state`.
16. `alloydb.googleapis.com/instance/postgresql/backends_for_top_applications`: The current number of connections to the AlloyDB instance, grouped by applications for top 500 applications. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `application_name`.
17. `alloydb.googleapis.com/database/postgresql/new_connections_for_top_databases`: Total number of new connections added per database for top 500 databases to the instance. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
18. `alloydb.googleapis.com/database/postgresql/deadlock_count_for_top_databases`: Total number of deadlocks detected in the instance per database for top 500 dbs. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`.
19. `alloydb.googleapis.com/database/postgresql/statements_executed_count`: Total count of statements executed in the instance per database per operation_type. `alloydb.googleapis.com/Database`. `cluster_id`, `instance_id`, `database`, `operation_type`.
20. `alloydb.googleapis.com/instance/postgresql/returned_tuples_count`: Number of rows scanned while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
21. `alloydb.googleapis.com/instance/postgresql/fetched_tuples_count`: Number of rows fetched while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
22. `alloydb.googleapis.com/instance/postgresql/updated_tuples_count`: Number of rows updated while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
23. `alloydb.googleapis.com/instance/postgresql/inserted_tuples_count`: Number of rows inserted while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
24. `alloydb.googleapis.com/instance/postgresql/deleted_tuples_count`: Number of rows deleted while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
25. `alloydb.googleapis.com/instance/postgresql/written_tuples_count`: Number of rows written while processing the queries in the instance since the last sample. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
26. `alloydb.googleapis.com/instance/postgresql/deadlock_count`: Number of deadlocks detected in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
27. `alloydb.googleapis.com/instance/postgresql/blks_read`: Number of blocks read by Postgres that were not in the buffer cache. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
28. `alloydb.googleapis.com/instance/postgresql/blks_hit`: Number of times Postgres found the requested block in the buffer cache. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
29. `alloydb.googleapis.com/instance/postgresql/temp_bytes_written_count`: The total amount of data (in bytes) written to temporary files by the queries while performing internal algorithms like join, sort etc. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
30. `alloydb.googleapis.com/instance/postgresql/temp_files_written_count`: The number of temporary files used for writing data in the instance while performing internal algorithms like join, sort etc. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
31. `alloydb.googleapis.com/instance/postgresql/new_connections_count`: The number of new connections added to the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.
32. `alloydb.googleapis.com/instance/postgresql/wait_count`: Total number of times processes waited for each wait event in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `wait_event_type`, `wait_event_name`.
33. `alloydb.googleapis.com/instance/postgresql/wait_time`: Total elapsed wait time for each wait event in the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`, `wait_event_type`, `wait_event_name`.
34. `alloydb.googleapis.com/instance/postgres/transaction_count`: The number of committed and rolled back transactions across all serving nodes of the instance. `alloydb.googleapis.com/Instance`. `cluster_id`, `instance_id`.

get_query_metrics:
kind: cloud-monitoring-query-prometheus
source: cloud-monitoring-source
description: |
Fetches query-level Cloud Monitoring data (timeseries metrics) for queries running in an AlloyDB instance.
To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

Generate the PromQL `query` for AlloyDB query metrics using the provided metrics and rules. Get labels like `cluster_id`, `instance_id`, and `query_hash` from the user's intent. If `query_hash` is provided, use the per-query metrics.

Defaults:
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

PromQL Query Examples:
1. Basic Time Series: `avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m])`
2. Top K: `topk(30, avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m]))`
3. Mean: `avg(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="my-instance","cluster_id"="my-cluster"}[5m]))`
4. Minimum: `min(min_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
5. Maximum: `max(max_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
6. Sum: `sum(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
7. Count streams: `count(avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance","cluster_id"="alloydb-cluster"}[5m]))`
8. Percentile with groupby on instanceid, clusterid: `quantile by ("instance_id","cluster_id")(0.99,avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","cluster_id"="my-cluster","instance_id"="my-instance"}[5m]))`

Available Metrics List: metricname. description. monitored resource. labels. Aggregate metrics are the aggregated values for all query stats; use aggregate metrics if a query id is not provided. For per-query metrics, do not fetch the querystring unless the user asks for it specifically; aggregate on query hash to avoid fetching the querystring. Do not use latency metrics for anything.
1. `alloydb.googleapis.com/database/postgresql/insights/aggregate/latencies`: Aggregated query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
2. `alloydb.googleapis.com/database/postgresql/insights/aggregate/execution_time`: Accumulated aggregated query execution time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
3. `alloydb.googleapis.com/database/postgresql/insights/aggregate/io_time`: Accumulated aggregated IO time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `io_type`.
4. `alloydb.googleapis.com/database/postgresql/insights/aggregate/lock_time`: Accumulated aggregated lock wait time since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `lock_type`.
5. `alloydb.googleapis.com/database/postgresql/insights/aggregate/row_count`: Aggregated number of retrieved or affected rows since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`.
6. `alloydb.googleapis.com/database/postgresql/insights/aggregate/shared_blk_access_count`: Aggregated shared blocks accessed by statement execution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `access_type`.
7. `alloydb.googleapis.com/database/postgresql/insights/perquery/latencies`: Per query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
8. `alloydb.googleapis.com/database/postgresql/insights/perquery/execution_time`: Accumulated execution times per user per database per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
9. `alloydb.googleapis.com/database/postgresql/insights/perquery/io_time`: Accumulated IO time since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `io_type`, `querystring`, `query_hash`.
10. `alloydb.googleapis.com/database/postgresql/insights/perquery/lock_time`: Accumulated lock wait time since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `lock_type`, `querystring`, `query_hash`.
11. `alloydb.googleapis.com/database/postgresql/insights/perquery/row_count`: The number of retrieved or affected rows since the last sample per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `querystring`, `query_hash`.
12. `alloydb.googleapis.com/database/postgresql/insights/perquery/shared_blk_access_count`: Shared blocks accessed by statement execution per query. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `access_type`, `querystring`, `query_hash`.
13. `alloydb.googleapis.com/database/postgresql/insights/pertag/latencies`: Query latency distribution. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.
14. `alloydb.googleapis.com/database/postgresql/insights/pertag/execution_time`: Accumulated execution times since the last sample. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.
15. `alloydb.googleapis.com/database/postgresql/insights/pertag/io_time`: Accumulated IO time since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `io_type`, `tag_hash`.
16. `alloydb.googleapis.com/database/postgresql/insights/pertag/lock_time`: Accumulated lock wait time since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `lock_type`, `tag_hash`.
17. `alloydb.googleapis.com/database/postgresql/insights/pertag/shared_blk_access_count`: Shared blocks accessed by statement execution per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `access_type`, `tag_hash`.
18. `alloydb.googleapis.com/database/postgresql/insights/pertag/row_count`: The number of retrieved or affected rows since the last sample per tag. `alloydb.googleapis.com/Database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`.

toolsets:
alloydb_postgres_cloud_monitoring_tools:
- get_system_metrics
- get_query_metrics
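Both tools take the same two inputs: a Google Cloud project ID and a PromQL query string. As an illustration, a client could call get_system_metrics through a running Toolbox server's HTTP invoke endpoint; the endpoint path, port, and project ID below are assumptions for the sketch, not values taken from this diff:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumed: a Toolbox server running locally on port 5000 that exposes
	// tools at /api/tool/{name}/invoke; "my-project" is a placeholder.
	payload, err := json.Marshal(map[string]string{
		"projectId": "my-project",
		"query":     `avg_over_time({"__name__"="alloydb.googleapis.com/instance/cpu/average_utilization","monitored_resource"="alloydb.googleapis.com/Instance","instance_id"="alloydb-instance"}[5m])`,
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post("http://127.0.0.1:5000/api/tool/get_system_metrics/invoke",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```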
@@ -11,6 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
sources:
|
||||
alloydb-pg-source:
|
||||
kind: "alloydb-postgres"
|
||||
@@ -30,95 +31,11 @@ tools:
|
||||
description: Use this tool to execute sql.
|
||||
|
||||
list_tables:
|
||||
kind: postgres-sql
|
||||
kind: postgres-list-tables
|
||||
source: alloydb-pg-source
|
||||
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
|
||||
statement: |
|
||||
WITH desired_relkinds AS (
|
||||
SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
|
||||
),
|
||||
table_info AS (
|
||||
SELECT
|
||||
t.oid AS table_oid,
|
||||
ns.nspname AS schema_name,
|
||||
t.relname AS table_name,
|
||||
pg_get_userbyid(t.relowner) AS table_owner,
|
||||
obj_description(t.oid, 'pg_class') AS table_comment,
|
||||
t.relkind AS object_kind
|
||||
FROM
|
||||
pg_class t
|
||||
JOIN
|
||||
pg_namespace ns ON ns.oid = t.relnamespace
|
||||
CROSS JOIN desired_relkinds dk
|
||||
WHERE
|
||||
t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
|
||||
AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
|
||||
AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
|
||||
AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
|
||||
),
|
||||
columns_info AS (
|
||||
SELECT
|
||||
att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
|
||||
att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
|
||||
pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
|
||||
FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
|
||||
JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
|
||||
),
|
||||
constraints_info AS (
|
||||
SELECT
|
||||
con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
|
||||
CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
|
||||
            (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
            NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
            (SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
          FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
        ),
        indexes_info AS (
          SELECT
            idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
            idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
            (SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
          FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
        ),
        triggers_info AS (
          SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
          FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
        )
        SELECT
          ti.schema_name,
          ti.table_name AS object_name,
          CASE
            WHEN $2 = 'simple' THEN
              -- IF format is 'simple', return basic JSON
              json_build_object('name', ti.table_name)
            ELSE
              json_build_object(
                'schema_name', ti.schema_name,
                'object_name', ti.table_name,
                'object_type', CASE ti.object_kind
                  WHEN 'r' THEN 'TABLE'
                  WHEN 'p' THEN 'PARTITIONED TABLE'
                  ELSE ti.object_kind::text -- Should not happen due to WHERE clause
                END,
                'owner', ti.table_owner,
                'comment', ti.table_comment,
                'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
                'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
                'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
                'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
              )
          END AS object_details
        FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  alloydb-postgres-database-tools:
  alloydb_postgres_database_tools:
    - execute_sql
    - list_tables

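An aside on the parameter wiring above: for SQL-statement tools, declared parameters bind positionally, so `table_names` arrives as `$1` and `output_format` as `$2` in the statement. A minimal sketch of the same pattern in a hand-written tools file; the source and tool names (and connection placeholders) are illustrative, not from this diff:

    sources:
      my-pg:
        kind: alloydb-postgres
        project: my-project      # placeholders; a real source needs the
        region: us-central1      # full connection settings for your cluster
        cluster: my-cluster
        instance: my-instance
        database: my_db
        user: ${ALLOYDB_USER}
        password: ${ALLOYDB_PASSWORD}
    tools:
      quick_list_tables:
        kind: postgres-sql
        source: my-pg
        description: Tiny cousin of list_tables above; returns names only.
        statement: |
          SELECT CASE WHEN $2 = 'simple' THEN t.table_name
                      ELSE t.table_name || ' (' || t.table_type || ')' END AS info
          FROM information_schema.tables t
          WHERE $1 = '' OR t.table_name = ANY(string_to_array($1, ','));
        parameters:
          - name: table_names
            type: string
            default: ""
          - name: output_format
            type: string
            default: "detailed"
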
@@ -11,13 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  bigquery-source:
    kind: "bigquery"
    project: ${BIGQUERY_PROJECT}
    location: ${BIGQUERY_LOCATION:}
    useClientOAuth: ${BIGQUERY_USE_CLIENT_OAUTH:false}

tools:
  analyze_contribution:
    kind: bigquery-analyze-contribution
    source: bigquery-source
    description: Use this tool to analyze the contribution of changes to key metrics in multi-dimensional data.

  ask_data_insights:
    kind: bigquery-conversational-analytics
    source: bigquery-source
@@ -57,7 +64,8 @@ tools:
    description: Use this tool to list tables.

toolsets:
  bigquery-database-tools:
  bigquery_database_tools:
    - analyze_contribution
    - ask_data_insights
    - execute_sql
    - forecast

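A side note on the `${...}` values in this source: they are resolved from environment variables, and a trailing `:default` supplies a fallback, so `${BIGQUERY_LOCATION:}` falls back to an empty string and `${BIGQUERY_USE_CLIENT_OAUTH:false}` falls back to false. A sketch of the same convention with illustrative variable names (the exact failure behavior when a variable without a default is unset is an assumption, not stated in this diff):

    sources:
      my-bq:
        kind: "bigquery"
        project: ${MY_PROJECT}        # no default: presumably must be set
        location: ${MY_LOCATION:us}   # optional: falls back to "us"
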
@@ -1,3 +1,16 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
  clickhouse-source:
    kind: clickhouse
@@ -14,6 +27,12 @@ tools:
    source: clickhouse-source
    description: Use this tool to execute SQL.

  list_databases:
    kind: clickhouse-list-databases
    source: clickhouse-source
    description: Use this tool to list all databases in ClickHouse.

toolsets:
  clickhouse-database-tools:
  clickhouse_database_tools:
    - execute_sql
    - list_databases

50  internal/prebuiltconfigs/tools/cloud-sql-mssql-admin.yaml  Normal file
@@ -0,0 +1,50 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  create_instance:
    kind: cloud-sql-mssql-create-instance
    source: cloud-sql-admin-source
  get_instance:
    kind: cloud-sql-get-instance
    source: cloud-sql-admin-source
  list_instances:
    kind: cloud-sql-list-instances
    source: cloud-sql-admin-source
  create_database:
    kind: cloud-sql-create-database
    source: cloud-sql-admin-source
  list_databases:
    kind: cloud-sql-list-databases
    source: cloud-sql-admin-source
  create_user:
    kind: cloud-sql-create-users
    source: cloud-sql-admin-source
  wait_for_operation:
    kind: cloud-sql-wait-for-operation
    source: cloud-sql-admin-source

toolsets:
  cloud_sql_mssql_admin_tools:
    - create_instance
    - get_instance
    - list_instances
    - create_database
    - list_databases
    - create_user
    - wait_for_operation

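Reading this admin toolset as a workflow (an inference from the tool kinds, not stated in the diff): instance and database creation are long-running Cloud SQL operations, so a natural sequence is create_instance, then wait_for_operation, then create_database and create_user. A pared-down sketch of a config exposing just that provisioning path; the toolset name is illustrative:

    sources:
      cloud-sql-admin-source:
        kind: cloud-sql-admin
    tools:
      create_instance:
        kind: cloud-sql-mssql-create-instance
        source: cloud-sql-admin-source
      wait_for_operation:
        kind: cloud-sql-wait-for-operation
        source: cloud-sql-admin-source
      create_database:
        kind: cloud-sql-create-database
        source: cloud-sql-admin-source
    toolsets:
      mssql_provisioning_path:     # hypothetical name
        - create_instance
        - wait_for_operation
        - create_database
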
@@ -0,0 +1,76 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
  cloud-monitoring-source:
    kind: cloud-monitoring
tools:
  get_system_metrics:
    kind: cloud-monitoring-query-prometheus
    source: cloud-monitoring-source
    description: |
      Fetches system level Cloud Monitoring data (timeseries metrics) for a SqlServer instance using a PromQL query. Take from the user the projectId and instanceId for which the metrics timeseries data needs to be fetched.
      To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

      Generate PromQL `query` for SqlServer system metrics. Use the provided metrics and rules to construct queries. Get the labels like `instance_id` from user intent.

      Defaults:
      1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

      PromQL Query Examples:
      1. Basic Time Series: `avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m])`
      2. Top K: `topk(30, avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      3. Mean: `avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      4. Minimum: `min(min_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      5. Maximum: `max(max_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      6. Sum: `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      7. Count streams: `count(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      8. Percentile with groupby on database_id: `quantile by ("database_id")(0.99,avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`

      Available Metrics List: metricname. description. monitored resource. labels. database_id is actually the instance id and the format is `project_id:instance_id`.
      1. `cloudsql.googleapis.com/database/cpu/utilization`: Current CPU utilization as a percentage of the reserved CPU. `cloudsql_database`. `database`, `project_id`, `database_id`.
      2. `cloudsql.googleapis.com/database/memory/usage`: RAM usage in bytes, excluding buffer/cache. `cloudsql_database`. `database`, `project_id`, `database_id`.
      3. `cloudsql.googleapis.com/database/memory/total_usage`: Total RAM usage in bytes, including buffer/cache. `cloudsql_database`. `database`, `project_id`, `database_id`.
      4. `cloudsql.googleapis.com/database/disk/bytes_used`: Data utilization in bytes. `cloudsql_database`. `database`, `project_id`, `database_id`.
      5. `cloudsql.googleapis.com/database/disk/quota`: Maximum data disk size in bytes. `cloudsql_database`. `database`, `project_id`, `database_id`.
      6. `cloudsql.googleapis.com/database/disk/read_ops_count`: Delta count of data disk read IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      7. `cloudsql.googleapis.com/database/disk/write_ops_count`: Delta count of data disk write IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      8. `cloudsql.googleapis.com/database/network/received_bytes_count`: Delta count of bytes received through the network. `cloudsql_database`. `database`, `project_id`, `database_id`.
      9. `cloudsql.googleapis.com/database/network/sent_bytes_count`: Delta count of bytes sent through the network. `cloudsql_database`. `destination`, `database`, `project_id`, `database_id`.
      10. `cloudsql.googleapis.com/database/sqlserver/memory/buffer_cache_hit_ratio`: Current percentage of pages found in the buffer cache without reading from disk. `cloudsql_database`. `database`, `project_id`, `database_id`.
      11. `cloudsql.googleapis.com/database/sqlserver/memory/memory_grants_pending`: Current number of processes waiting for a workspace memory grant. `cloudsql_database`. `database`, `project_id`, `database_id`.
      12. `cloudsql.googleapis.com/database/sqlserver/memory/free_list_stall_count`: Total number of requests that waited for a free page. `cloudsql_database`. `database`, `project_id`, `database_id`.
      13. `cloudsql.googleapis.com/database/swap/pages_swapped_in_count`: Total count of pages swapped in from disk since the system was booted. `cloudsql_database`. `database`, `project_id`, `database_id`.
      14. `cloudsql.googleapis.com/database/swap/pages_swapped_out_count`: Total count of pages swapped out to disk since the system was booted. `cloudsql_database`. `database`, `project_id`, `database_id`.
      15. `cloudsql.googleapis.com/database/sqlserver/memory/checkpoint_page_count`: Total number of pages flushed to disk by a checkpoint. `cloudsql_database`. `database`, `project_id`, `database_id`.
      16. `cloudsql.googleapis.com/database/sqlserver/memory/lazy_write_count`: Total number of buffers written by the buffer manager's lazy writer. `cloudsql_database`. `database`, `project_id`, `database_id`.
      17. `cloudsql.googleapis.com/database/sqlserver/memory/page_life_expectancy`: Current number of seconds a page will stay in the buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      18. `cloudsql.googleapis.com/database/sqlserver/memory/page_operation_count`: Total number of physical database page reads or writes. `cloudsql_database`. `operation`, `database`, `project_id`, `database_id`.
      19. `cloudsql.googleapis.com/database/sqlserver/transactions/page_split_count`: Total number of page splits from overflowing index pages. `cloudsql_database`. `database`, `project_id`, `database_id`.
      20. `cloudsql.googleapis.com/database/sqlserver/transactions/deadlock_count`: Total number of lock requests that resulted in a deadlock. `cloudsql_database`. `locked_resource`, `database`, `project_id`, `database_id`.
      21. `cloudsql.googleapis.com/database/sqlserver/transactions/transaction_count`: Total number of transactions started. `cloudsql_database`. `database`, `project_id`, `database_id`.
      22. `cloudsql.googleapis.com/database/sqlserver/transactions/batch_request_count`: Total number of Transact-SQL command batches received. `cloudsql_database`. `database`, `project_id`, `database_id`.
      23. `cloudsql.googleapis.com/database/sqlserver/transactions/sql_compilation_count`: Total number of SQL compilations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      24. `cloudsql.googleapis.com/database/sqlserver/transactions/sql_recompilation_count`: Total number of SQL recompilations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      25. `cloudsql.googleapis.com/database/sqlserver/connections/processes_blocked`: Current number of blocked processes. `cloudsql_database`. `database`, `project_id`, `database_id`.
      26. `cloudsql.googleapis.com/database/sqlserver/transactions/lock_wait_time`: Total time lock requests were waiting for locks. `cloudsql_database`. `locked_resource`, `database`, `project_id`, `database_id`.
      27. `cloudsql.googleapis.com/database/sqlserver/transactions/lock_wait_count`: Total number of lock requests that required the caller to wait. `cloudsql_database`. `locked_resource`, `database`, `project_id`, `database_id`.
      28. `cloudsql.googleapis.com/database/network/connections`: Number of connections to databases on the instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
      29. `cloudsql.googleapis.com/database/sqlserver/connections/login_attempt_count`: Total number of login attempts since the last server restart. `cloudsql_database`. `database`, `project_id`, `database_id`.
      30. `cloudsql.googleapis.com/database/sqlserver/connections/logout_count`: Total number of logout operations since the last server restart. `cloudsql_database`. `database`, `project_id`, `database_id`.
      31. `cloudsql.googleapis.com/database/sqlserver/connections/connection_reset_count`: Total number of logins started from the connection pool since the last server restart. `cloudsql_database`. `database`, `project_id`, `database_id`.
      32. `cloudsql.googleapis.com/database/sqlserver/transactions/full_scan_count`: Total number of unrestricted full scans (base-table or full-index). `cloudsql_database`. `database`, `project_id`, `database_id`.

toolsets:
  cloud-sql-mssql-cloud-monitoring-tools:
    - get_system_metrics

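To make the description above concrete, here is a sketch of arguments a caller might pass to get_system_metrics, following the file's own query examples; the project and instance IDs are placeholders, and splitting the PromQL string across lines is assumed to be tolerated by the query endpoint:

    # Illustrative tool-call arguments (argument names per the description above)
    projectId: my-proj
    query: |
      avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization",
        "monitored_resource"="cloudsql_database",
        "project_id"="my-proj",
        "database_id"="my-proj:my-inst"}[5m]))
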
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloudsql-mssql-source:
    kind: cloud-sql-mssql
@@ -289,6 +290,6 @@ tools:
          default: "detailed"

toolsets:
  cloud-sql-mssql-database-tools:
  cloud_sql_mssql_database_tools:
    - execute_sql
    - list_tables

50  internal/prebuiltconfigs/tools/cloud-sql-mysql-admin.yaml  Normal file
@@ -0,0 +1,50 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  create_instance:
    kind: cloud-sql-mysql-create-instance
    source: cloud-sql-admin-source
  get_instance:
    kind: cloud-sql-get-instance
    source: cloud-sql-admin-source
  list_instances:
    kind: cloud-sql-list-instances
    source: cloud-sql-admin-source
  create_database:
    kind: cloud-sql-create-database
    source: cloud-sql-admin-source
  list_databases:
    kind: cloud-sql-list-databases
    source: cloud-sql-admin-source
  create_user:
    kind: cloud-sql-create-users
    source: cloud-sql-admin-source
  wait_for_operation:
    kind: cloud-sql-wait-for-operation
    source: cloud-sql-admin-source

toolsets:
  cloud_sql_mysql_admin_tools:
    - create_instance
    - get_instance
    - list_instances
    - create_database
    - list_databases
    - create_user
    - wait_for_operation

@@ -0,0 +1,111 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
  cloud-monitoring-source:
    kind: cloud-monitoring
tools:
  get_system_metrics:
    kind: cloud-monitoring-query-prometheus
    source: cloud-monitoring-source
    description: |
      Fetches system level Cloud Monitoring data (timeseries metrics) for a MySQL instance using a PromQL query. Take from the user the projectId and instanceId for which the metrics timeseries data needs to be fetched.
      To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

      Generate PromQL `query` for MySQL system metrics. Use the provided metrics and rules to construct queries. Get the labels like `instance_id` from user intent.

      Defaults:
      1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

      PromQL Query Examples:
      1. Basic Time Series: `avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m])`
      2. Top K: `topk(30, avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      3. Mean: `avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      4. Minimum: `min(min_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      5. Maximum: `max(max_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      6. Sum: `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      7. Count streams: `count(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      8. Percentile with groupby on database_id: `quantile by ("database_id")(0.99,avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`

      Available Metrics List: metricname. description. monitored resource. labels. database_id is actually the instance id and the format is `project_id:instance_id`.
      1. `cloudsql.googleapis.com/database/cpu/utilization`: Current CPU utilization as a percentage of reserved CPU. `cloudsql_database`. `database`, `project_id`, `database_id`.
      2. `cloudsql.googleapis.com/database/network/connections`: Number of connections to the database instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
      3. `cloudsql.googleapis.com/database/network/received_bytes_count`: Delta count of bytes received through the network. `cloudsql_database`. `database`, `project_id`, `database_id`.
      4. `cloudsql.googleapis.com/database/network/sent_bytes_count`: Delta count of bytes sent through the network. `cloudsql_database`. `destination`, `database`, `project_id`, `database_id`.
      5. `cloudsql.googleapis.com/database/memory/components`: Memory usage for components like usage, cache, and free memory. `cloudsql_database`. `component`, `database`, `project_id`, `database_id`.
      6. `cloudsql.googleapis.com/database/disk/bytes_used_by_data_type`: Data utilization in bytes. `cloudsql_database`. `data_type`, `database`, `project_id`, `database_id`.
      7. `cloudsql.googleapis.com/database/disk/read_ops_count`: Delta count of data disk read IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      8. `cloudsql.googleapis.com/database/disk/write_ops_count`: Delta count of data disk write IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      9. `cloudsql.googleapis.com/database/mysql/queries`: Delta count of statements executed by the server. `cloudsql_database`. `database`, `project_id`, `database_id`.
      10. `cloudsql.googleapis.com/database/mysql/questions`: Delta count of statements sent by the client. `cloudsql_database`. `database`, `project_id`, `database_id`.
      11. `cloudsql.googleapis.com/database/mysql/received_bytes_count`: Delta count of bytes received by MySQL process. `cloudsql_database`. `database`, `project_id`, `database_id`.
      12. `cloudsql.googleapis.com/database/mysql/sent_bytes_count`: Delta count of bytes sent by MySQL process. `cloudsql_database`. `database`, `project_id`, `database_id`.
      13. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_dirty`: Number of unflushed pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      14. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_free`: Number of unused pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      15. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_total`: Total number of pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      16. `cloudsql.googleapis.com/database/mysql/innodb_data_fsyncs`: Delta count of InnoDB fsync() calls. `cloudsql_database`. `database`, `project_id`, `database_id`.
      17. `cloudsql.googleapis.com/database/mysql/innodb_os_log_fsyncs`: Delta count of InnoDB fsync() calls to the log file. `cloudsql_database`. `database`, `project_id`, `database_id`.
      18. `cloudsql.googleapis.com/database/mysql/innodb_pages_read`: Delta count of InnoDB pages read. `cloudsql_database`. `database`, `project_id`, `database_id`.
      19. `cloudsql.googleapis.com/database/mysql/innodb_pages_written`: Delta count of InnoDB pages written. `cloudsql_database`. `database`, `project_id`, `database_id`.
      20. `cloudsql.googleapis.com/database/mysql/open_tables`: The number of tables that are currently open. `cloudsql_database`. `database`, `project_id`, `database_id`.
      21. `cloudsql.googleapis.com/database/mysql/opened_table_count`: The number of tables opened since the last sample. `cloudsql_database`. `database`, `project_id`, `database_id`.
      22. `cloudsql.googleapis.com/database/mysql/open_table_definitions`: The number of table definitions currently cached. `cloudsql_database`. `database`, `project_id`, `database_id`.
      23. `cloudsql.googleapis.com/database/mysql/opened_table_definitions_count`: The number of table definitions cached since the last sample. `cloudsql_database`. `database`, `project_id`, `database_id`.
      24. `cloudsql.googleapis.com/database/mysql/innodb/dictionary_memory`: Memory allocated for the InnoDB dictionary cache. `cloudsql_database`. `database`, `project_id`, `database_id`.

  get_query_metrics:
    kind: cloud-monitoring-query-prometheus
    source: cloud-monitoring-source
    description: |
      Fetches query level Cloud Monitoring data (timeseries metrics) for queries running in a MySQL instance using a PromQL query. Take from the user the projectID and instanceID for which the metrics timeseries data needs to be fetched.
      To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

      Generate PromQL `query` for MySQL query metrics. Use the provided metrics and rules to construct queries. Get the labels like `instance_id`, `query_hash` from user intent. If query_hash is provided then use the per_query metrics. Query hash and query id are the same.

      Defaults:
      1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

      PromQL Query Examples:
      1. Basic Time Series: `avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m])`
      2. Top K: `topk(30, avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      3. Mean: `avg(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      4. Minimum: `min(min_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      5. Maximum: `max(max_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      6. Sum: `sum(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      7. Count streams: `count(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      8. Percentile with groupby on resource_id, database: `quantile by ("resource_id","database")(0.99,avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`

      Available Metrics List: metricname. description. monitored resource. labels. resource_id label format is `project_id:instance_id` which is actually instance id only. aggregate is the aggregated values for all query stats, Use aggregate metrics if query id is not provided. For perquery metrics do not fetch querystring unless specified by user specifically. Have the aggregation on query hash to avoid fetching the querystring. Do not use latency metrics for anything.
      1. `dbinsights.googleapis.com/aggregate/latencies`: Cumulative query latency distribution per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      2. `dbinsights.googleapis.com/aggregate/execution_time`: Cumulative query execution time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      3. `dbinsights.googleapis.com/aggregate/execution_count`: Total number of query executions per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      4. `dbinsights.googleapis.com/aggregate/lock_time`: Cumulative lock wait time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `lock_type`, `database`, `project_id`, `resource_id`.
      5. `dbinsights.googleapis.com/aggregate/io_time`: Cumulative IO wait time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      6. `dbinsights.googleapis.com/aggregate/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `user`, `client_addr`, `row_status`, `database`, `project_id`, `resource_id`.
      7. `dbinsights.googleapis.com/perquery/latencies`: Cumulative query latency distribution per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      8. `dbinsights.googleapis.com/perquery/execution_time`: Cumulative query execution time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      9. `dbinsights.googleapis.com/perquery/execution_count`: Total number of query executions per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      10. `dbinsights.googleapis.com/perquery/lock_time`: Cumulative lock wait time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `lock_type`, `query_hash`, `database`, `project_id`, `resource_id`.
      11. `dbinsights.googleapis.com/perquery/io_time`: Cumulative IO wait time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      12. `dbinsights.googleapis.com/perquery/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `row_status`, `database`, `project_id`, `resource_id`.
      13. `dbinsights.googleapis.com/pertag/latencies`: Cumulative query latency distribution per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      14. `dbinsights.googleapis.com/pertag/execution_time`: Cumulative query execution time per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      15. `dbinsights.googleapis.com/pertag/execution_count`: Total number of query executions per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      16. `dbinsights.googleapis.com/pertag/lock_time`: Cumulative lock wait time per user, database and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `lock_type`, `tag_hash`, `database`, `project_id`, `resource_id`.
      17. `dbinsights.googleapis.com/pertag/io_time`: Cumulative IO wait time per user, database and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      18. `dbinsights.googleapis.com/pertag/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `row_status`, `database`, `project_id`, `resource_id`.

toolsets:
  cloud-sql-mysql-cloud-monitoring-tools:
    - get_system_metrics
    - get_query_metrics

@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloud-sql-mysql-source:
    kind: cloud-sql-mysql
@@ -7,176 +21,18 @@ sources:
    database: ${CLOUD_SQL_MYSQL_DATABASE}
    user: ${CLOUD_SQL_MYSQL_USER}
    password: ${CLOUD_SQL_MYSQL_PASSWORD}
    ipType: ${CLOUD_SQL_MYSQL_IP_TYPE:PUBLIC}
tools:
  execute_sql:
    kind: mysql-execute-sql
    source: cloud-sql-mysql-source
    description: Use this tool to execute SQL.
  list_tables:
    kind: mysql-sql
    kind: mysql-list-tables
    source: cloud-sql-mysql-source
    description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
    statement: |
      SELECT
        T.TABLE_SCHEMA AS schema_name,
        T.TABLE_NAME AS object_name,
        CASE
          WHEN @output_format = 'simple' THEN
            JSON_OBJECT('name', T.TABLE_NAME)
          ELSE
            CONVERT( JSON_OBJECT(
              'schema_name', T.TABLE_SCHEMA,
              'object_name', T.TABLE_NAME,
              'object_type', 'TABLE',
              'owner', (
                SELECT
                  IFNULL(U.GRANTEE, 'N/A')
                FROM
                  INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
                WHERE
                  U.TABLE_SCHEMA = T.TABLE_SCHEMA
                LIMIT 1
              ),
              'comment', IFNULL(T.TABLE_COMMENT, ''),
              'columns', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'column_name', C.COLUMN_NAME,
                        'data_type', C.COLUMN_TYPE,
                        'ordinal_position', C.ORDINAL_POSITION,
                        'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
                        'column_default', C.COLUMN_DEFAULT,
                        'column_comment', IFNULL(C.COLUMN_COMMENT, '')
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.COLUMNS C
                WHERE
                  C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
                ORDER BY C.ORDINAL_POSITION
              ),
              'constraints', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'constraint_name', TC.CONSTRAINT_NAME,
                        'constraint_type',
                        CASE TC.CONSTRAINT_TYPE
                          WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
                          WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
                          WHEN 'UNIQUE' THEN 'UNIQUE'
                          ELSE TC.CONSTRAINT_TYPE
                        END,
                        'constraint_definition', '',
                        'constraint_columns', (
                          SELECT
                            IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
                          FROM
                            INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
                          WHERE
                            KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                            AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                            AND KCU.TABLE_NAME = TC.TABLE_NAME
                          ORDER BY KCU.ORDINAL_POSITION
                        ),
                        'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
                        'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
                          (SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
                           FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
                           WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                             AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                             AND FKCU.TABLE_NAME = TC.TABLE_NAME
                             AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
                           ORDER BY FKCU.ORDINAL_POSITION),
                          NULL
                        )
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
                LEFT JOIN
                  INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
                  ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
                  AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
                  AND TC.TABLE_NAME = RC.TABLE_NAME
                WHERE
                  TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
              ),
              'indexes', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'index_name', IndexData.INDEX_NAME,
                        'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
                        'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
                        'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM (
                  SELECT
                    S.TABLE_SCHEMA,
                    S.TABLE_NAME,
                    S.INDEX_NAME,
                    MIN(S.NON_UNIQUE) AS NON_UNIQUE, -- Aggregate NON_UNIQUE here to get unique status for the index
                    JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY -- Aggregate columns into an array for this index
                  FROM
                    INFORMATION_SCHEMA.STATISTICS S
                  WHERE
                    S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
                  GROUP BY
                    S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
                ) AS IndexData
                ORDER BY IndexData.INDEX_NAME
              ),
              'triggers', (
                SELECT
                  IFNULL(
                    JSON_ARRAYAGG(
                      JSON_OBJECT(
                        'trigger_name', TR.TRIGGER_NAME,
                        'trigger_definition', TR.ACTION_STATEMENT
                      )
                    ),
                    JSON_ARRAY()
                  )
                FROM
                  INFORMATION_SCHEMA.TRIGGERS TR
                WHERE
                  TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
                ORDER BY TR.TRIGGER_NAME
              )
            ) USING utf8mb4)
        END AS object_details
      FROM
        INFORMATION_SCHEMA.TABLES T
      CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
      WHERE
        T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
        AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
        AND T.TABLE_TYPE = 'BASE TABLE'
      ORDER BY
        T.TABLE_SCHEMA, T.TABLE_NAME;
    parameters:
      - name: table_names
        type: string
        description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
        default: ""
      - name: output_format
        type: string
        description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
        default: "detailed"

toolsets:
  cloud-sql-mysql-database-tools:
  cloud_sql_mysql_database_tools:
    - execute_sql
    - list_tables

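The net effect of the large hunk above, condensed. The hunk header (-176 lines, +18) indicates that the lengthy INFORMATION_SCHEMA statement maintained inline under `kind: mysql-sql` is removed, and the tool is rewired to the built-in `mysql-list-tables` kind; the condensed view below is a reading of the hunk, not verbatim diff output:

    # Before: statement maintained in the config
    list_tables:
      kind: mysql-sql
      source: cloud-sql-mysql-source
      statement: |
        SELECT T.TABLE_SCHEMA AS schema_name, ...   # ~170 lines of SQL, elided
    # After: the SQL lives inside the tool kind itself
    list_tables:
      kind: mysql-list-tables
      source: cloud-sql-mysql-source
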
50  internal/prebuiltconfigs/tools/cloud-sql-postgres-admin.yaml  Normal file
@@ -0,0 +1,50 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
  cloud-sql-admin-source:
    kind: cloud-sql-admin

tools:
  create_instance:
    kind: cloud-sql-postgres-create-instance
    source: cloud-sql-admin-source
  get_instance:
    kind: cloud-sql-get-instance
    source: cloud-sql-admin-source
  list_instances:
    kind: cloud-sql-list-instances
    source: cloud-sql-admin-source
  create_database:
    kind: cloud-sql-create-database
    source: cloud-sql-admin-source
  list_databases:
    kind: cloud-sql-list-databases
    source: cloud-sql-admin-source
  create_user:
    kind: cloud-sql-create-users
    source: cloud-sql-admin-source
  wait_for_operation:
    kind: cloud-sql-wait-for-operation
    source: cloud-sql-admin-source

toolsets:
  cloud_sql_postgres_admin_tools:
    - create_instance
    - get_instance
    - list_instances
    - create_database
    - list_databases
    - create_user
    - wait_for_operation

@@ -0,0 +1,113 @@
|
||||
# Copyright 2025 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
sources:
|
||||
cloud-monitoring-source:
|
||||
kind: cloud-monitoring
|
||||
tools:
|
||||
get_system_metrics:
|
||||
kind: cloud-monitoring-query-prometheus
|
||||
source: cloud-monitoring-source
|
||||
description: |
|
||||
Fetches system level cloudmonitoring data (timeseries metrics) for a Postgres instance using a PromQL query. Take projectId and instanceId from the user for which the metrics timeseries data needs to be fetched.
|
||||
To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.
|
||||
|
||||
Generate PromQL `query` for Postgres system metrics. Use the provided metrics and rules to construct queries, Get the labels like `instance_id` from user intent.
|
||||
|
||||
Defaults:
|
||||
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.
|
||||
|
||||
PromQL Query Examples:
|
||||
1. Basic Time Series: `avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m])`
|
||||
2. Top K: `topk(30, avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
3. Mean: `avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
4. Minimum: `min(min_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
5. Maximum: `max(max_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
6. Sum: `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
7. Count streams: `count(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
8. Percentile with groupby on database_id: `quantile by ("database_id")(0.99,avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
|
||||
Available Metrics List: metricname. description. monitored resource. labels. database_id is actually the instance id and the format is `project_id:instance_id`.
|
||||
1. `cloudsql.googleapis.com/database/postgresql/new_connection_count`: Count of new connections added to the postgres instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
2. `cloudsql.googleapis.com/database/postgresql/backends_in_wait`: Number of backends in wait in postgres instance. `cloudsql_database`. `backend_type`, `wait_event`, `wait_event_type`, `project_id`, `database_id`.
|
||||
3. `cloudsql.googleapis.com/database/postgresql/transaction_count`: Delta count of number of transactions. `cloudsql_database`. `database`, `transaction_type`, `project_id`, `database_id`.
|
||||
4. `cloudsql.googleapis.com/database/memory/components`: Memory stats components in percentage as usage, cache and free memory for the database. `cloudsql_database`. `component`, `project_id`, `database_id`.
|
||||
5. `cloudsql.googleapis.com/database/postgresql/external_sync/max_replica_byte_lag`: Replication lag in bytes for Postgres External Server (ES) replicas. Aggregated across all DBs on the replica. `cloudsql_database`. `project_id`, `database_id`.
|
||||
6. `cloudsql.googleapis.com/database/cpu/utilization`: Current CPU utilization represented as a percentage of the reserved CPU that is currently in use. Values are typically numbers between 0.0 and 1.0 (but might exceed 1.0). Charts display the values as a percentage between 0% and 100% (or more). `cloudsql_database`. `project_id`, `database_id`.
|
||||
7. `cloudsql.googleapis.com/database/disk/bytes_used_by_data_type`: Data utilization in bytes. `cloudsql_database`. `data_type`, `project_id`, `database_id`.
|
||||
8. `cloudsql.googleapis.com/database/disk/read_ops_count`: Delta count of data disk read IO operations. `cloudsql_database`. `project_id`, `database_id`.
|
||||
9. `cloudsql.googleapis.com/database/disk/write_ops_count`: Delta count of data disk write IO operations. `cloudsql_database`. `project_id`, `database_id`.
|
||||
10. `cloudsql.googleapis.com/database/postgresql/num_backends_by_state`: Number of connections to the Cloud SQL PostgreSQL instance, grouped by its state. `cloudsql_database`. `database`, `state`, `project_id`, `database_id`.
|
||||
11. `cloudsql.googleapis.com/database/postgresql/num_backends`: Number of connections to the Cloud SQL PostgreSQL instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
12. `cloudsql.googleapis.com/database/network/received_bytes_count`: Delta count of bytes received through the network. `cloudsql_database`. `project_id`, `database_id`.
|
||||
13. `cloudsql.googleapis.com/database/network/sent_bytes_count`: Delta count of bytes sent through the network. `cloudsql_database`. `destination`, `project_id`, `database_id`.
|
||||
14. `cloudsql.googleapis.com/database/postgresql/deadlock_count`: Number of deadlocks detected for this database. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
15. `cloudsql.googleapis.com/database/postgresql/blocks_read_count`: Number of disk blocks read by this database. The source field distingushes actual reads from disk versus reads from buffer cache. `cloudsql_database`. `database`, `source`, `project_id`, `database_id`.
|
||||
16. `cloudsql.googleapis.com/database/postgresql/tuples_processed_count`: Number of tuples(rows) processed for a given database for operations like insert, update or delete. `cloudsql_database`. `operation_type`, `database`, `project_id`, `database_id`.
|
||||
17. `cloudsql.googleapis.com/database/postgresql/tuple_size`: Number of tuples (rows) in the database. `cloudsql_database`. `database`, `tuple_state`, `project_id`, `database_id`.
|
||||
18. `cloudsql.googleapis.com/database/postgresql/vacuum/oldest_transaction_age`: Age of the oldest transaction yet to be vacuumed in the Cloud SQL PostgreSQL instance, measured in number of transactions that have happened since the oldest transaction. `cloudsql_database`. `oldest_transaction_type`, `project_id`, `database_id`.
|
||||
19. `cloudsql.googleapis.com/database/replication/log_archive_success_count`: Number of successful attempts for archiving replication log files. `cloudsql_database`. `project_id`, `database_id`.
|
||||
20. `cloudsql.googleapis.com/database/replication/log_archive_failure_count`: Number of failed attempts for archiving replication log files. `cloudsql_database`. `project_id`, `database_id`.
|
||||
21. `cloudsql.googleapis.com/database/postgresql/transaction_id_utilization`: Current utilization represented as a percentage of transaction IDs consumed by the Cloud SQL PostgreSQL instance. Values are typically numbers between 0.0 and 1.0. Charts display the values as a percentage between 0% and 100% . `cloudsql_database`. `project_id`, `database_id`.
|
||||
22. `cloudsql.googleapis.com/database/postgresql/num_backends_by_application`: Number of connections to the Cloud SQL PostgreSQL instance, grouped by applications. `cloudsql_database`. `application`, `project_id`, `database_id`.
|
||||
23. `cloudsql.googleapis.com/database/postgresql/tuples_fetched_count`: Total number of rows fetched as a result of queries per database in the PostgreSQL instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
24. `cloudsql.googleapis.com/database/postgresql/tuples_returned_count`: Total number of rows scanned while processing the queries per database in the PostgreSQL instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
25. `cloudsql.googleapis.com/database/postgresql/temp_bytes_written_count`: Total amount of data (in bytes) written to temporary files by the queries per database. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
26. `cloudsql.googleapis.com/database/postgresql/temp_files_written_count`: Total number of temporary files used for writing data while performing algorithms such as join and sort. `cloudsql_database`. `database`, `project_id`, `database_id`.
|
||||
|
||||
get_query_metrics:
|
||||
kind: cloud-monitoring-query-prometheus
|
||||
source: cloud-monitoring-source
|
||||
description: |
|
||||
Fetches query level cloudmonitoring data (timeseries metrics) for queries running in Postgres instance using a PromQL query. Take projectID and instanceID from the user for which the metrics timeseries data needs to be fetched.
|
||||
To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.
|
||||
|
||||
Generate PromQL `query` for Postgres query metrics. Use the provided metrics and rules to construct queries, Get the labels like `instance_id`, `query_hash` from user intent. If query_hash is provided then use the per_query metrics. Query hash and query id are same.
|
||||
|
||||
Defaults:
|
||||
1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.
|
||||
|
||||
PromQL Query Examples:
|
||||
1. Basic Time Series: `avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m])`
|
||||
2. Top K: `topk(30, avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
3. Mean: `avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
|
||||
4. Minimum: `min(min_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
5. Maximum: `max(max_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
6. Sum: `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
7. Count streams: `count(avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
8. Percentile with groupby on resource_id, database: `quantile by ("resource_id","database")(0.99,avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`

Available metrics list (each entry: metric name. description. monitored resource. labels.). The `resource_id` label has the format `project_id:instance_id`. The `aggregate` metrics hold aggregated values across all query stats; use aggregate metrics if a query id is not provided. For perquery metrics, do not fetch the querystring unless the user specifically asks for it; aggregate on the query hash to avoid fetching the querystring. Do not use the latency metrics for anything.
1. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/latencies`: Aggregated query latency distribution. `cloudsql_instance_database`. `user`, `client_addr`, `project_id`, `resource_id`.
2. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time`: Accumulated aggregated query execution time since the last sample. `cloudsql_instance_database`. `user`, `client_addr`, `project_id`, `resource_id`.
3. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/io_time`: Accumulated aggregated IO time since the last sample. `cloudsql_instance_database`. `user`, `client_addr`, `io_type`, `project_id`, `resource_id`.
4. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/lock_time`: Accumulated aggregated lock wait time since the last sample. `cloudsql_instance_database`. `user`, `client_addr`, `lock_type`, `project_id`, `resource_id`.
5. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/row_count`: Aggregated number of retrieved or affected rows since the last sample. `cloudsql_instance_database`. `user`, `client_addr`, `project_id`, `resource_id`.
6. `cloudsql.googleapis.com/database/postgresql/insights/aggregate/shared_blk_access_count`: Aggregated shared blocks accessed by statement execution. `cloudsql_instance_database`. `user`, `client_addr`, `access_type`, `project_id`, `resource_id`.
7. `cloudsql.googleapis.com/database/postgresql/insights/perquery/latencies`: Per query latency distribution. `cloudsql_instance_database`. `user`, `client_addr`, `querystring`, `query_hash`, `project_id`, `resource_id`.
8. `cloudsql.googleapis.com/database/postgresql/insights/perquery/execution_time`: Accumulated execution times per user per database per query. `cloudsql_instance_database`. `user`, `client_addr`, `querystring`, `query_hash`, `project_id`, `resource_id`.
9. `cloudsql.googleapis.com/database/postgresql/insights/perquery/io_time`: Accumulated IO time since the last sample per query. `cloudsql_instance_database`. `user`, `client_addr`, `io_type`, `querystring`, `query_hash`, `project_id`, `resource_id`.
10. `cloudsql.googleapis.com/database/postgresql/insights/perquery/lock_time`: Accumulated lock wait time since the last sample per query. `cloudsql_instance_database`. `user`, `client_addr`, `lock_type`, `querystring`, `query_hash`, `project_id`, `resource_id`.
11. `cloudsql.googleapis.com/database/postgresql/insights/perquery/row_count`: The number of retrieved or affected rows since the last sample per query. `cloudsql_instance_database`. `user`, `client_addr`, `querystring`, `query_hash`, `project_id`, `resource_id`.
12. `cloudsql.googleapis.com/database/postgresql/insights/perquery/shared_blk_access_count`: Shared blocks accessed by statement execution per query. `cloudsql_instance_database`. `user`, `client_addr`, `access_type`, `querystring`, `query_hash`, `project_id`, `resource_id`.
13. `cloudsql.googleapis.com/database/postgresql/insights/pertag/latencies`: Query latency distribution. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `project_id`, `resource_id`.
14. `cloudsql.googleapis.com/database/postgresql/insights/pertag/execution_time`: Accumulated execution times since the last sample. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `project_id`, `resource_id`.
15. `cloudsql.googleapis.com/database/postgresql/insights/pertag/io_time`: Accumulated IO time since the last sample per tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `io_type`, `tag_hash`, `project_id`, `resource_id`.
16. `cloudsql.googleapis.com/database/postgresql/insights/pertag/lock_time`: Accumulated lock wait time since the last sample per tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `lock_type`, `tag_hash`, `project_id`, `resource_id`.
17. `cloudsql.googleapis.com/database/postgresql/insights/pertag/shared_blk_access_count`: Shared blocks accessed by statement execution per tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `access_type`, `tag_hash`, `project_id`, `resource_id`.
18. `cloudsql.googleapis.com/database/postgresql/insights/pertag/row_count`: The number of retrieved or affected rows since the last sample per tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `project_id`, `resource_id`.

toolsets:
cloud-sql-postgres-cloud-monitoring-tools:
- get_system_metrics
- get_query_metrics
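As a sketch of how one of the PromQL-style queries above could be issued outside the toolbox, the snippet below posts an aggregate execution_time query to Cloud Monitoring's Prometheus-compatible HTTP API. The endpoint path, OAuth scope, and the `my-projectId` placeholder are assumptions for illustration; the get_query_metrics tool performs the equivalent call internally.

```go
// Hedged sketch: running one of the aggregate execution_time queries above
// against Cloud Monitoring's Prometheus-compatible query endpoint. The
// endpoint path and project id are assumptions, not part of this config.
package main

import (
	"context"
	"fmt"
	"io"
	"net/url"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Authenticate with Application Default Credentials.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/monitoring.read")
	if err != nil {
		panic(err)
	}
	query := `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/postgresql/insights/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId"}[5m]))`
	endpoint := "https://monitoring.googleapis.com/v1/projects/my-projectId/location/global/prometheus/api/v1/query"
	resp, err := client.PostForm(endpoint, url.Values{"query": {query}})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // Prometheus-style JSON result
}
```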
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
cloudsql-pg-source:
kind: cloud-sql-postgres
@@ -29,95 +30,11 @@ tools:
description: Use this tool to execute SQL.

list_tables:
kind: postgres-sql
kind: postgres-list-tables
source: cloudsql-pg-source
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
WITH desired_relkinds AS (
SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
),
table_info AS (
SELECT
t.oid AS table_oid,
ns.nspname AS schema_name,
t.relname AS table_name,
pg_get_userbyid(t.relowner) AS table_owner,
obj_description(t.oid, 'pg_class') AS table_comment,
t.relkind AS object_kind
FROM
pg_class t
JOIN
pg_namespace ns ON ns.oid = t.relnamespace
CROSS JOIN desired_relkinds dk
WHERE
t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
),
columns_info AS (
SELECT
att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
),
constraints_info AS (
SELECT
con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
),
indexes_info AS (
SELECT
idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
(SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
),
triggers_info AS (
SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
)
SELECT
ti.schema_name,
ti.table_name AS object_name,
CASE
WHEN $2 = 'simple' THEN
-- IF format is 'simple', return basic JSON
json_build_object('name', ti.table_name)
ELSE
json_build_object(
'schema_name', ti.schema_name,
'object_name', ti.table_name,
'object_type', CASE ti.object_kind
WHEN 'r' THEN 'TABLE'
WHEN 'p' THEN 'PARTITIONED TABLE'
ELSE ti.object_kind::text -- Should not happen due to WHERE clause
END,
'owner', ti.table_owner,
'comment', ti.table_comment,
'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
)
END AS object_details
FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
- name: output_format
type: string
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
default: "detailed"

toolsets:
cloud-sql-postgres-database-tools:
cloud_sql_postgres_database_tools:
- execute_sql
- list_tables
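The statement above is ordinary parameterized SQL: `$1` carries the comma-separated table filter and `$2` the output format. A minimal sketch of invoking it directly with the pgx driver follows; the connection string and table names are placeholders, and `listTablesSQL` is assumed to hold the statement text from the config:

```go
// Hedged sketch: executing the list_tables statement above by hand.
// $1 = table_names, $2 = output_format, matching the declared parameters.
package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
)

// listTablesSQL is assumed to hold the statement text from the config above.
const listTablesSQL = `/* paste the list_tables statement here */`

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/mydb") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// The query returns one row per table: schema_name, object_name, object_details.
	rows, err := conn.Query(ctx, listTablesSQL, "users,orders", "simple")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var schema, name, details string
		if err := rows.Scan(&schema, &name, &details); err != nil {
			panic(err)
		}
		fmt.Println(schema, name, details)
	}
}
```

The same invocation pattern applies to the identical statement in the postgres.yaml config later in this diff.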
@@ -1,24 +1,38 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
dataplex-source:
kind: "dataplex"
project: ${DATAPLEX_PROJECT}

tools:
dataplex_search_entries:
search_entries:
kind: dataplex-search-entries
source: dataplex-source
description: Use this tool to search for entries in Dataplex Catalog based on the provided search query.
dataplex_lookup_entry:
lookup_entry:
kind: dataplex-lookup-entry
source: dataplex-source
description: Use this tool to retrieve a specific entry from Dataplex Catalog.
dataplex_search_aspect_types:
search_aspect_types:
kind: dataplex-search-aspect-types
source: dataplex-source
description: Use this tool to find aspect types relevant to the query.

toolsets:
dataplex-tools:
- dataplex_search_entries
- dataplex_lookup_entry
- dataplex_search_aspect_types
dataplex_tools:
- search_entries
- lookup_entry
- search_aspect_types
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
firestore-source:
kind: firestore
@@ -18,11 +19,11 @@ sources:
database: ${FIRESTORE_DATABASE:}

tools:
firestore-get-documents:
get_documents:
kind: firestore-get-documents
source: firestore-source
description: Gets multiple documents from Firestore by their paths
firestore-add-documents:
add_documents:
kind: firestore-add-documents
source: firestore-source
description: |
@@ -34,7 +35,7 @@ tools:
5. Handle timestamps properly: Use RFC3339 format for timestamp strings
6. Base64 encode binary data: Binary data must be base64 encoded in the bytesValue field
7. Consider security rules: Ensure your Firestore security rules allow document creation in the target collection
firestore-update-document:
update_document:
kind: firestore-update-document
source: firestore-source
description: |
@@ -46,36 +47,36 @@ tools:
5. Use returnData sparingly: Only set to true when you need to verify the exact data after the update
6. Handle timestamps properly: Use RFC3339 format for timestamp strings
7. Consider security rules: Ensure your Firestore security rules allow document updates
firestore-list-collections:
list_collections:
kind: firestore-list-collections
source: firestore-source
description: List Firestore collections for a given parent path
firestore-delete-documents:
delete_documents:
kind: firestore-delete-documents
source: firestore-source
description: Delete multiple documents from Firestore
firestore-query-collection:
query_collection:
kind: firestore-query-collection
source: firestore-source
description: |
Retrieves one or more Firestore documents from a collection in a database in the current project by a collection with a full document path.
Use this if you know the exact path of a collection and the filtering clause you would like for the document.
firestore-get-rules:
get_rules:
kind: firestore-get-rules
source: firestore-source
description: Retrieves the active Firestore security rules for the current project
firestore-validate-rules:
validate_rules:
kind: firestore-validate-rules
source: firestore-source
description: Checks the provided Firestore Rules source for syntax and validation errors. Provide the source code to validate.

toolsets:
firestore-database-tools:
- firestore-get-documents
- firestore-add-documents
- firestore-update-document
- firestore-list-collections
- firestore-delete-documents
- firestore-query-collection
- firestore-get-rules
- firestore-validate-rules
firestore_database_tools:
- get_documents
- add_documents
- update_document
- list_collections
- delete_documents
- query_collection
- get_rules
- validate_rules
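The add_documents and update_document descriptions above call for RFC3339 timestamps and base64-encoded binary data. A small sketch of preparing field values in those shapes; the field layout follows the Firestore REST `Value` types mentioned in the descriptions, and the document structure itself is illustrative only:

```go
// Hedged sketch: building Firestore field values in the shapes the tool
// descriptions above require (RFC3339 timestampValue, base64 bytesValue).
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef} // arbitrary binary payload

	fields := map[string]any{
		"created": map[string]any{
			// Guideline 5/6 above: timestamps as RFC3339 strings.
			"timestampValue": time.Now().UTC().Format(time.RFC3339),
		},
		"payload": map[string]any{
			// Guideline 6 above: binary data base64-encoded in bytesValue.
			"bytesValue": base64.StdEncoding.EncodeToString(raw),
		},
	}

	out, _ := json.MarshalIndent(fields, "", "  ")
	fmt.Println(string(out))
}
```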
@@ -696,7 +696,7 @@ tools:
and the resulting tiles will be added in order.

toolsets:
looker-tools:
looker_tools:
- get_models
- get_explores
- get_dimensions
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
mssql-source:
kind: mssql
@@ -273,6 +287,6 @@ tools:
default: "detailed"

toolsets:
mssql-database-tools:
mssql_database_tools:
- execute_sql
- list_tables
@@ -32,170 +32,11 @@ tools:
source: mysql-source
description: Use this tool to execute SQL.
list_tables:
kind: mysql-sql
kind: mysql-list-tables
source: mysql-source
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
SELECT
T.TABLE_SCHEMA AS schema_name,
T.TABLE_NAME AS object_name,
CASE
WHEN @output_format = 'simple' THEN
JSON_OBJECT('name', T.TABLE_NAME)
ELSE
CONVERT( JSON_OBJECT(
'schema_name', T.TABLE_SCHEMA,
'object_name', T.TABLE_NAME,
'object_type', 'TABLE',
'owner', (
SELECT
IFNULL(U.GRANTEE, 'N/A')
FROM
INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
WHERE
U.TABLE_SCHEMA = T.TABLE_SCHEMA
LIMIT 1
),
'comment', IFNULL(T.TABLE_COMMENT, ''),
'columns', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'column_name', C.COLUMN_NAME,
'data_type', C.COLUMN_TYPE,
'ordinal_position', C.ORDINAL_POSITION,
'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
'column_default', C.COLUMN_DEFAULT,
'column_comment', IFNULL(C.COLUMN_COMMENT, '')
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.COLUMNS C
WHERE
C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
ORDER BY C.ORDINAL_POSITION
),
'constraints', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'constraint_name', TC.CONSTRAINT_NAME,
'constraint_type',
CASE TC.CONSTRAINT_TYPE
WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
WHEN 'UNIQUE' THEN 'UNIQUE'
ELSE TC.CONSTRAINT_TYPE
END,
'constraint_definition', '',
'constraint_columns', (
SELECT
IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
FROM
INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
WHERE
KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
AND KCU.TABLE_NAME = TC.TABLE_NAME
ORDER BY KCU.ORDINAL_POSITION
),
'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
(SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
AND FKCU.TABLE_NAME = TC.TABLE_NAME
AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
ORDER BY FKCU.ORDINAL_POSITION),
NULL
)
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
LEFT JOIN
INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
AND TC.TABLE_NAME = RC.TABLE_NAME
WHERE
TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
),
'indexes', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'index_name', IndexData.INDEX_NAME,
'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
)
),
JSON_ARRAY()
)
FROM (
SELECT
S.TABLE_SCHEMA,
S.TABLE_NAME,
S.INDEX_NAME,
MIN(S.NON_UNIQUE) AS NON_UNIQUE, -- Aggregate NON_UNIQUE here to get unique status for the index
JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY -- Aggregate columns into an array for this index
FROM
INFORMATION_SCHEMA.STATISTICS S
WHERE
S.TABLE_SCHEMA = T.TABLE_SCHEMA AND S.TABLE_NAME = T.TABLE_NAME
GROUP BY
S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
) AS IndexData
ORDER BY IndexData.INDEX_NAME
),
'triggers', (
SELECT
IFNULL(
JSON_ARRAYAGG(
JSON_OBJECT(
'trigger_name', TR.TRIGGER_NAME,
'trigger_definition', TR.ACTION_STATEMENT
)
),
JSON_ARRAY()
)
FROM
INFORMATION_SCHEMA.TRIGGERS TR
WHERE
TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
ORDER BY TR.TRIGGER_NAME
)
) USING utf8mb4)
END AS object_details
FROM
INFORMATION_SCHEMA.TABLES T
CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
WHERE
T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
AND T.TABLE_TYPE = 'BASE TABLE'
ORDER BY
T.TABLE_SCHEMA, T.TABLE_NAME;
parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
default: ""
- name: output_format
type: string
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
default: "detailed"

toolsets:
mysql-database-tools:
mysql_database_tools:
- execute_sql
- list_tables
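Unlike the PostgreSQL version, the MySQL list_tables statement above smuggles its two inputs in through session variables: the `CROSS JOIN (SELECT @table_names := ?, @output_format := ?)` row assigns both `?` placeholders once so the variables can be referenced repeatedly in the query body. A minimal sketch with database/sql; the DSN and table list are placeholders, and `listTablesSQL` is assumed to hold the statement text:

```go
// Hedged sketch: invoking the MySQL list_tables statement above. The two ?
// placeholders feed @table_names and @output_format via the CROSS JOIN row.
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

// listTablesSQL is assumed to hold the statement text from the config above.
const listTablesSQL = `/* paste the list_tables statement here */`

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(localhost:3306)/mydb") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()

	rows, err := db.Query(listTablesSQL, "users,orders", "detailed")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var schema, name, details string
		if err := rows.Scan(&schema, &name, &details); err != nil {
			panic(err)
		}
		fmt.Println(schema, name, details)
	}
}
```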
@@ -32,6 +32,6 @@ tools:
description: Use this tool to get the database schema.

toolsets:
neo4j-database-tools:
neo4j_database_tools:
- execute_cypher
- get_schema
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
oceanbase-source:
kind: oceanbase
@@ -164,6 +178,6 @@ tools:
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
toolsets:
oceanbase-database-tools:
oceanbase_database_tools:
- execute_sql
- list_tables
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
postgresql-source:
kind: postgres
@@ -28,95 +29,11 @@ tools:
description: Use this tool to execute SQL.

list_tables:
kind: postgres-sql
kind: postgres-list-tables
source: postgresql-source
description: "Lists detailed schema information (object type, columns, constraints, indexes, triggers, owner, comment) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
WITH desired_relkinds AS (
SELECT ARRAY['r', 'p']::char[] AS kinds -- Always consider both 'TABLE' and 'PARTITIONED TABLE'
),
table_info AS (
SELECT
t.oid AS table_oid,
ns.nspname AS schema_name,
t.relname AS table_name,
pg_get_userbyid(t.relowner) AS table_owner,
obj_description(t.oid, 'pg_class') AS table_comment,
t.relkind AS object_kind
FROM
pg_class t
JOIN
pg_namespace ns ON ns.oid = t.relnamespace
CROSS JOIN desired_relkinds dk
WHERE
t.relkind = ANY(dk.kinds) -- Filter by selected table relkinds ('r', 'p')
AND (NULLIF(TRIM($1), '') IS NULL OR t.relname = ANY(string_to_array($1,','))) -- $1 is object_names
AND ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
AND ns.nspname NOT LIKE 'pg_temp_%' AND ns.nspname NOT LIKE 'pg_toast_temp_%'
),
columns_info AS (
SELECT
att.attrelid AS table_oid, att.attname AS column_name, format_type(att.atttypid, att.atttypmod) AS data_type,
att.attnum AS column_ordinal_position, att.attnotnull AS is_not_nullable,
pg_get_expr(ad.adbin, ad.adrelid) AS column_default, col_description(att.attrelid, att.attnum) AS column_comment
FROM pg_attribute att LEFT JOIN pg_attrdef ad ON att.attrelid = ad.adrelid AND att.attnum = ad.adnum
JOIN table_info ti ON att.attrelid = ti.table_oid WHERE att.attnum > 0 AND NOT att.attisdropped
),
constraints_info AS (
SELECT
con.conrelid AS table_oid, con.conname AS constraint_name, pg_get_constraintdef(con.oid) AS constraint_definition,
CASE con.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'f' THEN 'FOREIGN KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' ELSE con.contype::text END AS constraint_type,
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.conkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = u.attnum) AS constraint_columns,
NULLIF(con.confrelid, 0)::regclass AS foreign_key_referenced_table,
(SELECT array_agg(att.attname ORDER BY u.attposition) FROM unnest(con.confkey) WITH ORDINALITY AS u(attnum, attposition) JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = u.attnum WHERE con.contype = 'f') AS foreign_key_referenced_columns
FROM pg_constraint con JOIN table_info ti ON con.conrelid = ti.table_oid
),
indexes_info AS (
SELECT
idx.indrelid AS table_oid, ic.relname AS index_name, pg_get_indexdef(idx.indexrelid) AS index_definition,
idx.indisunique AS is_unique, idx.indisprimary AS is_primary, am.amname AS index_method,
(SELECT array_agg(att.attname ORDER BY u.ord) FROM unnest(idx.indkey::int[]) WITH ORDINALITY AS u(colidx, ord) LEFT JOIN pg_attribute att ON att.attrelid = idx.indrelid AND att.attnum = u.colidx WHERE u.colidx <> 0) AS index_columns
FROM pg_index idx JOIN pg_class ic ON ic.oid = idx.indexrelid JOIN pg_am am ON am.oid = ic.relam JOIN table_info ti ON idx.indrelid = ti.table_oid
),
triggers_info AS (
SELECT tg.tgrelid AS table_oid, tg.tgname AS trigger_name, pg_get_triggerdef(tg.oid) AS trigger_definition, tg.tgenabled AS trigger_enabled_state
FROM pg_trigger tg JOIN table_info ti ON tg.tgrelid = ti.table_oid WHERE NOT tg.tgisinternal
)
SELECT
ti.schema_name,
ti.table_name AS object_name,
CASE
WHEN $2 = 'simple' THEN
-- IF format is 'simple', return basic JSON
json_build_object('name', ti.table_name)
ELSE
json_build_object(
'schema_name', ti.schema_name,
'object_name', ti.table_name,
'object_type', CASE ti.object_kind
WHEN 'r' THEN 'TABLE'
WHEN 'p' THEN 'PARTITIONED TABLE'
ELSE ti.object_kind::text -- Should not happen due to WHERE clause
END,
'owner', ti.table_owner,
'comment', ti.table_comment,
'columns', COALESCE((SELECT json_agg(json_build_object('column_name',ci.column_name,'data_type',ci.data_type,'ordinal_position',ci.column_ordinal_position,'is_not_nullable',ci.is_not_nullable,'column_default',ci.column_default,'column_comment',ci.column_comment) ORDER BY ci.column_ordinal_position) FROM columns_info ci WHERE ci.table_oid = ti.table_oid), '[]'::json),
'constraints', COALESCE((SELECT json_agg(json_build_object('constraint_name',cons.constraint_name,'constraint_type',cons.constraint_type,'constraint_definition',cons.constraint_definition,'constraint_columns',cons.constraint_columns,'foreign_key_referenced_table',cons.foreign_key_referenced_table,'foreign_key_referenced_columns',cons.foreign_key_referenced_columns)) FROM constraints_info cons WHERE cons.table_oid = ti.table_oid), '[]'::json),
'indexes', COALESCE((SELECT json_agg(json_build_object('index_name',ii.index_name,'index_definition',ii.index_definition,'is_unique',ii.is_unique,'is_primary',ii.is_primary,'index_method',ii.index_method,'index_columns',ii.index_columns)) FROM indexes_info ii WHERE ii.table_oid = ti.table_oid), '[]'::json),
'triggers', COALESCE((SELECT json_agg(json_build_object('trigger_name',tri.trigger_name,'trigger_definition',tri.trigger_definition,'trigger_enabled_state',tri.trigger_enabled_state)) FROM triggers_info tri WHERE tri.table_oid = ti.table_oid), '[]'::json)
)
END AS object_details
FROM table_info ti ORDER BY ti.schema_name, ti.table_name;
parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
- name: output_format
type: string
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
default: "detailed"

toolsets:
postgres-database-tools:
postgres_database_tools:
- execute_sql
- list_tables
@@ -1,3 +1,17 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
spanner-source:
kind: "spanner"
@@ -216,7 +230,7 @@ tools:
default: "detailed"

toolsets:
spanner-postgres-database-tools:
spanner_postgres_database_tools:
- execute_sql
- execute_sql_dql
- list_tables
@@ -1,218 +1,41 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
spanner-source:
kind: spanner
project: ${SPANNER_PROJECT}
instance: ${SPANNER_INSTANCE}
database: ${SPANNER_DATABASE}
dialect: ${SPANNER_DIALECT:googlesql}

tools:
execute_sql:
kind: spanner-execute-sql
source: spanner-source
description: Use this tool to execute DML SQL
description: Use this tool to execute DML SQL. Please use the ${SPANNER_DIALECT:googlesql} interface for Spanner.

execute_sql_dql:
kind: spanner-execute-sql
source: spanner-source
description: Use this tool to execute DQL SQL
description: Use this tool to execute DQL SQL. Please use the ${SPANNER_DIALECT:googlesql} interface for Spanner.
readOnly: true

list_tables:
kind: spanner-sql
kind: spanner-list-tables
source: spanner-source
readOnly: true
description: "Lists detailed schema information (object type, columns, constraints, indexes) as JSON for user-created tables (ordinary or partitioned). Filters by a comma-separated list of names. If names are omitted, lists all tables in user schemas."
statement: |
WITH FilterTableNames AS (
SELECT DISTINCT TRIM(name) AS TABLE_NAME
FROM UNNEST(IF(@table_names = '' OR @table_names IS NULL, ['%'], SPLIT(@table_names, ','))) AS name
),

-- 1. Table Information
table_info_cte AS (
SELECT
T.TABLE_SCHEMA,
T.TABLE_NAME,
T.TABLE_TYPE,
T.PARENT_TABLE_NAME, -- For interleaved tables
T.ON_DELETE_ACTION -- For interleaved tables
FROM INFORMATION_SCHEMA.TABLES AS T
WHERE
T.TABLE_SCHEMA = ''
AND T.TABLE_TYPE = 'BASE TABLE'
AND (EXISTS (SELECT 1 FROM FilterTableNames WHERE FilterTableNames.TABLE_NAME = '%') OR T.TABLE_NAME IN (SELECT TABLE_NAME FROM FilterTableNames))
),

-- 2. Column Information (with JSON string for each column)
columns_info_cte AS (
SELECT
C.TABLE_SCHEMA,
C.TABLE_NAME,
ARRAY_AGG(
CONCAT(
'{',
'"column_name":"', IFNULL(C.COLUMN_NAME, ''), '",',
'"data_type":"', IFNULL(C.SPANNER_TYPE, ''), '",',
'"ordinal_position":', CAST(C.ORDINAL_POSITION AS STRING), ',',
'"is_not_nullable":', IF(C.IS_NULLABLE = 'NO', 'true', 'false'), ',',
'"column_default":', IF(C.COLUMN_DEFAULT IS NULL, 'null', CONCAT('"', C.COLUMN_DEFAULT, '"')),
'}'
) ORDER BY C.ORDINAL_POSITION
) AS columns_json_array_elements
FROM INFORMATION_SCHEMA.COLUMNS AS C
WHERE EXISTS (SELECT 1 FROM table_info_cte TI WHERE C.TABLE_SCHEMA = TI.TABLE_SCHEMA AND C.TABLE_NAME = TI.TABLE_NAME)
GROUP BY C.TABLE_SCHEMA, C.TABLE_NAME
),

-- Helper CTE for aggregating constraint columns
constraint_columns_agg_cte AS (
SELECT
CONSTRAINT_CATALOG,
CONSTRAINT_SCHEMA,
CONSTRAINT_NAME,
ARRAY_AGG(CONCAT('"', COLUMN_NAME, '"') ORDER BY ORDINAL_POSITION) AS column_names_json_list
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
GROUP BY CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME
),

-- 3. Constraint Information (with JSON string for each constraint)
constraints_info_cte AS (
SELECT
TC.TABLE_SCHEMA,
TC.TABLE_NAME,
ARRAY_AGG(
CONCAT(
'{',
'"constraint_name":"', IFNULL(TC.CONSTRAINT_NAME, ''), '",',
'"constraint_type":"', IFNULL(TC.CONSTRAINT_TYPE, ''), '",',
'"constraint_definition":',
CASE TC.CONSTRAINT_TYPE
WHEN 'CHECK' THEN IF(CC.CHECK_CLAUSE IS NULL, 'null', CONCAT('"', CC.CHECK_CLAUSE, '"'))
WHEN 'PRIMARY KEY' THEN CONCAT('"', 'PRIMARY KEY (', ARRAY_TO_STRING(COALESCE(KeyCols.column_names_json_list, []), ', '), ')', '"')
WHEN 'UNIQUE' THEN CONCAT('"', 'UNIQUE (', ARRAY_TO_STRING(COALESCE(KeyCols.column_names_json_list, []), ', '), ')', '"')
WHEN 'FOREIGN KEY' THEN CONCAT('"', 'FOREIGN KEY (', ARRAY_TO_STRING(COALESCE(KeyCols.column_names_json_list, []), ', '), ') REFERENCES ',
IFNULL(RefKeyTable.TABLE_NAME, ''),
' (', ARRAY_TO_STRING(COALESCE(RefKeyCols.column_names_json_list, []), ', '), ')', '"')
ELSE 'null'
END, ',',
'"constraint_columns":[', ARRAY_TO_STRING(COALESCE(KeyCols.column_names_json_list, []), ','), '],',
'"foreign_key_referenced_table":', IF(RefKeyTable.TABLE_NAME IS NULL, 'null', CONCAT('"', RefKeyTable.TABLE_NAME, '"')), ',',
'"foreign_key_referenced_columns":[', ARRAY_TO_STRING(COALESCE(RefKeyCols.column_names_json_list, []), ','), ']',
'}'
) ORDER BY TC.CONSTRAINT_NAME
) AS constraints_json_array_elements
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS TC
LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS AS CC
ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC
ON TC.CONSTRAINT_CATALOG = RC.CONSTRAINT_CATALOG AND TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS RefConstraint
ON RC.UNIQUE_CONSTRAINT_CATALOG = RefConstraint.CONSTRAINT_CATALOG AND RC.UNIQUE_CONSTRAINT_SCHEMA = RefConstraint.CONSTRAINT_SCHEMA AND RC.UNIQUE_CONSTRAINT_NAME = RefConstraint.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.TABLES AS RefKeyTable
ON RefConstraint.TABLE_CATALOG = RefKeyTable.TABLE_CATALOG AND RefConstraint.TABLE_SCHEMA = RefKeyTable.TABLE_SCHEMA AND RefConstraint.TABLE_NAME = RefKeyTable.TABLE_NAME
LEFT JOIN constraint_columns_agg_cte AS KeyCols
ON TC.CONSTRAINT_CATALOG = KeyCols.CONSTRAINT_CATALOG AND TC.CONSTRAINT_SCHEMA = KeyCols.CONSTRAINT_SCHEMA AND TC.CONSTRAINT_NAME = KeyCols.CONSTRAINT_NAME
LEFT JOIN constraint_columns_agg_cte AS RefKeyCols
ON RC.UNIQUE_CONSTRAINT_CATALOG = RefKeyCols.CONSTRAINT_CATALOG AND RC.UNIQUE_CONSTRAINT_SCHEMA = RefKeyCols.CONSTRAINT_SCHEMA AND RC.UNIQUE_CONSTRAINT_NAME = RefKeyCols.CONSTRAINT_NAME AND TC.CONSTRAINT_TYPE = 'FOREIGN KEY'
WHERE EXISTS (SELECT 1 FROM table_info_cte TI WHERE TC.TABLE_SCHEMA = TI.TABLE_SCHEMA AND TC.TABLE_NAME = TI.TABLE_NAME)
GROUP BY TC.TABLE_SCHEMA, TC.TABLE_NAME
),

-- Helper CTE for aggregating index key columns (as JSON strings)
index_key_columns_agg_cte AS (
SELECT
TABLE_CATALOG,
TABLE_SCHEMA,
TABLE_NAME,
INDEX_NAME,
ARRAY_AGG(
CONCAT(
'{"column_name":"', IFNULL(COLUMN_NAME, ''), '",',
'"ordering":"', IFNULL(COLUMN_ORDERING, ''), '"}'
) ORDER BY ORDINAL_POSITION
) AS key_column_json_details
FROM INFORMATION_SCHEMA.INDEX_COLUMNS
WHERE ORDINAL_POSITION IS NOT NULL -- Key columns
GROUP BY TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
),

-- Helper CTE for aggregating index storing columns (as JSON strings)
index_storing_columns_agg_cte AS (
SELECT
TABLE_CATALOG,
TABLE_SCHEMA,
TABLE_NAME,
INDEX_NAME,
ARRAY_AGG(CONCAT('"', COLUMN_NAME, '"') ORDER BY COLUMN_NAME) AS storing_column_json_names
FROM INFORMATION_SCHEMA.INDEX_COLUMNS
WHERE ORDINAL_POSITION IS NULL -- Storing columns
GROUP BY TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
),

-- 4. Index Information (with JSON string for each index)
indexes_info_cte AS (
SELECT
I.TABLE_SCHEMA,
I.TABLE_NAME,
ARRAY_AGG(
CONCAT(
'{',
'"index_name":"', IFNULL(I.INDEX_NAME, ''), '",',
'"index_type":"', IFNULL(I.INDEX_TYPE, ''), '",',
'"is_unique":', IF(I.IS_UNIQUE, 'true', 'false'), ',',
'"is_null_filtered":', IF(I.IS_NULL_FILTERED, 'true', 'false'), ',',
'"interleaved_in_table":', IF(I.PARENT_TABLE_NAME IS NULL, 'null', CONCAT('"', I.PARENT_TABLE_NAME, '"')), ',',
'"index_key_columns":[', ARRAY_TO_STRING(COALESCE(KeyIndexCols.key_column_json_details, []), ','), '],',
'"storing_columns":[', ARRAY_TO_STRING(COALESCE(StoringIndexCols.storing_column_json_names, []), ','), ']',
'}'
) ORDER BY I.INDEX_NAME
) AS indexes_json_array_elements
FROM INFORMATION_SCHEMA.INDEXES AS I
LEFT JOIN index_key_columns_agg_cte AS KeyIndexCols
ON I.TABLE_CATALOG = KeyIndexCols.TABLE_CATALOG AND I.TABLE_SCHEMA = KeyIndexCols.TABLE_SCHEMA AND I.TABLE_NAME = KeyIndexCols.TABLE_NAME AND I.INDEX_NAME = KeyIndexCols.INDEX_NAME
LEFT JOIN index_storing_columns_agg_cte AS StoringIndexCols
ON I.TABLE_CATALOG = StoringIndexCols.TABLE_CATALOG AND I.TABLE_SCHEMA = StoringIndexCols.TABLE_SCHEMA AND I.TABLE_NAME = StoringIndexCols.TABLE_NAME AND I.INDEX_NAME = StoringIndexCols.INDEX_NAME AND I.INDEX_TYPE = 'INDEX'
WHERE EXISTS (SELECT 1 FROM table_info_cte TI WHERE I.TABLE_SCHEMA = TI.TABLE_SCHEMA AND I.TABLE_NAME = TI.TABLE_NAME)
GROUP BY I.TABLE_SCHEMA, I.TABLE_NAME
)

-- Final SELECT to build the JSON output
SELECT
TI.TABLE_SCHEMA AS schema_name,
TI.TABLE_NAME AS object_name,
CASE
WHEN @output_format = 'simple' THEN
-- IF format is 'simple', return basic JSON
CONCAT('{"name":"', IFNULL(REPLACE(TI.TABLE_NAME, '"', '\"'), ''), '"}')
ELSE
CONCAT(
'{',
'"schema_name":"', IFNULL(TI.TABLE_SCHEMA, ''), '",',
'"object_name":"', IFNULL(TI.TABLE_NAME, ''), '",',
'"object_type":"', IFNULL(TI.TABLE_TYPE, ''), '",',
'"columns":[', ARRAY_TO_STRING(COALESCE(CI.columns_json_array_elements, []), ','), '],',
'"constraints":[', ARRAY_TO_STRING(COALESCE(CONSI.constraints_json_array_elements, []), ','), '],',
'"indexes":[', ARRAY_TO_STRING(COALESCE(II.indexes_json_array_elements, []), ','), ']',
'}'
)
END AS object_details
FROM table_info_cte AS TI
LEFT JOIN columns_info_cte AS CI
ON TI.TABLE_SCHEMA = CI.TABLE_SCHEMA AND TI.TABLE_NAME = CI.TABLE_NAME
LEFT JOIN constraints_info_cte AS CONSI
ON TI.TABLE_SCHEMA = CONSI.TABLE_SCHEMA AND TI.TABLE_NAME = CONSI.TABLE_NAME
LEFT JOIN indexes_info_cte AS II
ON TI.TABLE_SCHEMA = II.TABLE_SCHEMA AND TI.TABLE_NAME = II.TABLE_NAME
ORDER BY TI.TABLE_SCHEMA, TI.TABLE_NAME;

parameters:
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
- name: output_format
type: string
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
default: "detailed"

toolsets:
spanner-database-tools:
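The GoogleSQL statement above binds its inputs as named parameters (`@table_names`, `@output_format`). A sketch of the equivalent call with the Spanner Go client; the database path and table list are placeholders, and `listTablesSQL` is assumed to hold the statement text:

```go
// Hedged sketch: running a spanner-list-tables style statement with named
// parameters via the Spanner Go client.
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/spanner"
	"google.golang.org/api/iterator"
)

// listTablesSQL is assumed to hold the statement text from the config above.
const listTablesSQL = `/* paste the list_tables statement here */`

func main() {
	ctx := context.Background()
	client, err := spanner.NewClient(ctx, "projects/p/instances/i/databases/d") // placeholder path
	if err != nil {
		panic(err)
	}
	defer client.Close()

	stmt := spanner.Statement{
		SQL: listTablesSQL,
		Params: map[string]interface{}{
			"table_names":   "Albums,Singers",
			"output_format": "simple",
		},
	}
	iter := client.Single().Query(ctx, stmt)
	defer iter.Stop()
	for {
		row, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		var schema, name, details string
		if err := row.Columns(&schema, &name, &details); err != nil {
			panic(err)
		}
		fmt.Println(schema, name, details)
	}
}
```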
112 internal/prebuiltconfigs/tools/sqlite.yaml Normal file
@@ -0,0 +1,112 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

sources:
sqlite-source:
kind: sqlite
database: ${SQLITE_DATABASE}
tools:
execute_sql:
kind: sqlite-execute-sql
source: sqlite-source
description: Use this tool to execute SQL.
list_tables:
kind: sqlite-sql
source: sqlite-source
description: "Lists SQLite tables. Use 'output_format' ('simple'/'detailed') and 'table_names' (comma-separated or empty) to control output."
statement: |
WITH table_columns AS (
SELECT
m.name AS table_name,
json_group_array(json_object('column_name', ti.name, 'data_type', ti.type, 'ordinal_position', ti.cid, 'is_not_nullable', ti."notnull" = 1, 'column_default', ti.dflt_value, 'is_primary_key', ti.pk > 0)) AS details
FROM sqlite_master AS m, pragma_table_info(m.name) AS ti
WHERE m.type = 'table' AND m.name NOT LIKE 'sqlite_%'
GROUP BY m.name
),
table_constraints AS (
SELECT
table_name,
json_group_array(json(details)) AS details
FROM (
SELECT m.name AS table_name, json_object('constraint_name', 'PRIMARY', 'constraint_type', 'PRIMARY KEY', 'constraint_columns', json_group_array(T.name)) AS details
FROM sqlite_master AS m, pragma_table_info(m.name) AS T
WHERE m.type = 'table' AND T.pk > 0
GROUP BY m.name
HAVING COUNT(T.name) > 0
UNION ALL
SELECT m.name, json_object('constraint_name', 'fk_' || m.name || '_' || F.id, 'constraint_type', 'FOREIGN KEY', 'constraint_columns', json_group_array(F."from"), 'foreign_key_referenced_table', F."table", 'foreign_key_referenced_columns', json_group_array(F."to"))
FROM sqlite_master AS m, pragma_foreign_key_list(m.name) AS F
WHERE m.type = 'table'
GROUP BY m.name, F.id
UNION ALL
SELECT m.name, json_object('constraint_name', I.name, 'constraint_type', 'UNIQUE', 'constraint_columns', (SELECT json_group_array(C.name) FROM pragma_index_info(I.name) AS C ORDER BY C.seqno))
FROM sqlite_master AS m, pragma_index_list(m.name) AS I
WHERE m.type = 'table' AND I."unique" = 1 AND I.origin != 'pk'
)
GROUP BY table_name
),
table_indexes AS (
SELECT
m.name AS table_name,
json_group_array(json_object('index_name', il.name, 'is_unique', il."unique" = 1, 'is_primary', il.origin = 'pk', 'index_columns', (SELECT json_group_array(ii.name) FROM pragma_index_info(il.name) AS ii))) AS details
FROM sqlite_master AS m, pragma_index_list(m.name) AS il
WHERE m.type = 'table' AND m.name NOT LIKE 'sqlite_%'
GROUP BY m.name
),
table_triggers AS (
SELECT
tbl_name AS table_name,
json_group_array(json_object('trigger_name', name, 'trigger_definition', sql)) AS details
FROM sqlite_master
WHERE type = 'trigger'
GROUP BY tbl_name
)
SELECT
CASE
WHEN '{{.output_format}}' = 'simple' THEN json_object('name', m.name)
ELSE json_object(
'schema_name', 'main',
'object_name', m.name,
'object_type', m.type,
'columns', json(COALESCE(tc.details, '[]')),
'constraints', json(COALESCE(tcons.details, '[]')),
'indexes', json(COALESCE(ti.details, '[]')),
'triggers', json(COALESCE(tt.details, '[]'))
)
END AS object_details
FROM
sqlite_master AS m
LEFT JOIN table_columns tc ON m.name = tc.table_name
LEFT JOIN table_constraints tcons ON m.name = tcons.table_name
LEFT JOIN table_indexes ti ON m.name = ti.table_name
LEFT JOIN table_triggers tt ON m.name = tt.table_name
WHERE
m.type = 'table'
AND m.name NOT LIKE 'sqlite_%'
{{if .table_names}}
AND instr(',' || '{{.table_names}}' || ',', ',' || m.name || ',') > 0
{{end}};
templateParameters:
- name: output_format
type: string
description: "Optional: Use 'simple' to return table names only or use 'detailed' to return the full information schema."
default: "detailed"
- name: table_names
type: string
description: "Optional: A comma-separated list of table names. If empty, details for all tables in user-accessible schemas will be listed."
default: ""
toolsets:
sqlite_database_tools:
- execute_sql
- list_tables
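The statement above leans on SQLite's pragma table-valued functions (`pragma_table_info`, `pragma_index_list`, `pragma_foreign_key_list`), which can be queried like ordinary tables, and binds its inputs via Go text/template substitution (`{{.output_format}}`, `{{.table_names}}`) rather than SQL placeholders. A standalone sketch of the same pragma-as-table pattern; the driver choice, database file, and `users` table are assumptions:

```go
// Hedged sketch: querying the same pragma table-valued functions the
// list_tables statement above is built on, via the mattn/go-sqlite3 driver.
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "file:example.db") // placeholder database file
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// pragma_table_info behaves like a table: one row per column of "users".
	// "notnull" must be quoted because NOTNULL is an SQLite keyword.
	rows, err := db.Query(`SELECT name, type, "notnull", pk FROM pragma_table_info('users')`)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name, typ string
		var notNull, pk int
		if err := rows.Scan(&name, &typ, &notNull, &pk); err != nil {
			panic(err)
		}
		fmt.Printf("%s %s notnull=%d pk=%d\n", name, typ, notNull, pk)
	}
}
```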
@@ -355,14 +355,14 @@ func httpHandler(s *Server, w http.ResponseWriter, r *http.Request) {
}

// check if the client has the `Mcp-Session-Id` header set.
// if the `Mcp-Session-Id` header is set, we are using v2025-03-26, since
// previous versions don't use this header.
// `Mcp-Session-Id` is only set for v2025-03-26 in Toolbox.
headerSessionId := r.Header.Get("Mcp-Session-Id")
if headerSessionId != "" {
protocolVersion = v20250326.PROTOCOL_VERSION
}

// check if the client has the `MCP-Protocol-Version` header set.
// Only supported for v2025-06-18+.
headerProtocolVersion := r.Header.Get("MCP-Protocol-Version")
if headerProtocolVersion != "" {
if !mcp.VerifyProtocolVersion(headerProtocolVersion) {
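For reference, both headers inspected above are supplied by the client. A hedged sketch of a request exercising this version-detection path; the toolbox URL, endpoint path, and session id are placeholders:

```go
// Hedged sketch: a client request carrying the headers httpHandler inspects.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:5000/mcp", strings.NewReader(`{}`))
	if err != nil {
		panic(err)
	}
	// Presence of Mcp-Session-Id pins the server to protocol v2025-03-26.
	req.Header.Set("Mcp-Session-Id", "example-session-id")
	// MCP-Protocol-Version is honored for v2025-06-18+ and must pass the
	// server-side mcp.VerifyProtocolVersion check.
	req.Header.Set("MCP-Protocol-Version", "2025-06-18")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```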
153 internal/sources/alloydbadmin/alloydbadmin.go Normal file
@@ -0,0 +1,153 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alloydbadmin

import (
"context"
"fmt"
"net/http"

"github.com/goccy/go-yaml"
"github.com/googleapis/genai-toolbox/internal/sources"
"github.com/googleapis/genai-toolbox/internal/util"
"go.opentelemetry.io/otel/trace"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
alloydbrestapi "google.golang.org/api/alloydb/v1"
"google.golang.org/api/option"
)

const SourceKind string = "alloydb-admin"

type userAgentRoundTripper struct {
userAgent string
next http.RoundTripper
}

func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
newReq := *req
newReq.Header = make(http.Header)
for k, v := range req.Header {
newReq.Header[k] = v
}
ua := newReq.Header.Get("User-Agent")
if ua == "" {
newReq.Header.Set("User-Agent", rt.userAgent)
} else {
newReq.Header.Set("User-Agent", rt.userAgent+" "+ua)
}
return rt.next.RoundTrip(&newReq)
}

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
if !sources.Register(SourceKind, newConfig) {
panic(fmt.Sprintf("source kind %q already registered", SourceKind))
}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
actual := Config{Name: name}
if err := decoder.DecodeContext(ctx, &actual); err != nil {
return nil, err
}
return actual, nil
}

type Config struct {
Name string `yaml:"name" validate:"required"`
Kind string `yaml:"kind" validate:"required"`
UseClientOAuth bool `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
return SourceKind
}

func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
ua, err := util.UserAgentFromContext(ctx)
if err != nil {
fmt.Printf("Error in User Agent retrieval: %s", err)
}

var client *http.Client
if r.UseClientOAuth {
client = &http.Client{
Transport: &userAgentRoundTripper{
userAgent: ua,
next: http.DefaultTransport,
},
}
} else {
// Use Application Default Credentials
creds, err := google.FindDefaultCredentials(ctx, alloydbrestapi.CloudPlatformScope)
if err != nil {
return nil, fmt.Errorf("failed to find default credentials: %w", err)
}
baseClient := oauth2.NewClient(ctx, creds.TokenSource)
baseClient.Transport = &userAgentRoundTripper{
userAgent: ua,
next: baseClient.Transport,
}
client = baseClient
}

service, err := alloydbrestapi.NewService(ctx, option.WithHTTPClient(client))
if err != nil {
return nil, fmt.Errorf("error creating new alloydb service: %w", err)
}

s := &Source{
Name: r.Name,
Kind: SourceKind,
BaseURL: "https://alloydb.googleapis.com",
Service: service,
UseClientOAuth: r.UseClientOAuth,
}

return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
Name string `yaml:"name"`
Kind string `yaml:"kind"`
BaseURL string
Service *alloydbrestapi.Service
UseClientOAuth bool
}

func (s *Source) SourceKind() string {
return SourceKind
}

func (s *Source) GetService(ctx context.Context, accessToken string) (*alloydbrestapi.Service, error) {
if s.UseClientOAuth {
token := &oauth2.Token{AccessToken: accessToken}
client := oauth2.NewClient(ctx, oauth2.StaticTokenSource(token))
service, err := alloydbrestapi.NewService(ctx, option.WithHTTPClient(client))
if err != nil {
return nil, fmt.Errorf("error creating new alloydb service: %w", err)
}
return service, nil
}
return s.Service, nil
}

func (s *Source) UseClientAuthorization() bool {
return s.UseClientOAuth
}
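A sketch of how a tool might consume this source: resolve the service (a per-request token when `useClientOAuth` is set, the shared ADC-backed service otherwise) and issue a call. The parent path and the Clusters.List call are illustrative assumptions; only GetService and the OAuth behavior come from the file above:

```go
// Hedged sketch: consuming the alloydb-admin source from a hypothetical tool.
package exampletool

import (
	"context"
	"fmt"

	"github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
)

func listClusters(ctx context.Context, src *alloydbadmin.Source, accessToken string) error {
	// accessToken is only consulted when the source was configured with
	// useClientOAuth: true; otherwise the shared ADC-backed service is reused.
	svc, err := src.GetService(ctx, accessToken)
	if err != nil {
		return err
	}
	parent := "projects/my-project/locations/us-central1" // placeholder parent
	resp, err := svc.Projects.Locations.Clusters.List(parent).Do()
	if err != nil {
		return fmt.Errorf("listing clusters: %w", err)
	}
	for _, c := range resp.Clusters {
		fmt.Println(c.Name)
	}
	return nil
}
```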
125
internal/sources/alloydbadmin/alloydbadmin_test.go
Normal file
125
internal/sources/alloydbadmin/alloydbadmin_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbadmin_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlAlloyDBAdmin(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
			`,
			want: map[string]sources.SourceConfig{
				"my-alloydb-admin-instance": alloydbadmin.Config{
					Name:           "my-alloydb-admin-instance",
					Kind:           alloydbadmin.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-alloydb-admin-instance": alloydbadmin.Config{
					Name:           "my-alloydb-admin-instance",
					Kind:           alloydbadmin.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-alloydb-admin-instance:
					kind: alloydb-admin
					project: test-project
			`,
			err: "unable to parse source \"my-alloydb-admin-instance\" as \"alloydb-admin\": [2:1] unknown field \"project\"\n 1 | kind: alloydb-admin\n> 2 | project: test-project\n ^\n",
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-alloydb-admin-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-alloydb-admin-instance\"",
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
@@ -17,6 +17,8 @@ package bigquery
import (
	"context"
	"fmt"
	"net/http"
	"strings"

	bigqueryapi "cloud.google.com/go/bigquery"
	"github.com/goccy/go-yaml"
@@ -26,6 +28,7 @@ import (
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	bigqueryrestapi "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
)

@@ -52,11 +55,12 @@ func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources

type Config struct {
	// BigQuery configs
-	Name           string `yaml:"name" validate:"required"`
-	Kind           string `yaml:"kind" validate:"required"`
-	Project        string `yaml:"project" validate:"required"`
-	Location       string `yaml:"location"`
-	UseClientOAuth bool   `yaml:"useClientOAuth"`
+	Name            string   `yaml:"name" validate:"required"`
+	Kind            string   `yaml:"kind" validate:"required"`
+	Project         string   `yaml:"project" validate:"required"`
+	Location        string   `yaml:"location"`
+	AllowedDatasets []string `yaml:"allowedDatasets"`
+	UseClientOAuth  bool     `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
@@ -84,6 +88,37 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
		}
	}

	allowedDatasets := make(map[string]struct{})
	// Get full id of allowed datasets and verify they exist.
	if len(r.AllowedDatasets) > 0 {
		for _, allowed := range r.AllowedDatasets {
			var projectID, datasetID, allowedFullID string
			if strings.Contains(allowed, ".") {
				parts := strings.Split(allowed, ".")
				if len(parts) != 2 {
					return nil, fmt.Errorf("invalid allowedDataset format: %q, expected 'project.dataset' or 'dataset'", allowed)
				}
				projectID = parts[0]
				datasetID = parts[1]
				allowedFullID = allowed
			} else {
				projectID = client.Project()
				datasetID = allowed
				allowedFullID = fmt.Sprintf("%s.%s", projectID, datasetID)
			}

			dataset := client.DatasetInProject(projectID, datasetID)
			_, err := dataset.Metadata(ctx)
			if err != nil {
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
					return nil, fmt.Errorf("allowedDataset '%s' not found in project '%s'", datasetID, projectID)
				}
				return nil, fmt.Errorf("failed to verify allowedDataset '%s' in project '%s': %w", datasetID, projectID, err)
			}
			allowedDatasets[allowedFullID] = struct{}{}
		}
	}

	s := &Source{
		Name: r.Name,
		Kind: SourceKind,
@@ -94,6 +129,7 @@ func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.So
		TokenSource:        tokenSource,
		MaxQueryResultRows: 50,
		ClientCreator:      clientCreator,
		AllowedDatasets:    allowedDatasets,
		UseClientOAuth:     r.UseClientOAuth,
	}
	return s, nil
@@ -113,6 +149,7 @@ type Source struct {
	TokenSource        oauth2.TokenSource
	MaxQueryResultRows int
	ClientCreator      BigqueryClientCreator
	AllowedDatasets    map[string]struct{}
	UseClientOAuth     bool
}

@@ -153,6 +190,29 @@ func (s *Source) BigQueryClientCreator() BigqueryClientCreator {
	return s.ClientCreator
}

func (s *Source) BigQueryAllowedDatasets() []string {
	if len(s.AllowedDatasets) == 0 {
		return nil
	}
	datasets := make([]string, 0, len(s.AllowedDatasets))
	for d := range s.AllowedDatasets {
		datasets = append(datasets, d)
	}
	return datasets
}

// IsDatasetAllowed checks if a given dataset is accessible based on the source's configuration.
func (s *Source) IsDatasetAllowed(projectID, datasetID string) bool {
	// If the normalized map is empty, it means no restrictions were configured.
	if len(s.AllowedDatasets) == 0 {
		return true
	}

	targetDataset := fmt.Sprintf("%s.%s", projectID, datasetID)
	_, ok := s.AllowedDatasets[targetDataset]
	return ok
}

func initBigQueryConnection(
	ctx context.Context,
	tracer trace.Tracer,
@@ -69,6 +69,27 @@ func TestParseFromYamlBigQuery(t *testing.T) {
			},
		},
		{
			desc: "with allowed datasets example",
			in: `
			sources:
				my-instance:
					kind: bigquery
					project: my-project
					location: us
					allowedDatasets:
						- my_dataset
			`,
			want: server.SourceConfigs{
				"my-instance": bigquery.Config{
					Name:            "my-instance",
					Kind:            bigquery.SourceKind,
					Project:         "my-project",
					Location:        "us",
					AllowedDatasets: []string{"my_dataset"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
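Building on the verification logic above, a hedged sketch of an allowedDatasets configuration; the bare form is resolved against the source's own project, and the qualified form follows the 'project.dataset' branch of the parsing code (names are illustrative):

    sources:
      my-bigquery-source:
        kind: bigquery
        project: my-project
        location: us
        allowedDatasets:
          - my_dataset                  # resolved to 'my-project.my_dataset'
          - other-project.shared_data   # explicit 'project.dataset' form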
117 internal/sources/cloudmonitoring/cloud_monitoring.go (Normal file)
@@ -0,0 +1,117 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudmonitoring

import (
	"context"
	"fmt"
	"net/http"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	monitoring "google.golang.org/api/monitoring/v3"
)

const SourceKind string = "cloud-monitoring"

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

// Initialize initializes a Cloud Monitoring Source instance.
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	ua, err := util.UserAgentFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("error in User Agent retrieval: %s", err)
	}

	var client *http.Client
	if r.UseClientOAuth {
		client = nil
	} else {
		// Use Application Default Credentials
		creds, err := google.FindDefaultCredentials(ctx, monitoring.MonitoringScope)
		if err != nil {
			return nil, fmt.Errorf("failed to find default credentials: %w", err)
		}
		client = oauth2.NewClient(ctx, creds.TokenSource)
	}

	s := &Source{
		Name:           r.Name,
		Kind:           SourceKind,
		BaseURL:        "https://monitoring.googleapis.com",
		Client:         client,
		UserAgent:      ua,
		UseClientOAuth: r.UseClientOAuth,
	}
	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	BaseURL        string `yaml:"baseUrl"`
	Client         *http.Client
	UserAgent      string
	UseClientOAuth bool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) GetClient(ctx context.Context, accessToken string) (*http.Client, error) {
	if s.UseClientOAuth {
		if accessToken == "" {
			return nil, fmt.Errorf("client-side OAuth is enabled but no access token was provided")
		}
		token := &oauth2.Token{AccessToken: accessToken}
		return oauth2.NewClient(ctx, oauth2.StaticTokenSource(token)), nil
	}
	return s.Client, nil
}

func (s *Source) UseClientAuthorization() bool {
	return s.UseClientOAuth
}
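A minimal sketch of a cloud-monitoring source declaration, matching the YAML accepted by the parser above (the name is illustrative; omitting useClientOAuth falls back to Application Default Credentials):

    sources:
      my-cloud-monitoring-instance:
        kind: cloud-monitoring
        useClientOAuth: true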
135 internal/sources/cloudmonitoring/cloud_monitoring_test.go (Normal file)
@@ -0,0 +1,135 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudmonitoring_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudmonitoring"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlCloudMonitoring(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-monitoring-instance": cloudmonitoring.Config{
					Name:           "my-cloud-monitoring-instance",
					Kind:           cloudmonitoring.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-monitoring-instance": cloudmonitoring.Config{
					Name:           "my-cloud-monitoring-instance",
					Kind:           cloudmonitoring.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-cloud-monitoring-instance:
					kind: cloud-monitoring
					project: test-project
			`,
			err: `unable to parse source "my-cloud-monitoring-instance" as "cloud-monitoring": [2:1] unknown field "project"
    1 | kind: cloud-monitoring
>   2 | project: test-project
        ^
`,
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-cloud-monitoring-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-cloud-monitoring-instance\"",
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
153 internal/sources/cloudsqladmin/cloud_sql_admin.go (Normal file)
@@ -0,0 +1,153 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudsqladmin

import (
	"context"
	"fmt"
	"net/http"

	"github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
	sqladmin "google.golang.org/api/sqladmin/v1"
)

const SourceKind string = "cloud-sql-admin"

type userAgentRoundTripper struct {
	userAgent string
	next      http.RoundTripper
}

func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	newReq := *req
	newReq.Header = make(http.Header)
	for k, v := range req.Header {
		newReq.Header[k] = v
	}
	ua := newReq.Header.Get("User-Agent")
	if ua == "" {
		newReq.Header.Set("User-Agent", rt.userAgent)
	} else {
		newReq.Header.Set("User-Agent", rt.userAgent+" "+ua)
	}
	return rt.next.RoundTrip(&newReq)
}

// validate interface
var _ sources.SourceConfig = Config{}

func init() {
	if !sources.Register(SourceKind, newConfig) {
		panic(fmt.Sprintf("source kind %q already registered", SourceKind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (sources.SourceConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name           string `yaml:"name" validate:"required"`
	Kind           string `yaml:"kind" validate:"required"`
	UseClientOAuth bool   `yaml:"useClientOAuth"`
}

func (r Config) SourceConfigKind() string {
	return SourceKind
}

// Initialize initializes a CloudSQL Admin Source instance.
func (r Config) Initialize(ctx context.Context, tracer trace.Tracer) (sources.Source, error) {
	ua, err := util.UserAgentFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("error in User Agent retrieval: %s", err)
	}

	var client *http.Client
	if r.UseClientOAuth {
		client = &http.Client{
			Transport: &userAgentRoundTripper{
				userAgent: ua,
				next:      http.DefaultTransport,
			},
		}
	} else {
		// Use Application Default Credentials
		creds, err := google.FindDefaultCredentials(ctx, sqladmin.SqlserviceAdminScope)
		if err != nil {
			return nil, fmt.Errorf("failed to find default credentials: %w", err)
		}
		baseClient := oauth2.NewClient(ctx, creds.TokenSource)
		baseClient.Transport = &userAgentRoundTripper{
			userAgent: ua,
			next:      baseClient.Transport,
		}
		client = baseClient
	}

	service, err := sqladmin.NewService(ctx, option.WithHTTPClient(client))
	if err != nil {
		return nil, fmt.Errorf("error creating new sqladmin service: %w", err)
	}

	s := &Source{
		Name:           r.Name,
		Kind:           SourceKind,
		BaseURL:        "https://sqladmin.googleapis.com",
		Service:        service,
		UseClientOAuth: r.UseClientOAuth,
	}
	return s, nil
}

var _ sources.Source = &Source{}

type Source struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	BaseURL        string
	Service        *sqladmin.Service
	UseClientOAuth bool
}

func (s *Source) SourceKind() string {
	return SourceKind
}

func (s *Source) GetService(ctx context.Context, accessToken string) (*sqladmin.Service, error) {
	if s.UseClientOAuth {
		token := &oauth2.Token{AccessToken: accessToken}
		client := oauth2.NewClient(ctx, oauth2.StaticTokenSource(token))
		service, err := sqladmin.NewService(ctx, option.WithHTTPClient(client))
		if err != nil {
			return nil, fmt.Errorf("error creating new sqladmin service: %w", err)
		}
		return service, nil
	}
	return s.Service, nil
}

func (s *Source) UseClientAuthorization() bool {
	return s.UseClientOAuth
}
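Likewise, a sketch of a cloud-sql-admin source declaration, mirroring the test fixtures below; with useClientOAuth the service is rebuilt per request from the caller's token, while the ADC path wraps its transport in the user-agent round tripper above:

    sources:
      my-cloud-sql-admin-instance:
        kind: cloud-sql-admin
        useClientOAuth: true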
135 internal/sources/cloudsqladmin/cloud_sql_admin_test.go (Normal file)
@@ -0,0 +1,135 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloudsqladmin_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudsqladmin"
	"github.com/googleapis/genai-toolbox/internal/testutils"
)

func TestParseFromYamlCloudSQLAdmin(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		want server.SourceConfigs
	}{
		{
			desc: "basic example",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-sql-admin-instance": cloudsqladmin.Config{
					Name:           "my-cloud-sql-admin-instance",
					Kind:           cloudsqladmin.SourceKind,
					UseClientOAuth: false,
				},
			},
		},
		{
			desc: "use client auth example",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
					useClientOAuth: true
			`,
			want: map[string]sources.SourceConfig{
				"my-cloud-sql-admin-instance": cloudsqladmin.Config{
					Name:           "my-cloud-sql-admin-instance",
					Kind:           cloudsqladmin.SourceKind,
					UseClientOAuth: true,
				},
			},
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if !cmp.Equal(tc.want, got.Sources) {
				t.Fatalf("incorrect parse: want %v, got %v", tc.want, got.Sources)
			}
		})
	}
}

func TestFailParseFromYaml(t *testing.T) {
	t.Parallel()
	tcs := []struct {
		desc string
		in   string
		err  string
	}{
		{
			desc: "extra field",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					kind: cloud-sql-admin
					project: test-project
			`,
			err: `unable to parse source "my-cloud-sql-admin-instance" as "cloud-sql-admin": [2:1] unknown field "project"
    1 | kind: cloud-sql-admin
>   2 | project: test-project
        ^
`,
		},
		{
			desc: "missing required field",
			in: `
			sources:
				my-cloud-sql-admin-instance:
					useClientOAuth: true
			`,
			err: "missing 'kind' field for source \"my-cloud-sql-admin-instance\"",
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			got := struct {
				Sources server.SourceConfigs `yaml:"sources"`
			}{}
			// Parse contents
			err := yaml.Unmarshal(testutils.FormatYaml(tc.in), &got)
			if err == nil {
				t.Fatalf("expect parsing to fail")
			}
			errStr := err.Error()
			if errStr != tc.err {
				t.Fatalf("unexpected error: got %q, want %q", errStr, tc.err)
			}
		})
	}
}
@@ -52,7 +52,7 @@ type Config struct {
 	Project  string         `yaml:"project" validate:"required"`
 	Region   string         `yaml:"region" validate:"required"`
 	Instance string         `yaml:"instance" validate:"required"`
-	IPType   sources.IPType `yaml:"ipType" validate:"required"`
+	IPType   sources.IPType `yaml:"ipType"`
 	User     string         `yaml:"user" validate:"required"`
 	Password string         `yaml:"password" validate:"required"`
 	Database string         `yaml:"database" validate:"required"`
165 internal/tools/alloydb/alloydbgetcluster/alloydbgetcluster.go (Normal file)
@@ -0,0 +1,165 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetcluster

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "alloydb-get-cluster"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// Configuration for the get-cluster tool.
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
	BaseURL      string   `yaml:"baseURL"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of the tool.
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize initializes the tool from the configuration.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("source %q not found", cfg.Source)
	}

	s, ok := rawS.(*alloydbadmin.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameter("projectId", "The GCP project ID."),
		tools.NewStringParameter("locationId", "The location of the cluster (e.g., 'us-central1')."),
		tools.NewStringParameter("clusterId", "The ID of the cluster."),
	}
	paramManifest := allParameters.Manifest()

	inputSchema := allParameters.McpManifest()
	inputSchema.Required = []string{"projectId", "locationId", "clusterId"}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: inputSchema,
	}

	return Tool{
		Name:        cfg.Name,
		Kind:        kind,
		Source:      s,
		AllParams:   allParameters,
		manifest:    tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
		mcpManifest: mcpManifest,
	}, nil
}

// Tool represents the get-cluster tool.
type Tool struct {
	Name string `yaml:"name"`
	Kind string `yaml:"kind"`

	Source    *alloydbadmin.Source
	AllParams tools.Parameters

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// Invoke executes the tool's logic.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	projectId, ok := paramsMap["projectId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid or missing 'projectId' parameter; expected a string")
	}
	locationId, ok := paramsMap["locationId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'locationId' parameter; expected a string")
	}
	clusterId, ok := paramsMap["clusterId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'clusterId' parameter; expected a string")
	}

	service, err := t.Source.GetService(ctx, string(accessToken))
	if err != nil {
		return nil, err
	}

	urlString := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectId, locationId, clusterId)

	resp, err := service.Projects.Locations.Clusters.Get(urlString).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting AlloyDB cluster: %w", err)
	}

	return resp, nil
}

// ParseParams parses the parameters for the tool.
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

// Manifest returns the tool's manifest.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the tool's MCP manifest.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return true
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.Source.UseClientAuthorization()
}
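A hedged sketch of wiring this tool to an alloydb-admin source in a tools.yaml (names and description are illustrative; the three parameters are supplied by the caller at invocation time):

    tools:
      get-my-cluster:
        kind: alloydb-get-cluster
        source: my-alloydb-admin-source
        description: Retrieves details for a single AlloyDB cluster.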
@@ -0,0 +1,94 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetcluster_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydbgetcluster "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetcluster"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				get-my-cluster:
					kind: alloydb-get-cluster
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"get-my-cluster": alloydbgetcluster.Config{
					Name:         "get-my-cluster",
					Kind:         "alloydb-get-cluster",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				get-my-cluster-auth:
					kind: alloydb-get-cluster
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"get-my-cluster-auth": alloydbgetcluster.Config{
					Name:         "get-my-cluster-auth",
					Kind:         "alloydb-get-cluster",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
170 internal/tools/alloydb/alloydbgetinstance/alloydbgetinstance.go (Normal file)
@@ -0,0 +1,170 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetinstance

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "alloydb-get-instance"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// Configuration for the get-instance tool.
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
	BaseURL      string   `yaml:"baseURL"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of the tool.
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize initializes the tool from the configuration.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("source %q not found", cfg.Source)
	}

	s, ok := rawS.(*alloydbadmin.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameter("project", "The GCP project ID."),
		tools.NewStringParameter("location", "The location of the instance (e.g., 'us-central1')."),
		tools.NewStringParameter("cluster", "The ID of the cluster."),
		tools.NewStringParameter("instance", "The ID of the instance."),
	}
	paramManifest := allParameters.Manifest()

	inputSchema := allParameters.McpManifest()
	inputSchema.Required = []string{"project", "location", "cluster", "instance"}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: inputSchema,
	}

	return Tool{
		Name:        cfg.Name,
		Kind:        kind,
		Source:      s,
		AllParams:   allParameters,
		manifest:    tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
		mcpManifest: mcpManifest,
	}, nil
}

// Tool represents the get-instance tool.
type Tool struct {
	Name string `yaml:"name"`
	Kind string `yaml:"kind"`

	Source    *alloydbadmin.Source
	AllParams tools.Parameters

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// Invoke executes the tool's logic.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	project, ok := paramsMap["project"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid or missing 'project' parameter; expected a string")
	}
	location, ok := paramsMap["location"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'location' parameter; expected a string")
	}
	cluster, ok := paramsMap["cluster"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'cluster' parameter; expected a string")
	}
	instance, ok := paramsMap["instance"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'instance' parameter; expected a string")
	}

	service, err := t.Source.GetService(ctx, string(accessToken))
	if err != nil {
		return nil, err
	}

	urlString := fmt.Sprintf("projects/%s/locations/%s/clusters/%s/instances/%s", project, location, cluster, instance)

	resp, err := service.Projects.Locations.Clusters.Instances.Get(urlString).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting AlloyDB instance: %w", err)
	}

	return resp, nil
}

// ParseParams parses the parameters for the tool.
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

// Manifest returns the tool's manifest.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the tool's MCP manifest.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return true
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.Source.UseClientAuthorization()
}
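As with the cluster tool, a sketch of a possible alloydb-get-instance declaration (illustrative names; project, location, cluster, and instance are runtime parameters):

    tools:
      get-my-instance:
        kind: alloydb-get-instance
        source: my-alloydb-admin-source
        description: Retrieves details for a single AlloyDB instance.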
@@ -0,0 +1,94 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetinstance_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydbgetinstance "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetinstance"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				get-my-instance:
					kind: alloydb-get-instance
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"get-my-instance": alloydbgetinstance.Config{
					Name:         "get-my-instance",
					Kind:         "alloydb-get-instance",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				get-my-instance-auth:
					kind: alloydb-get-instance
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"get-my-instance-auth": alloydbgetinstance.Config{
					Name:         "get-my-instance-auth",
					Kind:         "alloydb-get-instance",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
171 internal/tools/alloydb/alloydbgetuser/alloydbgetuser.go (Normal file)
@@ -0,0 +1,171 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetuser

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "alloydb-get-user"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// Configuration for the get-user tool.
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
	BaseURL      string   `yaml:"baseURL"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of the tool.
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize initializes the tool from the configuration.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("source %q not found", cfg.Source)
	}

	s, ok := rawS.(*alloydbadmin.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameter("project", "The GCP project ID."),
		tools.NewStringParameter("location", "The location of the cluster (e.g., 'us-central1')."),
		tools.NewStringParameter("cluster", "The ID of the cluster."),
		tools.NewStringParameter("user", "The ID of the user."),
	}
	paramManifest := allParameters.Manifest()

	inputSchema := allParameters.McpManifest()
	inputSchema.Required = []string{"project", "location", "cluster", "user"}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: inputSchema,
	}

	return Tool{
		Name:        cfg.Name,
		Kind:        kind,
		Source:      s,
		AllParams:   allParameters,
		manifest:    tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
		mcpManifest: mcpManifest,
	}, nil
}

// Tool represents the get-user tool.
type Tool struct {
	Name string `yaml:"name"`
	Kind string `yaml:"kind"`

	Source    *alloydbadmin.Source
	AllParams tools.Parameters

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// Invoke executes the tool's logic.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	project, ok := paramsMap["project"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid or missing 'project' parameter; expected a string")
	}
	location, ok := paramsMap["location"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'location' parameter; expected a string")
	}
	cluster, ok := paramsMap["cluster"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'cluster' parameter; expected a string")
	}
	user, ok := paramsMap["user"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'user' parameter; expected a string")
	}

	service, err := t.Source.GetService(ctx, string(accessToken))
	if err != nil {
		return nil, err
	}

	urlString := fmt.Sprintf("projects/%s/locations/%s/clusters/%s/users/%s", project, location, cluster, user)

	resp, err := service.Projects.Locations.Clusters.Users.Get(urlString).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting AlloyDB user: %w", err)
	}

	return resp, nil
}

// ParseParams parses the parameters for the tool.
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

// Manifest returns the tool's manifest.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the tool's MCP manifest.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return true
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.Source.UseClientAuthorization()
}
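A sketch of a matching alloydb-get-user declaration (illustrative names; the user to fetch is passed as the 'user' parameter at call time):

    tools:
      get-my-user:
        kind: alloydb-get-user
        source: my-alloydb-admin-source
        description: Retrieves a single AlloyDB database user.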
95 internal/tools/alloydb/alloydbgetuser/alloydbgetuser_test.go (Normal file)
@@ -0,0 +1,95 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydbgetuser_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydbgetuser "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydbgetuser"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				get-my-user:
					kind: alloydb-get-user
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"get-my-user": alloydbgetuser.Config{
					Name:         "get-my-user",
					Kind:         "alloydb-get-user",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				get-my-user-auth:
					kind: alloydb-get-user
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"get-my-user-auth": alloydbgetuser.Config{
					Name:         "get-my-user-auth",
					Kind:         "alloydb-get-user",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
@@ -0,0 +1,161 @@
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package alloydblistclusters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
yaml "github.com/goccy/go-yaml"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources"
|
||||
alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
|
||||
"github.com/googleapis/genai-toolbox/internal/tools"
|
||||
)
|
||||
|
||||
const kind string = "alloydb-list-clusters"
|
||||
|
||||
func init() {
|
||||
if !tools.Register(kind, newConfig) {
|
||||
panic(fmt.Sprintf("tool kind %q already registered", kind))
|
||||
}
|
||||
}
|
||||
|
||||
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
|
||||
actual := Config{Name: name}
|
||||
if err := decoder.DecodeContext(ctx, &actual); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return actual, nil
|
||||
}
|
||||
|
||||
// Configuration for the list-clusters tool.
|
||||
type Config struct {
|
||||
Name string `yaml:"name" validate:"required"`
|
||||
Kind string `yaml:"kind" validate:"required"`
|
||||
Source string `yaml:"source" validate:"required"`
|
||||
Description string `yaml:"description" validate:"required"`
|
||||
AuthRequired []string `yaml:"authRequired"`
|
||||
BaseURL string `yaml:"baseURL"`
|
||||
}
|
||||
|
||||
// validate interface
|
||||
var _ tools.ToolConfig = Config{}
|
||||
|
||||
// ToolConfigKind returns the kind of the tool.
|
||||
func (cfg Config) ToolConfigKind() string {
|
||||
return kind
|
||||
}
|
||||
|
||||
// Initialize initializes the tool from the configuration.
|
||||
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
|
||||
rawS, ok := srcs[cfg.Source]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("source %q not found", cfg.Source)
|
||||
}
|
||||
|
||||
s, ok := rawS.(*alloydbadmin.Source)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
|
||||
}
|
||||
|
||||
allParameters := tools.Parameters{
|
||||
tools.NewStringParameter("projectId", "The GCP project ID to list clusters for."),
|
||||
tools.NewStringParameterWithDefault("locationId", "-", "Optional: The location to list clusters in (e.g., 'us-central1'). Use '-' to list clusters across all locations.(Default: '-')"),
|
||||
}
|
||||
paramManifest := allParameters.Manifest()
|
||||
|
||||
inputSchema := allParameters.McpManifest()
|
||||
inputSchema.Required = []string{"projectId", "locationId"}
|
||||
|
||||
mcpManifest := tools.McpManifest{
|
||||
Name: cfg.Name,
|
||||
Description: cfg.Description,
|
||||
InputSchema: inputSchema,
|
||||
}
|
||||
|
||||
return Tool{
|
||||
Name: cfg.Name,
|
||||
Kind: kind,
|
||||
Source: s,
|
||||
AllParams: allParameters,
|
||||
manifest: tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
|
||||
mcpManifest: mcpManifest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Tool represents the list-clusters tool.
|
||||
type Tool struct {
|
||||
Name string `yaml:"name"`
|
||||
Kind string `yaml:"kind"`
|
||||
Description string `yaml:"description"`
|
||||
|
||||
Source *alloydbadmin.Source
|
||||
AllParams tools.Parameters `yaml:"allParams"`
|
||||
|
||||
manifest tools.Manifest
|
||||
mcpManifest tools.McpManifest
|
||||
}
|
||||
|
||||
// Invoke executes the tool's logic.
|
||||
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
|
||||
paramsMap := params.AsMap()
|
||||
|
||||
projectId, ok := paramsMap["projectId"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid or missing 'projectId' parameter; expected a string")
|
||||
}
|
||||
locationId, ok := paramsMap["locationId"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid 'locationId' parameter; expected a string")
|
||||
}
|
||||
|
||||
service, err := t.Source.GetService(ctx, string(accessToken))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
urlString := fmt.Sprintf("projects/%s/locations/%s", projectId, locationId)
|
||||
|
||||
resp, err := service.Projects.Locations.Clusters.List(urlString).Do()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing AlloyDB clusters: %w", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ParseParams parses the parameters for the tool.
|
||||
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
|
||||
return tools.ParseParams(t.AllParams, data, claims)
|
||||
}
|
||||
|
||||
// Manifest returns the tool's manifest.
|
||||
func (t Tool) Manifest() tools.Manifest {
|
||||
return t.manifest
|
||||
}
|
||||
|
||||
// McpManifest returns the tool's MCP manifest.
|
||||
func (t Tool) McpManifest() tools.McpManifest {
|
||||
return t.mcpManifest
|
||||
}
|
||||
|
||||
// Authorized checks if the tool is authorized.
|
||||
func (t Tool) Authorized(verifiedAuthServices []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (t Tool) RequiresClientAuthorization() bool {
|
||||
return t.Source.UseClientAuthorization()
|
||||
}
|
||||
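For orientation, the Invoke above amounts to a single AlloyDB Admin API list call. Below is a minimal standalone sketch of that call, assuming the generated google.golang.org/api/alloydb/v1 client (which the alloydbadmin source referenced above appears to wrap), Application Default Credentials, and a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	alloydb "google.golang.org/api/alloydb/v1"
)

func main() {
	ctx := context.Background()
	svc, err := alloydb.NewService(ctx) // picks up Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// "-" is the location wildcard, mirroring the tool's locationId default:
	// it lists clusters across all regions in the project.
	parent := "projects/my-project/locations/-" // hypothetical project ID
	resp, err := svc.Projects.Locations.Clusters.List(parent).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range resp.Clusters {
		fmt.Println(c.Name)
	}
}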
internal/tools/alloydb/alloydblistclusters/alloydblistclusters_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydblistclusters_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydblistclusters "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistclusters"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				list-my-clusters:
					kind: alloydb-list-clusters
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"list-my-clusters": alloydblistclusters.Config{
					Name:         "list-my-clusters",
					Kind:         "alloydb-list-clusters",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				list-my-clusters-auth:
					kind: alloydb-list-clusters
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"list-my-clusters-auth": alloydblistclusters.Config{
					Name:         "list-my-clusters-auth",
					Kind:         "alloydb-list-clusters",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
internal/tools/alloydb/alloydblistinstances/alloydblistinstances.go (new file, 166 lines)
@@ -0,0 +1,166 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydblistinstances

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "alloydb-list-instances"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// Configuration for the list-instances tool.
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
	BaseURL      string   `yaml:"baseURL"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of the tool.
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize initializes the tool from the configuration.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("source %q not found", cfg.Source)
	}

	s, ok := rawS.(*alloydbadmin.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameter("projectId", "The GCP project ID to list instances for."),
		tools.NewStringParameterWithDefault("locationId", "-", "Optional: The location of the cluster (e.g., 'us-central1'). Use '-' to get results for all regions. (Default: '-')"),
		tools.NewStringParameterWithDefault("clusterId", "-", "Optional: The ID of the cluster to list instances from. Use '-' to get results for all clusters. (Default: '-')"),
	}
	paramManifest := allParameters.Manifest()

	inputSchema := allParameters.McpManifest()
	inputSchema.Required = []string{"projectId"}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: inputSchema,
	}

	return Tool{
		Name:        cfg.Name,
		Kind:        kind,
		Source:      s,
		AllParams:   allParameters,
		manifest:    tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
		mcpManifest: mcpManifest,
	}, nil
}

// Tool represents the list-instances tool.
type Tool struct {
	Name        string `yaml:"name"`
	Kind        string `yaml:"kind"`
	Description string `yaml:"description"`

	Source    *alloydbadmin.Source
	AllParams tools.Parameters `yaml:"allParams"`

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// Invoke executes the tool's logic.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	projectId, ok := paramsMap["projectId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid or missing 'projectId' parameter; expected a string")
	}
	locationId, ok := paramsMap["locationId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'locationId' parameter; expected a string")
	}
	clusterId, ok := paramsMap["clusterId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'clusterId' parameter; expected a string")
	}

	service, err := t.Source.GetService(ctx, string(accessToken))
	if err != nil {
		return nil, err
	}

	urlString := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectId, locationId, clusterId)

	resp, err := service.Projects.Locations.Clusters.Instances.List(urlString).Do()
	if err != nil {
		return nil, fmt.Errorf("error listing AlloyDB instances: %w", err)
	}

	return resp, nil
}

// ParseParams parses the parameters for the tool.
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

// Manifest returns the tool's manifest.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the tool's MCP manifest.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return true
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.Source.UseClientAuthorization()
}
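As a configuration sketch, this tool would be wired into a tools.yaml alongside an AlloyDB admin source; the names below are illustrative, and the source kind string is an assumption based on the alloydbadmin.SourceKind constant referenced above:

sources:
  my-alloydb-admin-source:
    kind: alloydb-admin
tools:
  list_instances:
    kind: alloydb-list-instances
    source: my-alloydb-admin-source
    description: Lists AlloyDB instances in a project.

Because locationId and clusterId default to "-", a caller only needs to supply projectId to enumerate every instance in the project.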
internal/tools/alloydb/alloydblistinstances/alloydblistinstances_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydblistinstances_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydblistinstances "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistinstances"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				list-my-instances:
					kind: alloydb-list-instances
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"list-my-instances": alloydblistinstances.Config{
					Name:         "list-my-instances",
					Kind:         "alloydb-list-instances",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				list-my-instances-auth:
					kind: alloydb-list-instances
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"list-my-instances-auth": alloydblistinstances.Config{
					Name:         "list-my-instances-auth",
					Kind:         "alloydb-list-instances",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
internal/tools/alloydb/alloydblistusers/alloydblistusers.go (new file, 166 lines)
@@ -0,0 +1,166 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydblistusers

import (
	"context"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	alloydbadmin "github.com/googleapis/genai-toolbox/internal/sources/alloydbadmin"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "alloydb-list-users"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// Configuration for the list-users tool.
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
	BaseURL      string   `yaml:"baseURL"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of the tool.
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize initializes the tool from the configuration.
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("source %q not found", cfg.Source)
	}

	s, ok := rawS.(*alloydbadmin.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `%s`", kind, alloydbadmin.SourceKind)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameter("projectId", "The GCP project ID."),
		tools.NewStringParameter("locationId", "The location of the cluster (e.g., 'us-central1')."),
		tools.NewStringParameter("clusterId", "The ID of the cluster to list users from."),
	}
	paramManifest := allParameters.Manifest()

	inputSchema := allParameters.McpManifest()
	inputSchema.Required = []string{"projectId", "locationId", "clusterId"}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: inputSchema,
	}

	return Tool{
		Name:        cfg.Name,
		Kind:        kind,
		Source:      s,
		AllParams:   allParameters,
		manifest:    tools.Manifest{Description: cfg.Description, Parameters: paramManifest},
		mcpManifest: mcpManifest,
	}, nil
}

// Tool represents the list-users tool.
type Tool struct {
	Name        string `yaml:"name"`
	Kind        string `yaml:"kind"`
	Description string `yaml:"description"`

	Source    *alloydbadmin.Source
	AllParams tools.Parameters `yaml:"allParams"`

	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// Invoke executes the tool's logic.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	projectId, ok := paramsMap["projectId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid or missing 'projectId' parameter; expected a string")
	}
	locationId, ok := paramsMap["locationId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'locationId' parameter; expected a string")
	}
	clusterId, ok := paramsMap["clusterId"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'clusterId' parameter; expected a string")
	}

	service, err := t.Source.GetService(ctx, string(accessToken))
	if err != nil {
		return nil, err
	}

	urlString := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectId, locationId, clusterId)

	resp, err := service.Projects.Locations.Clusters.Users.List(urlString).Do()
	if err != nil {
		return nil, fmt.Errorf("error listing AlloyDB users: %w", err)
	}

	return resp, nil
}

// ParseParams parses the parameters for the tool.
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

// Manifest returns the tool's manifest.
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the tool's MCP manifest.
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized.
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return true
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.Source.UseClientAuthorization()
}
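The users tool differs from the two list tools above in that all three parameters are required and carry no "-" wildcard defaults, so the request always targets a concrete cluster. A minimal standalone sketch under the same assumptions as the clusters example (generated alloydb/v1 client, Application Default Credentials, placeholder IDs):

package main

import (
	"context"
	"fmt"
	"log"

	alloydb "google.golang.org/api/alloydb/v1"
)

func main() {
	ctx := context.Background()
	svc, err := alloydb.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The parent must name a concrete project, location, and cluster,
	// matching the tool's three required parameters.
	parent := "projects/my-project/locations/us-central1/clusters/my-cluster" // hypothetical IDs
	resp, err := svc.Projects.Locations.Clusters.Users.List(parent).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range resp.Users {
		fmt.Println(u.Name)
	}
}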
internal/tools/alloydb/alloydblistusers/alloydblistusers_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package alloydblistusers_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	alloydblistusers "github.com/googleapis/genai-toolbox/internal/tools/alloydb/alloydblistusers"
)

func TestParseFromYaml(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				list-my-users:
					kind: alloydb-list-users
					source: my-alloydb-admin-source
					description: some description
			`,
			want: server.ToolConfigs{
				"list-my-users": alloydblistusers.Config{
					Name:         "list-my-users",
					Kind:         "alloydb-list-users",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
		{
			desc: "with auth required",
			in: `
			tools:
				list-my-users-auth:
					kind: alloydb-list-users
					source: my-alloydb-admin-source
					description: some description
					authRequired:
						- my-google-auth-service
						- other-auth-service
			`,
			want: server.ToolConfigs{
				"list-my-users-auth": alloydblistusers.Config{
					Name:         "list-my-users-auth",
					Kind:         "alloydb-list-users",
					Source:       "my-alloydb-admin-source",
					Description:  "some description",
					AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
internal/tools/bigquery/bigqueryanalyzecontribution/bigqueryanalyzecontribution.go (new file, 307 lines)
@@ -0,0 +1,307 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigqueryanalyzecontribution

import (
	"context"
	"fmt"
	"strings"

	bigqueryapi "cloud.google.com/go/bigquery"
	yaml "github.com/goccy/go-yaml"
	"github.com/google/uuid"
	"github.com/googleapis/genai-toolbox/internal/sources"
	bigqueryds "github.com/googleapis/genai-toolbox/internal/sources/bigquery"
	"github.com/googleapis/genai-toolbox/internal/tools"
	bigqueryrestapi "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/iterator"
)

const kind string = "bigquery-analyze-contribution"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type compatibleSource interface {
	BigQueryClient() *bigqueryapi.Client
	BigQueryRestService() *bigqueryrestapi.Service
	BigQueryClientCreator() bigqueryds.BigqueryClientCreator
	UseClientAuthorization() bool
}

// validate compatible sources are still compatible
var _ compatibleSource = &bigqueryds.Source{}

var compatibleSources = [...]string{bigqueryds.SourceKind}

type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	inputDataParameter := tools.NewStringParameter("input_data",
		"The data that contain the test and control data to analyze. Can be a fully qualified BigQuery table ID or a SQL query.")
	contributionMetricParameter := tools.NewStringParameter("contribution_metric",
		`The name of the column that contains the metric to analyze.
		Provides the expression to use to calculate the metric you are analyzing.
		To calculate a summable metric, the expression must be in the form SUM(metric_column_name),
		where metric_column_name is a numeric data type.

		To calculate a summable ratio metric, the expression must be in the form
		SUM(numerator_metric_column_name)/SUM(denominator_metric_column_name),
		where numerator_metric_column_name and denominator_metric_column_name are numeric data types.

		To calculate a summable by category metric, the expression must be in the form
		SUM(metric_sum_column_name)/COUNT(DISTINCT categorical_column_name). The summed column must be a numeric data type.
		The categorical column must have type BOOL, DATE, DATETIME, TIME, TIMESTAMP, STRING, or INT64.`)
	isTestColParameter := tools.NewStringParameter("is_test_col",
		"The name of the column that identifies whether a row is in the test or control group.")
	dimensionIDColsParameter := tools.NewArrayParameterWithRequired("dimension_id_cols",
		"An array of column names that uniquely identify each dimension.", false, tools.NewStringParameter("dimension_id_col", "A dimension column name."))
	topKInsightsParameter := tools.NewIntParameterWithDefault("top_k_insights_by_apriori_support", 30,
		"The number of top insights to return, ranked by apriori support.")
	pruningMethodParameter := tools.NewStringParameterWithDefault("pruning_method", "PRUNE_REDUNDANT_INSIGHTS",
		"The method to use for pruning redundant insights. Can be 'NO_PRUNING' or 'PRUNE_REDUNDANT_INSIGHTS'.")

	parameters := tools.Parameters{
		inputDataParameter,
		contributionMetricParameter,
		isTestColParameter,
		dimensionIDColsParameter,
		topKInsightsParameter,
		pruningMethodParameter,
	}

	mcpManifest := tools.McpManifest{
		Name:        cfg.Name,
		Description: cfg.Description,
		InputSchema: parameters.McpManifest(),
	}

	// finish tool setup
	t := Tool{
		Name:           cfg.Name,
		Kind:           kind,
		Parameters:     parameters,
		AuthRequired:   cfg.AuthRequired,
		UseClientOAuth: s.UseClientAuthorization(),
		ClientCreator:  s.BigQueryClientCreator(),
		Client:         s.BigQueryClient(),
		RestService:    s.BigQueryRestService(),
		manifest:       tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:    mcpManifest,
	}
	return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
	Name           string           `yaml:"name"`
	Kind           string           `yaml:"kind"`
	AuthRequired   []string         `yaml:"authRequired"`
	UseClientOAuth bool             `yaml:"useClientOAuth"`
	Parameters     tools.Parameters `yaml:"parameters"`

	Client        *bigqueryapi.Client
	RestService   *bigqueryrestapi.Service
	ClientCreator bigqueryds.BigqueryClientCreator
	manifest      tools.Manifest
	mcpManifest   tools.McpManifest
}

// Invoke runs the contribution analysis.
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()
	inputData, ok := paramsMap["input_data"].(string)
	if !ok {
		return nil, fmt.Errorf("unable to cast input_data parameter %s", paramsMap["input_data"])
	}

	modelID := fmt.Sprintf("contribution_analysis_model_%s", strings.ReplaceAll(uuid.New().String(), "-", ""))

	var options []string
	options = append(options, "MODEL_TYPE = 'CONTRIBUTION_ANALYSIS'")
	options = append(options, fmt.Sprintf("CONTRIBUTION_METRIC = '%s'", paramsMap["contribution_metric"]))
	options = append(options, fmt.Sprintf("IS_TEST_COL = '%s'", paramsMap["is_test_col"]))

	if val, ok := paramsMap["dimension_id_cols"]; ok {
		if cols, ok := val.([]any); ok {
			var strCols []string
			for _, c := range cols {
				strCols = append(strCols, fmt.Sprintf("'%s'", c))
			}
			options = append(options, fmt.Sprintf("DIMENSION_ID_COLS = [%s]", strings.Join(strCols, ", ")))
		} else {
			return nil, fmt.Errorf("unable to cast dimension_id_cols parameter %s", paramsMap["dimension_id_cols"])
		}
	}
	if val, ok := paramsMap["top_k_insights_by_apriori_support"]; ok {
		options = append(options, fmt.Sprintf("TOP_K_INSIGHTS_BY_APRIORI_SUPPORT = %v", val))
	}
	if val, ok := paramsMap["pruning_method"].(string); ok {
		upperVal := strings.ToUpper(val)
		if upperVal != "NO_PRUNING" && upperVal != "PRUNE_REDUNDANT_INSIGHTS" {
			return nil, fmt.Errorf("invalid pruning_method: %s", val)
		}
		options = append(options, fmt.Sprintf("PRUNING_METHOD = '%s'", upperVal))
	}

	var inputDataSource string
	trimmedUpperInputData := strings.TrimSpace(strings.ToUpper(inputData))
	if strings.HasPrefix(trimmedUpperInputData, "SELECT") || strings.HasPrefix(trimmedUpperInputData, "WITH") {
		inputDataSource = fmt.Sprintf("(%s)", inputData)
	} else {
		inputDataSource = fmt.Sprintf("SELECT * FROM `%s`", inputData)
	}

	// Use a temp model to skip the cleanup at the end. To use TEMP MODEL, queries have to be
	// in the same BigQuery session.
	createModelSQL := fmt.Sprintf("CREATE TEMP MODEL %s OPTIONS(%s) AS %s",
		modelID,
		strings.Join(options, ", "),
		inputDataSource,
	)

	bqClient := t.Client
	var err error

	// Initialize new client if using user OAuth token
	if t.UseClientOAuth {
		tokenStr, err := accessToken.ParseBearerToken()
		if err != nil {
			return nil, fmt.Errorf("error parsing access token: %w", err)
		}
		bqClient, _, err = t.ClientCreator(tokenStr, false)
		if err != nil {
			return nil, fmt.Errorf("error creating client from OAuth access token: %w", err)
		}
	}

	createModelQuery := bqClient.Query(createModelSQL)
	createModelQuery.CreateSession = true
	createModelJob, err := createModelQuery.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to start create model job: %w", err)
	}

	status, err := createModelJob.Wait(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to wait for create model job: %w", err)
	}
	if err := status.Err(); err != nil {
		return nil, fmt.Errorf("create model job failed: %w", err)
	}

	if status.Statistics == nil || status.Statistics.SessionInfo == nil || status.Statistics.SessionInfo.SessionID == "" {
		return nil, fmt.Errorf("failed to create a BigQuery session")
	}
	sessionID := status.Statistics.SessionInfo.SessionID
	getInsightsSQL := fmt.Sprintf("SELECT * FROM ML.GET_INSIGHTS(MODEL %s)", modelID)

	getInsightsQuery := bqClient.Query(getInsightsSQL)
	getInsightsQuery.QueryConfig.ConnectionProperties = []*bigqueryapi.ConnectionProperty{
		{Key: "session_id", Value: sessionID},
	}

	job, err := getInsightsQuery.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to execute get insights query: %w", err)
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to read query results: %w", err)
	}

	var out []any
	for {
		var row map[string]bigqueryapi.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to iterate through query results: %w", err)
		}
		vMap := make(map[string]any)
		for key, value := range row {
			vMap[key] = value
		}
		out = append(out, vMap)
	}

	if len(out) > 0 {
		return out, nil
	}

	// This handles the standard case for a SELECT query that successfully
	// executes but returns zero rows.
	return "The query returned 0 rows.", nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.Parameters, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.UseClientOAuth
}
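To make the flow above concrete: for a hypothetical call with input_data "myproj.sales.daily", contribution_metric "SUM(revenue)", and is_test_col "is_test", with the defaults left in place and no dimension_id_cols, Invoke would issue roughly the following two statements, the second reusing the session created by the first so the TEMP model is still visible (here <uuid> stands for the random suffix generated per invocation):

CREATE TEMP MODEL contribution_analysis_model_<uuid>
  OPTIONS(MODEL_TYPE = 'CONTRIBUTION_ANALYSIS',
          CONTRIBUTION_METRIC = 'SUM(revenue)',
          IS_TEST_COL = 'is_test',
          TOP_K_INSIGHTS_BY_APRIORI_SUPPORT = 30,
          PRUNING_METHOD = 'PRUNE_REDUNDANT_INSIGHTS')
  AS SELECT * FROM `myproj.sales.daily`

SELECT * FROM ML.GET_INSIGHTS(MODEL contribution_analysis_model_<uuid>)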
internal/tools/bigquery/bigqueryanalyzecontribution/bigqueryanalyzecontribution_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigqueryanalyzecontribution_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools/bigquery/bigqueryanalyzecontribution"
)

func TestParseFromYamlBigQueryAnalyzeContribution(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example",
			in: `
			tools:
				example_tool:
					kind: bigquery-analyze-contribution
					source: my-instance
					description: some description
			`,
			want: server.ToolConfigs{
				"example_tool": bigqueryanalyzecontribution.Config{
					Name:         "example_tool",
					Kind:         "bigquery-analyze-contribution",
					Source:       "my-instance",
					Description:  "some description",
					AuthRequired: []string{},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}
@@ -98,6 +98,7 @@ type CAPayload struct {
	Project       string        `json:"project"`
	Messages      []Message     `json:"messages"`
	InlineContext InlineContext `json:"inlineContext"`
	ClientIdEnum  string        `json:"clientIdEnum"`
}

// validate compatible sources are still compatible

@@ -243,6 +244,7 @@ func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken
			},
			Options: Options{Chart: ChartOptions{Image: ImageOptions{NoImage: map[string]any{}}}},
		},
		ClientIdEnum: "GENAI_TOOLBOX",
	}

	// Call the streaming API
Some files were not shown because too many files have changed in this diff.