Mirror of https://github.com/googleapis/genai-toolbox.git (synced 2026-01-11 08:28:11 -05:00)

Compare commits: v0.22.0 ... py-sdk-doc (16 commits)
| SHA1 |
|---|
| 12b25a0beb |
| 073c8b3268 |
| 0cb3ad9026 |
| 18d0440f4e |
| 7a135ce078 |
| ea9e2d12bd |
| bea9705450 |
| 489117d747 |
| 32367a472f |
| 3b40fea25e |
| f6b6a9fb5d |
| 1dd971b8d5 |
| 290cba0f1e |
| 047def93ef |
| 875b5277e3 |
| a29f9e5484 |
@@ -589,6 +589,26 @@ steps:
|
||||
firestore \
|
||||
firestore
|
||||
|
||||
- id: "mongodb"
|
||||
name: golang:1
|
||||
waitFor: ["compile-test-binary"]
|
||||
entrypoint: /bin/bash
|
||||
env:
|
||||
- "GOPATH=/gopath"
|
||||
- "MONGODB_DATABASE=$_DATABASE_NAME"
|
||||
- "SERVICE_ACCOUNT_EMAIL=$SERVICE_ACCOUNT_EMAIL"
|
||||
secretEnv: ["MONGODB_URI", "CLIENT_ID"]
|
||||
volumes:
|
||||
- name: "go"
|
||||
path: "/gopath"
|
||||
args:
|
||||
- -c
|
||||
- |
|
||||
.ci/test_with_coverage.sh \
|
||||
"MongoDB" \
|
||||
mongodb \
|
||||
mongodb
|
||||
|
||||
- id: "looker"
|
||||
name: golang:1
|
||||
waitFor: ["compile-test-binary"]
|
||||
@@ -867,6 +887,26 @@ steps:
|
||||
singlestore \
|
||||
singlestore
|
||||
|
||||
- id: "mariadb"
|
||||
name: golang:1
|
||||
waitFor: ["compile-test-binary"]
|
||||
entrypoint: /bin/bash
|
||||
env:
|
||||
- "GOPATH=/gopath"
|
||||
- "MARIADB_DATABASE=$_MARIADB_DATABASE"
|
||||
- "MARIADB_PORT=$_MARIADB_PORT"
|
||||
- "SERVICE_ACCOUNT_EMAIL=$SERVICE_ACCOUNT_EMAIL"
|
||||
secretEnv: ["MARIADB_USER", "MARIADB_PASS", "MARIADB_HOST", "CLIENT_ID"]
|
||||
volumes:
|
||||
- name: "go"
|
||||
path: "/gopath"
|
||||
args:
|
||||
- -c
|
||||
- |
|
||||
# skip coverage check as it re-uses current MySQL implementation
|
||||
go test ./tests/mariadb
|
||||
|
||||
|
||||
availableSecrets:
|
||||
secretManager:
|
||||
- versionName: projects/$PROJECT_ID/secrets/cloud_sql_pg_user/versions/latest
|
||||
@@ -979,6 +1019,14 @@ availableSecrets:
|
||||
env: SINGLESTORE_PASSWORD
|
||||
- versionName: projects/$PROJECT_ID/secrets/singlestore_host/versions/latest
|
||||
env: SINGLESTORE_HOST
|
||||
- versionName: projects/$PROJECT_ID/secrets/mariadb_user/versions/latest
|
||||
env: MARIADB_USER
|
||||
- versionName: projects/$PROJECT_ID/secrets/mariadb_pass/versions/latest
|
||||
env: MARIADB_PASS
|
||||
- versionName: projects/$PROJECT_ID/secrets/mariadb_host/versions/latest
|
||||
env: MARIADB_HOST
|
||||
- versionName: projects/$PROJECT_ID/secrets/mongodb_uri/versions/latest
|
||||
env: MONGODB_URI
|
||||
|
||||
options:
|
||||
logging: CLOUD_LOGGING_ONLY
|
||||
@@ -1039,3 +1087,6 @@ substitutions:
|
||||
_SINGLESTORE_PORT: "3308"
|
||||
_SINGLESTORE_DATABASE: "singlestore"
|
||||
_SINGLESTORE_USER: "root"
|
||||
_MARIADB_PORT: "3307"
|
||||
_MARIADB_DATABASE: test_database
|
||||
|
||||
|
||||
@@ -1 +1,9 @@
|
||||
@import 'td/code-dark';
|
||||
@import 'td/code-dark';
|
||||
|
||||
// Make tabs scrollable horizontally instead of wrapping
|
||||
.nav-tabs {
|
||||
flex-wrap: nowrap;
|
||||
white-space: nowrap;
|
||||
overflow-x: auto;
|
||||
overflow-y: hidden;
|
||||
}
|
||||
@@ -109,7 +109,7 @@ golangci-lint run --fix
|
||||
Execute unit tests locally:
|
||||
|
||||
```bash
|
||||
go test -race -v ./...
|
||||
go test -race -v ./cmd/... ./internal/...
|
||||
```
|
||||
|
||||
### Integration Tests
|
||||
|
||||
README.md (18 changed lines)
@@ -158,17 +158,29 @@ To install Toolbox as a binary:
|
||||
>
|
||||
> </details>
|
||||
> <details>
|
||||
> <summary>Windows (AMD64)</summary>
|
||||
> <summary>Windows (Command Prompt)</summary>
|
||||
>
|
||||
> To install Toolbox as a binary on Windows (AMD64):
|
||||
> To install Toolbox as a binary on Windows (Command Prompt):
|
||||
>
|
||||
> ```powershell
|
||||
> ```cmd
|
||||
> :: see releases page for other versions
|
||||
> set VERSION=0.22.0
|
||||
> curl -o toolbox.exe "https://storage.googleapis.com/genai-toolbox/v%VERSION%/windows/amd64/toolbox.exe"
|
||||
> ```
|
||||
>
|
||||
> </details>
|
||||
> <details>
|
||||
> <summary>Windows (PowerShell)</summary>
|
||||
>
|
||||
> To install Toolbox as a binary on Windows (PowerShell):
|
||||
>
|
||||
> ```powershell
|
||||
> # see releases page for other versions
|
||||
> $VERSION = "0.21.0"
|
||||
> curl.exe -o toolbox.exe "https://storage.googleapis.com/genai-toolbox/v$VERSION/windows/amd64/toolbox.exe"
|
||||
> ```
|
||||
>
|
||||
> </details>
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
@@ -184,11 +184,14 @@ import (
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgresgetcolumncardinality"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistactivequeries"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistavailableextensions"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistdatabasestats"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistindexes"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistinstalledextensions"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistlocks"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistpgsettings"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistpublicationtables"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistquerystats"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistroles"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistschemas"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistsequences"
|
||||
_ "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslisttables"
|
||||
|
||||
@@ -1488,7 +1488,7 @@ func TestPrebuiltTools(t *testing.T) {
|
||||
wantToolset: server.ToolsetConfigs{
|
||||
"alloydb_postgres_database_tools": tools.ToolsetConfig{
|
||||
Name: "alloydb_postgres_database_tools",
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces"},
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces", "list_pg_settings", "list_database_stats", "list_roles"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1518,7 +1518,7 @@ func TestPrebuiltTools(t *testing.T) {
|
||||
wantToolset: server.ToolsetConfigs{
|
||||
"cloud_sql_postgres_database_tools": tools.ToolsetConfig{
|
||||
Name: "cloud_sql_postgres_database_tools",
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces"},
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces", "list_pg_settings", "list_database_stats", "list_roles"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1618,7 +1618,7 @@ func TestPrebuiltTools(t *testing.T) {
|
||||
wantToolset: server.ToolsetConfigs{
|
||||
"postgres_database_tools": tools.ToolsetConfig{
|
||||
Name: "postgres_database_tools",
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces"},
|
||||
ToolNames: []string{"execute_sql", "list_tables", "list_active_queries", "list_available_extensions", "list_installed_extensions", "list_autovacuum_configurations", "list_memory_configurations", "list_top_bloated_tables", "list_replication_slots", "list_invalid_indexes", "get_query_plan", "list_views", "list_schemas", "database_overview", "list_triggers", "list_indexes", "list_sequences", "long_running_transactions", "list_locks", "replication_stats", "list_query_stats", "get_column_cardinality", "list_publication_tables", "list_tablespaces", "list_pg_settings", "list_database_stats", "list_roles"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -115,15 +115,25 @@ chmod +x toolbox
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
{{% tab header="Windows (AMD64)" lang="en" %}}
|
||||
To install Toolbox as a binary on Windows (AMD64):
|
||||
{{% tab header="Windows (Command Prompt)" lang="en" %}}
|
||||
To install Toolbox as a binary on Windows (Command Prompt):
|
||||
|
||||
```powershell
|
||||
```cmd
|
||||
:: see releases page for other versions
|
||||
set VERSION=0.22.0
|
||||
curl -o toolbox.exe "https://storage.googleapis.com/genai-toolbox/v%VERSION%/windows/amd64/toolbox.exe"
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
{{% tab header="Windows (PowerShell)" lang="en" %}}
|
||||
To install Toolbox as a binary on Windows (PowerShell):
|
||||
|
||||
```powershell
|
||||
# see releases page for other versions
|
||||
$VERSION = "0.21.0"
|
||||
curl.exe -o toolbox.exe "https://storage.googleapis.com/genai-toolbox/v$VERSION/windows/amd64/toolbox.exe"
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
{{< /tabpane >}}
|
||||
{{% /tab %}}
|
||||
|
||||
@@ -5,7 +5,7 @@ go 1.24.4
|
||||
require (
|
||||
github.com/googleapis/mcp-toolbox-sdk-go v0.4.0
|
||||
google.golang.org/adk v0.1.0
|
||||
google.golang.org/genai v1.35.0
|
||||
google.golang.org/genai v1.36.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
@@ -108,8 +108,8 @@ google.golang.org/adk v0.1.0 h1:+w/fHuqRVolotOATlujRA+2DKUuDrFH2poRdEX2QjB8=
|
||||
google.golang.org/adk v0.1.0/go.mod h1:NvtSLoNx7UzZIiUAI1KoJQLMmt9sG3oCgiCx1TLqKFw=
|
||||
google.golang.org/api v0.255.0 h1:OaF+IbRwOottVCYV2wZan7KUq7UeNUQn1BcPc4K7lE4=
|
||||
google.golang.org/api v0.255.0/go.mod h1:d1/EtvCLdtiWEV4rAEHDHGh2bCnqsWhw+M8y2ECN4a8=
|
||||
google.golang.org/genai v1.35.0 h1:Jo6g25CzVqFzGrX5mhWyBgQqXAUzxcx5jeK7U74zv9c=
|
||||
google.golang.org/genai v1.35.0/go.mod h1:A3kkl0nyBjyFlNjgxIwKq70julKbIxpSxqKO5gw/gmk=
|
||||
google.golang.org/genai v1.36.0 h1:sJCIjqTAmwrtAIaemtTiKkg2TO1RxnYEusTmEQ3nGxM=
|
||||
google.golang.org/genai v1.36.0/go.mod h1:A3kkl0nyBjyFlNjgxIwKq70julKbIxpSxqKO5gw/gmk=
|
||||
google.golang.org/genproto v0.0.0-20251014184007-4626949a642f h1:vLd1CJuJOUgV6qijD7KT5Y2ZtC97ll4dxjTUappMnbo=
|
||||
google.golang.org/genproto v0.0.0-20251014184007-4626949a642f/go.mod h1:PI3KrSadr00yqfv6UDvgZGFsmLqeRIwt8x4p5Oo7CdM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f h1:OiFuztEyBivVKDvguQJYWq1yDcfAHIID/FVrPR4oiI0=
|
||||
|
||||
@@ -4,7 +4,7 @@ go 1.24.6
|
||||
|
||||
require (
|
||||
github.com/googleapis/mcp-toolbox-sdk-go v0.4.0
|
||||
google.golang.org/genai v1.35.0
|
||||
google.golang.org/genai v1.36.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
@@ -102,8 +102,8 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.255.0 h1:OaF+IbRwOottVCYV2wZan7KUq7UeNUQn1BcPc4K7lE4=
|
||||
google.golang.org/api v0.255.0/go.mod h1:d1/EtvCLdtiWEV4rAEHDHGh2bCnqsWhw+M8y2ECN4a8=
|
||||
google.golang.org/genai v1.35.0 h1:Jo6g25CzVqFzGrX5mhWyBgQqXAUzxcx5jeK7U74zv9c=
|
||||
google.golang.org/genai v1.35.0/go.mod h1:A3kkl0nyBjyFlNjgxIwKq70julKbIxpSxqKO5gw/gmk=
|
||||
google.golang.org/genai v1.36.0 h1:sJCIjqTAmwrtAIaemtTiKkg2TO1RxnYEusTmEQ3nGxM=
|
||||
google.golang.org/genai v1.36.0/go.mod h1:A3kkl0nyBjyFlNjgxIwKq70julKbIxpSxqKO5gw/gmk=
|
||||
google.golang.org/genproto v0.0.0-20251014184007-4626949a642f h1:vLd1CJuJOUgV6qijD7KT5Y2ZtC97ll4dxjTUappMnbo=
|
||||
google.golang.org/genproto v0.0.0-20251014184007-4626949a642f/go.mod h1:PI3KrSadr00yqfv6UDvgZGFsmLqeRIwt8x4p5Oo7CdM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f h1:OiFuztEyBivVKDvguQJYWq1yDcfAHIID/FVrPR4oiI0=
|
||||
|
||||
@@ -33,12 +33,12 @@ require (
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
golang.org/x/crypto v0.43.0 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.32.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
google.golang.org/api v0.255.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20251014184007-4626949a642f // indirect
|
||||
|
||||
@@ -100,18 +100,18 @@ go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
|
||||
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
|
||||
@@ -52,6 +52,10 @@ details on how to connect your AI tools (IDEs) to databases via Toolbox and MCP.
|
||||
* `list_sequences`: List sequences in a PostgreSQL database.
|
||||
* `list_publication_tables`: List publication tables in a PostgreSQL database.
|
||||
* `list_tablespaces`: Lists tablespaces in the database.
|
||||
* `list_pg_settings`: List configuration parameters for the PostgreSQL server.
|
||||
* `list_database_stats`: Lists the key performance and activity statistics for
|
||||
each database in the AlloyDB instance.
|
||||
* `list_roles`: Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
## AlloyDB Postgres Admin
|
||||
|
||||
@@ -231,6 +235,10 @@ details on how to connect your AI tools (IDEs) to databases via Toolbox and MCP.
|
||||
* `list_sequences`: List sequences in a PostgreSQL database.
|
||||
* `list_publication_tables`: List publication tables in a PostgreSQL database.
|
||||
* `list_tablespaces`: Lists tablespaces in the database.
|
||||
* `list_pg_settings`: List configuration parameters for the PostgreSQL server.
|
||||
* `list_database_stats`: Lists the key performance and activity statistics for
|
||||
each database in the PostgreSQL instance.
* `list_roles`: Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
## Cloud SQL for PostgreSQL Observability
|
||||
|
||||
@@ -538,6 +546,10 @@ details on how to connect your AI tools (IDEs) to databases via Toolbox and MCP.
|
||||
* `list_sequences`: List sequences in a PostgreSQL database.
|
||||
* `list_publication_tables`: List publication tables in a PostgreSQL database.
|
||||
* `list_tablespaces`: Lists tablespaces in the database.
|
||||
* `list_pg_settings`: List configuration parameters for the PostgreSQL server.
|
||||
* `list_database_stats`: Lists the key performance and activity statistics for
|
||||
each database in the PostgreSQL server.
|
||||
* `list_roles`: Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
## Google Cloud Serverless for Apache Spark
|
||||
|
||||
|
||||
@@ -83,6 +83,16 @@ cluster][alloydb-free-trial].
|
||||
- [`postgres-list-tablespaces`](../tools/postgres/postgres-list-tablespaces.md)
|
||||
List tablespaces in an AlloyDB for PostgreSQL database.
|
||||
|
||||
- [`postgres-list-pg-settings`](../tools/postgres/postgres-list-pg-settings.md)
|
||||
List configuration parameters for the PostgreSQL server.
|
||||
|
||||
- [`postgres-list-database-stats`](../tools/postgres/postgres-list-database-stats.md)
|
||||
Lists the key performance and activity statistics for each database in the AlloyDB
|
||||
instance.
|
||||
|
||||
- [`postgres-list-roles`](../tools/postgres/postgres-list-roles.md)
|
||||
Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
### Pre-built Configurations
|
||||
|
||||
- [AlloyDB using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/alloydb_pg_mcp/)
|
||||
|
||||
@@ -79,6 +79,16 @@ to a database by following these instructions][csql-pg-quickstart].
|
||||
- [`postgres-list-tablespaces`](../tools/postgres/postgres-list-tablespaces.md)
|
||||
List tablespaces in a PostgreSQL database.
|
||||
|
||||
- [`postgres-list-pg-settings`](../tools/postgres/postgres-list-pg-settings.md)
|
||||
List configuration parameters for the PostgreSQL server.
|
||||
|
||||
- [`postgres-list-database-stats`](../tools/postgres/postgres-list-database-stats.md)
|
||||
Lists the key performance and activity statistics for each database in the PostgreSQL
|
||||
instance.
|
||||
|
||||
- [`postgres-list-roles`](../tools/postgres/postgres-list-roles.md)
|
||||
Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
### Pre-built Configurations
|
||||
|
||||
- [Cloud SQL for Postgres using
|
||||
|
||||
docs/en/resources/sources/mariadb.md (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
---
|
||||
title: "MariaDB"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
MariaDB is an open-source relational database compatible with MySQL.
|
||||
|
||||
---
|
||||
## About
|
||||
|
||||
MariaDB is a relational database management system derived from MySQL. It
|
||||
implements the MySQL protocol and client libraries and supports modern SQL
|
||||
features with a focus on performance and reliability.
|
||||
|
||||
**Note**: MariaDB is supported using the MySQL source.
|
||||
## Available Tools
|
||||
|
||||
- [`mysql-sql`](../tools/mysql/mysql-sql.md)
|
||||
Execute pre-defined prepared SQL queries in MariaDB.
|
||||
|
||||
- [`mysql-execute-sql`](../tools/mysql/mysql-execute-sql.md)
|
||||
Run parameterized SQL queries in MariaDB.
|
||||
|
||||
- [`mysql-list-active-queries`](../tools/mysql/mysql-list-active-queries.md)
|
||||
List active queries in MariaDB.
|
||||
|
||||
- [`mysql-list-tables`](../tools/mysql/mysql-list-tables.md)
|
||||
List tables in a MariaDB database.
|
||||
|
||||
- [`mysql-list-tables-missing-unique-indexes`](../tools/mysql/mysql-list-tables-missing-unique-indexes.md)
|
||||
List tables in a MariaDB database that do not have primary or unique indices.
|
||||
|
||||
- [`mysql-list-table-fragmentation`](../tools/mysql/mysql-list-table-fragmentation.md)
|
||||
List table fragmentation in MariaDB tables.
|
||||
|
||||
## Requirements
|
||||
|
||||
### Database User
|
||||
|
||||
This source only uses standard authentication. You will need to [create a
|
||||
MariaDB user][mariadb-users] to log in to the database.
|
||||
|
||||
[mariadb-users]: https://mariadb.com/kb/en/create-user/
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
sources:
|
||||
my_mariadb_db:
|
||||
kind: mysql
|
||||
host: 127.0.0.1
|
||||
port: 3306
|
||||
database: my_db
|
||||
user: ${MARIADB_USER}
|
||||
password: ${MARIADB_PASS}
|
||||
# Optional TLS and other driver parameters. For example, enable preferred TLS:
|
||||
# queryParams:
|
||||
# tls: preferred
|
||||
queryTimeout: 30s # Optional: query timeout duration
|
||||
```
|
||||
|
||||
{{< notice tip >}}
|
||||
Use environment variables instead of committing credentials to source files.
|
||||
{{< /notice >}}
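
For illustration only, here is a minimal standalone Go sketch (using the well-known `github.com/go-sql-driver/mysql` driver, which also speaks to MariaDB) of what the connection above amounts to at the driver level. Whether Toolbox builds its DSN exactly this way is an assumption about internals; the point is that `queryParams` entries such as `tls: preferred` are ordinary MySQL-protocol DSN parameters, so you can sanity-check them outside Toolbox first.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // MySQL-protocol driver; MariaDB speaks the same protocol
)

func main() {
	// DSN equivalent to the YAML example above; the query string carries the
	// same kind of driver parameters as `queryParams` (here: tls=preferred).
	// Host, port, database, and credentials are the example values, not real ones.
	dsn := "my-user:my-password@tcp(127.0.0.1:3306)/my_db?tls=preferred&parseTime=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping verifies that MariaDB accepts the MySQL-protocol handshake (including TLS, if negotiated).
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}

	var version string
	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to:", version)
}
```

If this standalone check connects and reports a MariaDB version string, the same host, credentials, and `queryParams` should also work in the source definition above.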
|
||||
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
| ------------ | :------: | :----------: | ----------------------------------------------------------------------------------------------- |
|
||||
| kind | string | true | Must be `mysql`. |
|
||||
| host | string | true | IP address to connect to (e.g. "127.0.0.1"). |
|
||||
| port | string | true | Port to connect to (e.g. "3307"). |
|
||||
| database | string | true | Name of the MariaDB database to connect to (e.g. "my_db"). |
|
||||
| user | string | true | Name of the MariaDB user to connect as (e.g. "my-mysql-user"). |
|
||||
| password | string | true | Password of the MariaDB user (e.g. "my-password"). |
|
||||
| queryTimeout | string | false | Maximum time to wait for query execution (e.g. "30s", "2m"). By default, no timeout is applied. |
|
||||
| queryParams | map<string,string> | false | Arbitrary DSN parameters passed to the driver (e.g. `tls: preferred`, `charset: utf8mb4`). Useful for enabling TLS or other connection options. |
|
||||
@@ -74,6 +74,16 @@ reputation for reliability, feature robustness, and performance.
|
||||
- [`postgres-list-tablespaces`](../tools/postgres/postgres-list-tablespaces.md)
|
||||
List tablespaces in a PostgreSQL database.
|
||||
|
||||
- [`postgres-list-pg-settings`](../tools/postgres/postgres-list-pg-settings.md)
|
||||
List configuration parameters for the PostgreSQL server.
|
||||
|
||||
- [`postgres-list-database-stats`](../tools/postgres/postgres-list-database-stats.md)
|
||||
Lists the key performance and activity statistics for each database in the PostgreSQL
|
||||
server.
|
||||
|
||||
- [`postgres-list-roles`](../tools/postgres/postgres-list-roles.md)
|
||||
Lists all the user-created roles in a PostgreSQL database.
|
||||
|
||||
### Pre-built Configurations
|
||||
|
||||
- [PostgreSQL using MCP](https://googleapis.github.io/genai-toolbox/how-to/connect-ide/postgres_mcp/)
|
||||
|
||||
@@ -0,0 +1,95 @@
|
||||
---
|
||||
title: "postgres-list-database-stats"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "postgres-list-database-stats" tool lists lists key performance and activity statistics of PostgreSQL databases.
|
||||
aliases:
|
||||
- /resources/tools/postgres-list-database-stats
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `postgres-list-database-stats` tool lists the key performance and activity statistics for each PostgreSQL database in the instance, offering insights into cache efficiency, transaction throughput, row-level activity, temporary file usage, and contention. It's compatible with
|
||||
any of the following sources:
|
||||
|
||||
- [alloydb-postgres](../../sources/alloydb-pg.md)
|
||||
- [cloud-sql-postgres](../../sources/cloud-sql-pg.md)
|
||||
- [postgres](../../sources/postgres.md)
|
||||
|
||||
`postgres-list-database-stats` lists detailed information as JSON for each database. The tool
|
||||
takes the following input parameters:
|
||||
|
||||
- `database_name` (optional): A text to filter results by database name. Default: `""`
|
||||
- `include_templates` (optional): Boolean, set to `true` to include template databases in the results. Default: `false`
|
||||
- `database_owner` (optional): A text to filter results by database owner. Default: `""`
|
||||
- `default_tablespace` (optional): A text to filter results by the default tablespace name. Default: `""`
|
||||
- `order_by` (optional): Specifies the sorting order. Valid values are `'size'` (descending) or `'commit'` (descending). Default: `database_name` ascending.
|
||||
- `limit` (optional): The maximum number of databases to return. Default: `10`
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
list_database_stats:
|
||||
kind: postgres-list-database-stats
|
||||
source: postgres-source
|
||||
description: |
|
||||
Lists the key performance and activity statistics for each PostgreSQL
|
||||
database in the instance, offering insights into cache efficiency,
|
||||
transaction throughput, row-level activity, temporary file usage, and
|
||||
contention. It returns: the database name, whether the database is
|
||||
connectable, database owner, default tablespace name, the percentage of
|
||||
data blocks found in the buffer cache rather than being read from disk
|
||||
(a higher value indicates better cache performance), the total number of
|
||||
disk blocks read from disk, the total number of times disk blocks were
|
||||
found already in the cache; the total number of committed transactions,
|
||||
the total number of rolled back transactions, the percentage of rolled
|
||||
back transactions compared to the total number of completed
|
||||
transactions, the total number of rows returned by queries, the total
|
||||
number of live rows fetched by scans, the total number of rows inserted,
|
||||
the total number of rows updated, the total number of rows deleted, the
|
||||
number of temporary files created by queries, the total size of
|
||||
temporary files used by queries in bytes, the number of query
|
||||
cancellations due to conflicts with recovery, the number of deadlocks
|
||||
detected, the current number of active backend connections, the
|
||||
timestamp when the database statistics were last reset, and the total
|
||||
database size in bytes.
|
||||
```
|
||||
|
||||
The response is a JSON array with the following elements:
|
||||
|
||||
```json
|
||||
{
|
||||
"database_name": "Name of the database",
|
||||
"is_connectable": "Boolean indicating Whether the database allows connections",
|
||||
"database_owner": "Username of the database owner",
|
||||
"default_tablespace": "Name of the default tablespace for the database",
|
||||
"cache_hit_ratio_percent": "The percentage of data blocks found in the buffer cache rather than being read from disk",
|
||||
"blocks_read_from_disk": "The total number of disk blocks read for this database",
|
||||
"blocks_hit_in_cache": "The total number of times disk blocks were found already in the cache.",
|
||||
"xact_commit": "The total number of committed transactions",
|
||||
"xact_rollback": "The total number of rolled back transactions",
|
||||
"rollback_ratio_percent": "The percentage of rolled back transactions compared to the total number of completed transactions",
|
||||
"rows_returned_by_queries": "The total number of rows returned by queries",
|
||||
"rows_fetched_by_scans": "The total number of live rows fetched by scans",
|
||||
"tup_inserted": "The total number of rows inserted",
|
||||
"tup_updated": "The total number of rows updated",
|
||||
"tup_deleted": "The total number of rows deleted",
|
||||
"temp_files": "The number of temporary files created by queries",
|
||||
"temp_size_bytes": "The total size of temporary files used by queries in bytes",
|
||||
"conflicts": "Number of query cancellations due to conflicts",
|
||||
"deadlocks": "Number of deadlocks detected",
|
||||
"active_connections": "The current number of active backend connections",
|
||||
"statistics_last_reset": "The timestamp when the database statistics were last reset",
|
||||
"database_size_bytes": "The total disk size of the database in bytes"
|
||||
}
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|-------------|:--------:|:------------:|------------------------------------------------------|
|
||||
| kind | string | true | Must be "postgres-list-database-stats". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | false | Description of the tool that is passed to the agent. |
|
||||
@@ -21,12 +21,10 @@ any of the following sources:
|
||||
`postgres-list-indexes` lists detailed information as JSON for indexes. The tool
|
||||
takes the following input parameters:
|
||||
|
||||
- `table_name` (optional): A text to filter results by table name. The input is
|
||||
used within a LIKE clause. Default: `""`
|
||||
- `index_name` (optional): A text to filter results by index name. The input is
|
||||
used within a LIKE clause. Default: `""`
|
||||
- `schema_name` (optional): A text to filter results by schema name. The input
|
||||
is used within a LIKE clause. Default: `""`
|
||||
- `table_name` (optional): A text to filter results by table name. Default: `""`
|
||||
- `index_name` (optional): A text to filter results by index name. Default: `""`
|
||||
- `schema_name` (optional): A text to filter results by schema name. Default: `""`
|
||||
- `only_unused` (optional): If true, returns indexes that have never been used.
|
||||
- `limit` (optional): The maximum number of rows to return. Default: `50`.
|
||||
|
||||
## Example
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
title: "postgres-list-pg-settings"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "postgres-list-pg-settings" tool lists PostgreSQL run-time configuration settings.
|
||||
aliases:
|
||||
- /resources/tools/postgres-list-pg-settings
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `postgres-list-pg-settings` tool lists the configuration parameters for the PostgreSQL server, their current values, and related information. It's compatible with any of the following sources:
|
||||
|
||||
- [alloydb-postgres](../../sources/alloydb-pg.md)
|
||||
- [cloud-sql-postgres](../../sources/cloud-sql-pg.md)
|
||||
- [postgres](../../sources/postgres.md)
|
||||
|
||||
`postgres-list-pg-settings` lists detailed information as JSON for each setting. The tool
|
||||
takes the following input parameters:
|
||||
|
||||
- `setting_name` (optional): A text to filter results by setting name. Default: `""`
|
||||
- `limit` (optional): The maximum number of rows to return. Default: `50`.
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
list_pg_settings:
|
||||
kind: postgres-list-pg-settings
|
||||
source: postgres-source
|
||||
description: |
|
||||
Lists configuration parameters for the PostgreSQL server ordered lexicographically,
|
||||
with a default limit of 50 rows. It returns the parameter name, its current setting,
|
||||
unit of measurement, a short description, the source of the current setting (e.g.,
|
||||
default, configuration file, session), and whether a restart is required when the
|
||||
parameter value is changed.
|
||||
```
|
||||
|
||||
The response is a JSON array with the following elements:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Setting name",
|
||||
"current_value": "Current value of the setting",
|
||||
"unit": "Unit of the setting",
|
||||
"short_desc": "Short description of the setting",
|
||||
"source": "Source of the current value (e.g., default, configuration file, session)",
|
||||
"requires_restart": "Indicates if a server restart is required to apply a change ('Yes', 'No', or 'No (Reload sufficient)')"
|
||||
}
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|-------------|:--------:|:------------:|------------------------------------------------------|
|
||||
| kind | string | true | Must be "postgres-list-pg-settings". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | false | Description of the tool that is passed to the agent. |
|
||||
docs/en/resources/tools/postgres/postgres-list-roles.md (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
---
|
||||
title: "postgres-list-roles"
|
||||
type: docs
|
||||
weight: 1
|
||||
description: >
|
||||
The "postgres-list-roles" tool lists user-created roles in a Postgres database.
|
||||
aliases:
|
||||
- /resources/tools/postgres-list-roles
|
||||
---
|
||||
|
||||
## About
|
||||
|
||||
The `postgres-list-roles` tool lists all the user-created roles in the instance, excluding system roles (like `cloudsql%` or `pg_%`). It provides details about each role's attributes and memberships. It's compatible with
|
||||
any of the following sources:
|
||||
|
||||
- [alloydb-postgres](../../sources/alloydb-pg.md)
|
||||
- [cloud-sql-postgres](../../sources/cloud-sql-pg.md)
|
||||
- [postgres](../../sources/postgres.md)
|
||||
|
||||
`postgres-list-roles` lists detailed information as JSON for each role. The tool
|
||||
takes the following input parameters:
|
||||
|
||||
- `role_name` (optional): A text to filter results by role name. Default: `""`
|
||||
- `limit` (optional): The maximum number of roles to return. Default: `50`
|
||||
|
||||
## Example
|
||||
|
||||
```yaml
|
||||
tools:
|
||||
list_roles:
|
||||
kind: postgres-list-roles
|
||||
source: postgres-source
|
||||
description: |
|
||||
Lists all the user-created roles in the instance. It returns the role name,
|
||||
Object ID, the maximum number of concurrent connections the role can make,
|
||||
along with boolean indicators for: superuser status, privilege inheritance
|
||||
from member roles, ability to create roles, ability to create databases,
|
||||
ability to log in, replication privilege, and the ability to bypass
|
||||
row-level security, the password expiration timestamp, a list of direct
|
||||
members belonging to this role, and a list of other roles/groups that this
|
||||
role is a member of.
|
||||
```
|
||||
|
||||
The response is a JSON array with the following elements:
|
||||
|
||||
```json
|
||||
{
|
||||
"role_name": "Name of the role",
|
||||
"oid": "Object ID of the role",
|
||||
"connection_limit": "Maximum concurrent connections allowed (-1 for no limit)",
|
||||
"is_superuser": "Boolean, true if the role is a superuser",
|
||||
"inherits_privileges": "Boolean, true if the role inherits privileges of roles it is a member of",
|
||||
"can_create_roles": "Boolean, true if the role can create other roles",
|
||||
"can_create_db": "Boolean, true if the role can create databases",
|
||||
"can_login": "Boolean, true if the role can log in",
|
||||
"is_replication_role": "Boolean, true if this is a replication role",
|
||||
"bypass_rls": "Boolean, true if the role bypasses row-level security policies",
|
||||
"valid_until": "Timestamp until the password is valid (null if forever)",
|
||||
"direct_members": ["Array of role names that are direct members of this role"],
|
||||
"member_of": ["Array of role names that this role is a member of"]
|
||||
}
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
| **field** | **type** | **required** | **description** |
|
||||
|-------------|:--------:|:------------:|------------------------------------------------------|
|
||||
| kind | string | true | Must be "postgres-list-roles". |
|
||||
| source | string | true | Name of the source the SQL should execute on. |
|
||||
| description | string | false | Description of the tool that is passed to the agent. |
|
||||
@@ -21,9 +21,9 @@ the following sources:
|
||||
`postgres-list-schemas` lists detailed information as JSON for each schema. The
|
||||
tool takes the following input parameters:
|
||||
|
||||
- `schema_name` (optional): A pattern to filter schema names using SQL LIKE
|
||||
operator.
|
||||
If omitted, all user-defined schemas are returned.
|
||||
- `schema_name` (optional): A text to filter results by schema name. Default: `""`
|
||||
- `owner` (optional): A text to filter results by owner name. Default: `""`
|
||||
- `limit` (optional): The maximum number of rows to return. Default: `50`.
|
||||
|
||||
## Example
|
||||
|
||||
|
||||
@@ -20,9 +20,9 @@ Postgres database. It's compatible with any of the following sources:
|
||||
`postgres-list-sequences` lists detailed information as JSON for all sequences.
|
||||
The tool takes the following input parameters:
|
||||
|
||||
- `sequencename` (optional): A text to filter results by sequence name. The
|
||||
- `sequence_name` (optional): A text to filter results by sequence name. The
|
||||
input is used within a LIKE clause. Default: `""`
|
||||
- `schemaname` (optional): A text to filter results by schema name. The input is
|
||||
- `schema_name` (optional): A text to filter results by schema name. The input is
|
||||
used within a LIKE clause. Default: `""`
|
||||
- `limit` (optional): The maximum number of rows to return. Default: `50`.
|
||||
|
||||
@@ -45,9 +45,9 @@ The response is a json array with the following elements:
|
||||
|
||||
```json
|
||||
{
|
||||
"sequencename": "sequence name",
|
||||
"schemaname": "schema name",
|
||||
"sequenceowner": "owner of the sequence",
|
||||
"sequence_name": "sequence name",
|
||||
"schema_name": "schema name",
|
||||
"sequence_owner": "owner of the sequence",
|
||||
"data_type": "data type of the sequence",
|
||||
"start_value": "starting value of the sequence",
|
||||
"min_value": "minimum value of the sequence",
|
||||
|
||||
@@ -19,11 +19,11 @@ a Postgres database, excluding those in system schemas (`pg_catalog`,
|
||||
- [postgres](../../sources/postgres.md)
|
||||
|
||||
`postgres-list-views` lists detailed view information (schemaname, viewname,
|
||||
ownername) as JSON for views in a database. The tool takes the following input
|
||||
ownername, definition) as JSON for views in a database. The tool takes the following input
|
||||
parameters:
|
||||
|
||||
- `viewname` (optional): A string pattern to filter view names. The search uses
|
||||
SQL LIKE operator to filter the views. Default: `""`
|
||||
- `view_name` (optional): A string pattern to filter view names. Default: `""`
|
||||
- `schema_name` (optional): A string pattern to filter schema names. Default: `""`
|
||||
- `limit` (optional): The maximum number of rows to return. Default: `50`.
|
||||
|
||||
## Example
|
||||
|
||||
docs/en/sdks/JS-sdk/_index.md (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
---
|
||||
title: "JS SDK"
|
||||
type: docs
|
||||
weight: 7
|
||||
description: >
|
||||
JS SDKs to connect to the MCP Toolbox server.
|
||||
---
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
The MCP Toolbox service provides a centralized way to manage and expose tools
|
||||
(like API connectors, database query tools, etc.) for use by GenAI applications.
|
||||
|
||||
These JS SDKs act as clients for that service. They handle the communication needed to:
|
||||
|
||||
* Fetch tool definitions from your running Toolbox instance.
|
||||
* Provide convenient JS objects or functions representing those tools.
|
||||
* Invoke the tools (calling the underlying APIs/services configured in Toolbox).
|
||||
* Handle authentication and parameter binding as needed.
|
||||
|
||||
By using these SDKs, you can easily leverage your Toolbox-managed tools directly
|
||||
within your JS applications or AI orchestration frameworks.
|
||||
|
||||
[Github](https://github.com/googleapis/mcp-toolbox-sdk-js)
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
title: "Go SDK"
|
||||
weight: 2
|
||||
description: Go lang client SDK
|
||||
icon: fa-brands fa-golang
|
||||
manualLink: "https://github.com/googleapis/mcp-toolbox-sdk-go"
|
||||
manualLinkTarget: _blank
|
||||
---
|
||||
|
||||
<html>
|
||||
<head>
|
||||
<link rel="canonical" href="https://github.com/googleapis/mcp-toolbox-sdk-go"/>
|
||||
<meta http-equiv="refresh" content="0;url=https://github.com/googleapis/mcp-toolbox-sdk-go"/>
|
||||
</head>
|
||||
</html>
|
||||
docs/en/sdks/go-sdk/_index.md (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
---
|
||||
title: "Go SDK"
|
||||
type: docs
|
||||
weight: 7
|
||||
description: >
|
||||
Go SDKs to connect to the MCP Toolbox server.
|
||||
---
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
The MCP Toolbox service provides a centralized way to manage and expose tools
|
||||
(like API connectors, database query tools, etc.) for use by GenAI applications.
|
||||
|
||||
The Go SDK acts as a client for that service. It handles the communication needed to:
|
||||
|
||||
* Fetch tool definitions from your running Toolbox instance.
|
||||
* Provide convenient Go structs representing those tools.
|
||||
* Invoke the tools (calling the underlying APIs/services configured in Toolbox).
|
||||
* Handle authentication and parameter binding as needed.
|
||||
|
||||
By using the SDK, you can easily leverage your Toolbox-managed tools directly
|
||||
within your Go applications or AI orchestration frameworks.
|
||||
|
||||
[Github](https://github.com/googleapis/mcp-toolbox-sdk-go)
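
For orientation, a minimal sketch of that flow is shown below. The package path, constructor, and method names (`core.NewToolboxClient`, `LoadTool`, `Invoke`) are assumptions based on the SDK repository and may change between releases; treat the linked README as the source of truth.

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Assumed import path; confirm against the mcp-toolbox-sdk-go README.
	"github.com/googleapis/mcp-toolbox-sdk-go/core"
)

func main() {
	ctx := context.Background()

	// Assumption: the client is created from the URL of a running Toolbox server.
	client, err := core.NewToolboxClient("http://127.0.0.1:5000")
	if err != nil {
		log.Fatalf("create client: %v", err)
	}

	// Assumption: tools declared in tools.yaml are loaded by name.
	tool, err := client.LoadTool("list_pg_settings", ctx)
	if err != nil {
		log.Fatalf("load tool: %v", err)
	}

	// Assumption: Invoke takes the tool's declared parameters as a map.
	result, err := tool.Invoke(ctx, map[string]any{"setting_name": "max_connections"})
	if err != nil {
		log.Fatalf("invoke tool: %v", err)
	}
	fmt.Println(result)
}
```

Authentication and parameter binding, mentioned above, are configured through client and tool options documented in the SDK README rather than in this sketch.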
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
title: "JS SDK"
|
||||
weight: 2
|
||||
description: Javascript client SDK
|
||||
icon: fa-brands fa-node-js
|
||||
manualLink: "https://github.com/googleapis/mcp-toolbox-sdk-js"
|
||||
manualLinkTarget: _blank
|
||||
---
|
||||
|
||||
<html>
|
||||
<head>
|
||||
<link rel="canonical" href="https://github.com/googleapis/mcp-toolbox-sdk-js"/>
|
||||
<meta http-equiv="refresh" content="0;url=https://github.com/googleapis/mcp-toolbox-sdk-js"/>
|
||||
</head>
|
||||
</html>
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
title: "Python SDK"
|
||||
weight: 2
|
||||
description: Python client SDK
|
||||
icon: fa-brands fa-python
|
||||
manualLink: "https://github.com/googleapis/mcp-toolbox-sdk-python"
|
||||
manualLinkTarget: _blank
|
||||
---
|
||||
|
||||
<html>
|
||||
<head>
|
||||
<link rel="canonical" href="https://github.com/googleapis/mcp-toolbox-sdk-python"/>
|
||||
<meta http-equiv="refresh" content="0;url=https://github.com/googleapis/mcp-toolbox-sdk-python"/>
|
||||
</head>
|
||||
</html>
|
||||
docs/en/sdks/python-sdk/_index.md (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
---
|
||||
title: "Python SDK"
|
||||
type: docs
|
||||
weight: 7
|
||||
description: >
|
||||
Python SDKs to connect to the MCP Toolbox server.
|
||||
---
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
The MCP Toolbox service provides a centralized way to manage and expose tools
|
||||
(like API connectors, database query tools, etc.) for use by GenAI applications.
|
||||
|
||||
These Python SDKs act as clients for that service. They handle the communication needed to:
|
||||
|
||||
* Fetch tool definitions from your running Toolbox instance.
|
||||
* Provide convenient Python objects or functions representing those tools.
|
||||
* Invoke the tools (calling the underlying APIs/services configured in Toolbox).
|
||||
* Handle authentication and parameter binding as needed.
|
||||
|
||||
By using these SDKs, you can easily leverage your Toolbox-managed tools directly
|
||||
within your Python applications or AI orchestration frameworks.
|
||||
|
||||
## Which Package Should I Use?
|
||||
|
||||
Choosing the right package depends on how you are building your application:
|
||||
|
||||
* [`toolbox-langchain`](langchain):
|
||||
Use this package if you are building your application using the LangChain or
|
||||
LangGraph frameworks. It provides tools that are directly compatible with the
|
||||
LangChain ecosystem (`BaseTool` interface), simplifying integration.
|
||||
* [`toolbox-llamaindex`](llamaindex):
|
||||
Use this package if you are building your application using the LlamaIndex framework.
|
||||
It provides tools that are directly compatible with the
|
||||
LlamaIndex ecosystem (`BaseTool` interface), simplifying integration.
|
||||
* [`toolbox-core`](core):
|
||||
Use this package if you are not using LangChain/LangGraph or any other
|
||||
orchestration framework, or if you need a framework-agnostic way to interact
|
||||
with Toolbox tools (e.g., for custom orchestration logic or direct use in
|
||||
Python scripts).
|
||||
|
||||
## Available Packages
|
||||
|
||||
This repository hosts the following Python packages. See the package-specific
|
||||
README for detailed installation and usage instructions:
|
||||
|
||||
| Package | Target Use Case | Integration | Path | Details (README) | PyPI Status |
|
||||
| :------ | :---------- | :---------- | :---------------------- | :---------- | :---------
|
||||
| `toolbox-core` | Framework-agnostic / Custom applications | Use directly / Custom | `packages/toolbox-core/` | 📄 [View README](https://github.com/googleapis/mcp-toolbox-sdk-python/blob/main/packages/toolbox-core/README.md) |  |
|
||||
| `toolbox-langchain` | LangChain / LangGraph applications | LangChain / LangGraph | `packages/toolbox-langchain/` | 📄 [View README](https://github.com/googleapis/mcp-toolbox-sdk-python/blob/main/packages/toolbox-langchain/README.md) |  |
|
||||
| `toolbox-llamaindex` | LlamaIndex applications | LlamaIndex | `packages/toolbox-llamaindex/` | 📄 [View README](https://github.com/googleapis/mcp-toolbox-sdk-python/blob/main/packages/toolbox-llamaindex/README.md) |  |
|
||||
|
||||
|
||||
[Github](https://github.com/googleapis/mcp-toolbox-sdk-python)
|
||||
@@ -208,6 +208,18 @@ tools:
|
||||
kind: postgres-list-tablespaces
|
||||
source: alloydb-pg-source
|
||||
|
||||
list_pg_settings:
|
||||
kind: postgres-list-pg-settings
|
||||
source: alloydb-pg-source
|
||||
|
||||
list_database_stats:
|
||||
kind: postgres-list-database-stats
|
||||
source: alloydb-pg-source
|
||||
|
||||
list_roles:
|
||||
kind: postgres-list-roles
|
||||
source: alloydb-pg-source
|
||||
|
||||
toolsets:
|
||||
alloydb_postgres_database_tools:
|
||||
- execute_sql
|
||||
@@ -234,3 +246,6 @@ toolsets:
|
||||
- get_column_cardinality
|
||||
- list_publication_tables
|
||||
- list_tablespaces
|
||||
- list_pg_settings
|
||||
- list_database_stats
|
||||
- list_roles
|
||||
|
||||
@@ -210,6 +210,18 @@ tools:
|
||||
kind: postgres-list-tablespaces
|
||||
source: cloudsql-pg-source
|
||||
|
||||
list_pg_settings:
|
||||
kind: postgres-list-pg-settings
|
||||
source: cloudsql-pg-source
|
||||
|
||||
list_database_stats:
|
||||
kind: postgres-list-database-stats
|
||||
source: cloudsql-pg-source
|
||||
|
||||
list_roles:
|
||||
kind: postgres-list-roles
|
||||
source: cloudsql-pg-source
|
||||
|
||||
toolsets:
|
||||
cloud_sql_postgres_database_tools:
|
||||
- execute_sql
|
||||
@@ -236,3 +248,6 @@ toolsets:
|
||||
- get_column_cardinality
|
||||
- list_publication_tables
|
||||
- list_tablespaces
|
||||
- list_pg_settings
|
||||
- list_database_stats
|
||||
- list_roles
|
||||
|
||||
@@ -209,6 +209,18 @@ tools:
|
||||
kind: postgres-list-tablespaces
|
||||
source: postgresql-source
|
||||
|
||||
list_pg_settings:
|
||||
kind: postgres-list-pg-settings
|
||||
source: postgresql-source
|
||||
|
||||
list_database_stats:
|
||||
kind: postgres-list-database-stats
|
||||
source: postgresql-source
|
||||
|
||||
list_roles:
|
||||
kind: postgres-list-roles
|
||||
source: postgresql-source
|
||||
|
||||
toolsets:
|
||||
postgres_database_tools:
|
||||
- execute_sql
|
||||
@@ -235,3 +247,6 @@ toolsets:
|
||||
- get_column_cardinality
|
||||
- list_publication_tables
|
||||
- list_tablespaces
|
||||
- list_pg_settings
|
||||
- list_database_stats
|
||||
- list_roles
|
||||
|
||||
@@ -98,11 +98,10 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
|
||||
}
|
||||
|
||||
allParameters := parameters.Parameters{}
|
||||
description := cfg.Description
|
||||
if description == "" {
|
||||
description = "Fetches the current state of the PostgreSQL server, returning the version, whether it's a replica, uptime duration, maximum connection limit, number of current connections, number of active connections, and the percentage of connections in use."
|
||||
if cfg.Description == "" {
|
||||
cfg.Description = "Fetches the current state of the PostgreSQL server, returning the version, whether it's a replica, uptime duration, maximum connection limit, number of current connections, number of active connections, and the percentage of connections in use."
|
||||
}
|
||||
mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
|
||||
mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)
|
||||
|
||||
// finish tool setup
|
||||
return Tool{
|
||||
@@ -134,7 +133,13 @@ func (t Tool) ToConfig() tools.ToolConfig {
|
||||
}
|
||||
|
||||
func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
|
||||
sliceParams := params.AsSlice()
|
||||
paramsMap := params.AsMap()
|
||||
|
||||
newParams, err := parameters.GetParams(t.allParams, paramsMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to extract standard params %w", err)
|
||||
}
|
||||
sliceParams := newParams.AsSlice()
|
||||
|
||||
results, err := t.pool.Query(ctx, databaseOverviewStatement, sliceParams...)
|
||||
if err != nil {
|
||||
|
||||
@@ -0,0 +1,276 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package postgreslistdatabasestats
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
yaml "github.com/goccy/go-yaml"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"
|
||||
"github.com/googleapis/genai-toolbox/internal/sources/postgres"
|
||||
"github.com/googleapis/genai-toolbox/internal/tools"
|
||||
"github.com/googleapis/genai-toolbox/internal/util/parameters"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
const kind string = "postgres-list-database-stats"
|
||||
|
||||
// SQL query to list database statistics
|
||||
const listDatabaseStats = `
|
||||
WITH database_stats AS (
|
||||
SELECT
|
||||
s.datname AS database_name,
|
||||
-- Database Metadata
|
||||
d.datallowconn AS is_connectable,
|
||||
pg_get_userbyid(d.datdba) AS database_owner,
|
||||
ts.spcname AS default_tablespace,
|
||||
|
||||
-- Cache Performance
|
||||
CASE
|
||||
WHEN (s.blks_hit + s.blks_read) = 0 THEN 0
|
||||
ELSE round((s.blks_hit * 100.0) / (s.blks_hit + s.blks_read), 2)
|
||||
END AS cache_hit_ratio_percent,
|
||||
s.blks_read AS blocks_read_from_disk,
|
||||
s.blks_hit AS blocks_hit_in_cache,
|
||||
|
||||
-- Transaction Throughput
|
||||
s.xact_commit,
|
||||
s.xact_rollback,
|
||||
round(s.xact_rollback * 100.0 / (s.xact_commit + s.xact_rollback + 1), 2) AS rollback_ratio_percent,
|
||||
|
||||
-- Tuple Activity
|
||||
s.tup_returned AS rows_returned_by_queries,
|
||||
s.tup_fetched AS rows_fetched_by_scans,
|
||||
s.tup_inserted,
|
||||
s.tup_updated,
|
||||
s.tup_deleted,
|
||||
|
||||
-- Temporary File Usage
|
||||
s.temp_files,
|
||||
s.temp_bytes AS temp_size_bytes,
|
||||
|
||||
-- Conflicts & Deadlocks
|
||||
s.conflicts,
|
||||
s.deadlocks,
|
||||
|
||||
-- General Info
|
||||
s.numbackends AS active_connections,
|
||||
s.stats_reset AS statistics_last_reset,
|
||||
pg_database_size(s.datid) AS database_size_bytes
|
||||
FROM
|
||||
pg_stat_database s
|
||||
JOIN
|
||||
pg_database d ON d.oid = s.datid
|
||||
JOIN
|
||||
pg_tablespace ts ON ts.oid = d.dattablespace
|
||||
WHERE
|
||||
-- Exclude cloudsql internal databases
|
||||
s.datname NOT IN ('cloudsqladmin')
|
||||
-- Exclude template databases if not requested
|
||||
AND ( $2::boolean IS TRUE OR d.datistemplate IS FALSE )
|
||||
)
|
||||
SELECT *
|
||||
FROM database_stats
|
||||
WHERE
|
||||
($1::text IS NULL OR database_name LIKE '%' || $1::text || '%')
|
||||
AND ($3::text IS NULL OR database_owner LIKE '%' || $3::text || '%')
|
||||
AND ($4::text IS NULL OR default_tablespace LIKE '%' || $4::text || '%')
|
||||
ORDER BY
|
||||
CASE WHEN $5::text = 'size' THEN database_size_bytes END DESC,
|
||||
CASE WHEN $5::text = 'commit' THEN xact_commit END DESC,
|
||||
database_name
|
||||
LIMIT COALESCE($6::int, 10);
|
||||
`
|
||||
|
||||
func init() {
|
||||
if !tools.Register(kind, newConfig) {
|
||||
panic(fmt.Sprintf("tool kind %q already registered", kind))
|
||||
}
|
||||
}
|
||||
|
||||
func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
|
||||
actual := Config{Name: name}
|
||||
if err := decoder.DecodeContext(ctx, &actual); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return actual, nil
|
||||
}
|
||||
|
||||
type compatibleSource interface {
|
||||
PostgresPool() *pgxpool.Pool
|
||||
}
|
||||
|
||||
// validate compatible sources are still compatible
|
||||
var _ compatibleSource = &alloydbpg.Source{}
|
||||
var _ compatibleSource = &cloudsqlpg.Source{}
|
||||
var _ compatibleSource = &postgres.Source{}
|
||||
|
||||
var compatibleSources = [...]string{alloydbpg.SourceKind, cloudsqlpg.SourceKind, postgres.SourceKind}
|
||||
|
||||
type Config struct {
|
||||
Name string `yaml:"name" validate:"required"`
|
||||
Kind string `yaml:"kind" validate:"required"`
|
||||
Source string `yaml:"source" validate:"required"`
|
||||
Description string `yaml:"description"`
|
||||
AuthRequired []string `yaml:"authRequired"`
|
||||
}
|
||||
|
||||
// validate interface
|
||||
var _ tools.ToolConfig = Config{}
|
||||
|
||||
func (cfg Config) ToolConfigKind() string {
|
||||
return kind
|
||||
}
|
||||
|
||||
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
|
||||
// verify source exists
|
||||
rawS, ok := srcs[cfg.Source]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no source named %q configured", cfg.Source)
|
||||
}
|
||||
|
||||
// verify the source is compatible
|
||||
s, ok := rawS.(compatibleSource)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
|
||||
}
|
||||
|
||||
allParameters := parameters.Parameters{
|
||||
parameters.NewStringParameterWithDefault("database_name", "", "Optional: A specific database name pattern to search for."),
|
||||
parameters.NewBooleanParameterWithDefault("include_templates", false, "Optional: Whether to include template databases in the results."),
|
||||
parameters.NewStringParameterWithDefault("database_owner", "", "Optional: A specific database owner name pattern to search for."),
|
||||
parameters.NewStringParameterWithDefault("default_tablespace", "", "Optional: A specific default tablespace name pattern to search for."),
|
||||
parameters.NewStringParameterWithDefault("order_by", "", "Optional: The field to order the results by. Valid values are 'size' and 'commit'."),
|
||||
parameters.NewIntParameterWithDefault("limit", 10, "Optional: The maximum number of rows to return."),
|
||||
}
|
||||
description := cfg.Description
|
||||
if description == "" {
|
||||
description =
|
||||
"Lists the key performance and activity statistics for each PostgreSQL database" +
|
||||
"in the instance, offering insights into cache efficiency, transaction throughput" +
|
||||
"row-level activity, temporary file " +
|
||||
"usage, and contention. " +
|
||||
"It returns: the database name, whether the database is connectable, " +
|
||||
"database owner, default tablespace name, the percentage of data blocks " +
|
||||
"found in the buffer cache rather than being read from disk (a higher " +
|
||||
"value indicates better cache performance), the total number of disk " +
|
||||
"blocks read from disk, the total number of times disk blocks were found " +
|
||||
"already in the cache; the total number of committed transactions, the " +
|
||||
"total number of rolled back transactions, the percentage of rolled back " +
|
||||
"transactions compared to the total number of completed transactions, the " +
|
||||
"total number of rows returned by queries, the total number of live rows " +
|
||||
"fetched by scans, the total number of rows inserted, the total number " +
|
||||
"of rows updated, the total number of rows deleted, the number of " +
|
||||
"temporary files created by queries, the total size of all temporary " +
|
||||
"files created by queries in bytes, the number of query cancellations due " +
|
||||
"to conflicts with recovery, the number of deadlocks detected, the current " +
|
||||
"number of active connections to the database, the timestamp of the " +
|
||||
"last statistics reset, and total database size in bytes."
|
||||
}
|
||||
mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
|
||||
|
||||
// finish tool setup
|
||||
return Tool{
|
||||
Config: cfg,
|
||||
allParams: allParameters,
|
||||
pool: s.PostgresPool(),
|
||||
manifest: tools.Manifest{
|
||||
Description: cfg.Description,
|
||||
Parameters: allParameters.Manifest(),
|
||||
AuthRequired: cfg.AuthRequired,
|
||||
},
|
||||
mcpManifest: mcpManifest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validate interface
|
||||
var _ tools.Tool = Tool{}
|
||||
|
||||
type Tool struct {
|
||||
Config
|
||||
allParams parameters.Parameters `yaml:"allParams"`
|
||||
pool *pgxpool.Pool
|
||||
manifest tools.Manifest
|
||||
mcpManifest tools.McpManifest
|
||||
}
|
||||
|
||||
func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
|
||||
paramsMap := params.AsMap()
|
||||
|
||||
newParams, err := parameters.GetParams(t.allParams, paramsMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to extract standard params %w", err)
|
||||
}
|
||||
sliceParams := newParams.AsSlice()
|
||||
|
||||
results, err := t.pool.Query(ctx, listDatabaseStats, sliceParams...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to execute query: %w", err)
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
fields := results.FieldDescriptions()
|
||||
var out []map[string]any
|
||||
|
||||
for results.Next() {
|
||||
values, err := results.Values()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse row: %w", err)
|
||||
}
|
||||
rowMap := make(map[string]any)
|
||||
for i, field := range fields {
|
||||
rowMap[string(field.Name)] = values[i]
|
||||
}
|
||||
out = append(out, rowMap)
|
||||
}
|
||||
|
||||
// this will catch actual query execution errors
|
||||
if err := results.Err(); err != nil {
|
||||
return nil, fmt.Errorf("unable to execute query: %w", err)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (parameters.ParamValues, error) {
|
||||
return parameters.ParseParams(t.allParams, data, claims)
|
||||
}
|
||||
|
||||
func (t Tool) Manifest() tools.Manifest {
|
||||
return t.manifest
|
||||
}
|
||||
|
||||
func (t Tool) McpManifest() tools.McpManifest {
|
||||
return t.mcpManifest
|
||||
}
|
||||
|
||||
func (t Tool) Authorized(verifiedAuthServices []string) bool {
|
||||
return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
|
||||
}
|
||||
|
||||
func (t Tool) RequiresClientAuthorization(resourceMgr tools.SourceProvider) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t Tool) ToConfig() tools.ToolConfig {
|
||||
return t.Config
|
||||
}
|
||||
|
||||
func (t Tool) GetAuthTokenHeaderName() string {
|
||||
return "Authorization"
|
||||
}
|
||||
@@ -0,0 +1,95 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslistdatabasestats_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistdatabasestats"
)

func TestParseFromYamlPostgresListDatabaseStats(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-database-stats
                    source: my-postgres-instance
                    description: some description
                    authRequired:
                        - my-google-auth-service
                        - other-auth-service
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistdatabasestats.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-database-stats",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
                },
            },
        },
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-database-stats
                    source: my-postgres-instance
                    description: some description
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistdatabasestats.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-database-stats",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{},
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }

}
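The parsing tests above only exercise how the `postgres-list-database-stats` config is unmarshalled from YAML. As a hedged illustration of how such a tool could be declared in a tools file, a minimal sketch follows; the `my-postgres-instance` source name is carried over from the test fixtures, while the connection values and tool name are placeholders, not part of this change.

```yaml
# Minimal sketch only; connection values are illustrative placeholders.
sources:
  my-postgres-instance:
    kind: postgres
    host: 127.0.0.1
    port: "5432"
    database: postgres
    user: my-user
    password: my-password

tools:
  list_database_stats:
    kind: postgres-list-database-stats
    source: my-postgres-instance
    description: Lists per-database activity and cache statistics.
```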
@@ -59,7 +59,8 @@ const listIndexesStatement = `
    ON i.oid = s.indexrelid
  WHERE
    t.relkind = 'r'
    AND s.schemaname NOT IN ('pg_catalog', 'information_schema')
    AND s.schemaname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
    AND s.schemaname NOT LIKE 'pg_temp_%'
)
SELECT *
FROM IndexDetails
@@ -67,11 +68,12 @@ const listIndexesStatement = `
  ($1::text IS NULL OR schema_name LIKE '%' || $1 || '%')
  AND ($2::text IS NULL OR table_name LIKE '%' || $2 || '%')
  AND ($3::text IS NULL OR index_name LIKE '%' || $3 || '%')
  AND ($4::boolean IS NOT TRUE OR is_used IS FALSE)
ORDER BY
  schema_name,
  table_name,
  index_name
LIMIT COALESCE($4::int, 50);
LIMIT COALESCE($5::int, 50);
`

func init() {
@@ -131,13 +133,14 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
        parameters.NewStringParameterWithDefault("schema_name", "", "Optional: a text to filter results by schema name. The input is used within a LIKE clause."),
        parameters.NewStringParameterWithDefault("table_name", "", "Optional: a text to filter results by table name. The input is used within a LIKE clause."),
        parameters.NewStringParameterWithDefault("index_name", "", "Optional: a text to filter results by index name. The input is used within a LIKE clause."),
        parameters.NewBooleanParameterWithDefault("only_unused", false, "Optional: If true, only returns indexes that have never been used."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return. Default is 50"),
    }
    description := cfg.Description
    if description == "" {
        description = "Lists available user indexes in the database, excluding system schemas (pg_catalog, information_schema). For each index, the following properties are returned: schema name, table name, index name, index type (access method), a boolean indicating if it's a unique index, a boolean indicating if it's for a primary key, the index definition, index size in bytes, the number of index scans, the number of index tuples read, the number of table tuples fetched via index scans, and a boolean indicating if the index has been used at least once."

    if cfg.Description == "" {
        cfg.Description = "Lists available user indexes in the database, excluding system schemas (pg_catalog, information_schema). For each index, the following properties are returned: schema name, table name, index name, index type (access method), a boolean indicating if it's a unique index, a boolean indicating if it's for a primary key, the index definition, index size in bytes, the number of index scans, the number of index tuples read, the number of table tuples fetched via index scans, and a boolean indicating if the index has been used at least once."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
    mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
@@ -169,7 +172,13 @@ func (t Tool) ToConfig() tools.ToolConfig {
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    sliceParams := params.AsSlice()
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listIndexesStatement, sliceParams...)
    if err != nil {
@@ -0,0 +1,204 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslistpgsettings

import (
    "context"
    "fmt"

    yaml "github.com/goccy/go-yaml"
    "github.com/googleapis/genai-toolbox/internal/sources"
    "github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
    "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"
    "github.com/googleapis/genai-toolbox/internal/sources/postgres"
    "github.com/googleapis/genai-toolbox/internal/tools"
    "github.com/googleapis/genai-toolbox/internal/util/parameters"
    "github.com/jackc/pgx/v5/pgxpool"
)

const kind string = "postgres-list-pg-settings"

const listPgSettingsStatement = `
SELECT
  name,
  setting AS current_value,
  unit,
  short_desc,
  source,
  CASE context
    WHEN 'postmaster' THEN 'Yes'
    WHEN 'sighup' THEN 'No (Reload sufficient)'
    ELSE 'No'
  END
  AS requires_restart
FROM pg_settings
WHERE ($1::text IS NULL OR name LIKE '%' || $1::text || '%')
ORDER BY name
LIMIT COALESCE($2::int, 50);
`

func init() {
    if !tools.Register(kind, newConfig) {
        panic(fmt.Sprintf("tool kind %q already registered", kind))
    }
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
    actual := Config{Name: name}
    if err := decoder.DecodeContext(ctx, &actual); err != nil {
        return nil, err
    }
    return actual, nil
}

type compatibleSource interface {
    PostgresPool() *pgxpool.Pool
}

// validate compatible sources are still compatible
var _ compatibleSource = &alloydbpg.Source{}
var _ compatibleSource = &cloudsqlpg.Source{}
var _ compatibleSource = &postgres.Source{}

var compatibleSources = [...]string{alloydbpg.SourceKind, cloudsqlpg.SourceKind, postgres.SourceKind}

type Config struct {
    Name         string   `yaml:"name" validate:"required"`
    Kind         string   `yaml:"kind" validate:"required"`
    Source       string   `yaml:"source" validate:"required"`
    Description  string   `yaml:"description"`
    AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
    return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
    // verify source exists
    rawS, ok := srcs[cfg.Source]
    if !ok {
        return nil, fmt.Errorf("no source named %q configured", cfg.Source)
    }

    // verify the source is compatible
    s, ok := rawS.(compatibleSource)
    if !ok {
        return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
    }

    allParameters := parameters.Parameters{
        parameters.NewStringParameterWithDefault("setting_name", "", "Optional: A specific configuration parameter name pattern to search for."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return."),
    }
    description := cfg.Description
    if description == "" {
        description = "Lists configuration parameters for the postgres server ordered lexicographically, with a default limit of 50 rows. It returns the parameter name, its current setting, unit of measurement, a short description, the source of the current setting (e.g., default, configuration file, session), and whether a restart is required when the parameter value is changed."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
        Config:    cfg,
        allParams: allParameters,
        pool:      s.PostgresPool(),
        manifest: tools.Manifest{
            Description:  cfg.Description,
            Parameters:   allParameters.Manifest(),
            AuthRequired: cfg.AuthRequired,
        },
        mcpManifest: mcpManifest,
    }, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
    Config
    allParams   parameters.Parameters `yaml:"allParams"`
    pool        *pgxpool.Pool
    manifest    tools.Manifest
    mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listPgSettingsStatement, sliceParams...)
    if err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }
    defer results.Close()

    fields := results.FieldDescriptions()
    var out []map[string]any

    for results.Next() {
        values, err := results.Values()
        if err != nil {
            return nil, fmt.Errorf("unable to parse row: %w", err)
        }
        rowMap := make(map[string]any)
        for i, field := range fields {
            rowMap[string(field.Name)] = values[i]
        }
        out = append(out, rowMap)
    }

    // this will catch actual query execution errors
    if err := results.Err(); err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }

    return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (parameters.ParamValues, error) {
    return parameters.ParseParams(t.allParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
    return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
    return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
    return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization(resourceMgr tools.SourceProvider) bool {
    return false
}

func (t Tool) ToConfig() tools.ToolConfig {
    return t.Config
}

func (t Tool) GetAuthTokenHeaderName() string {
    return "Authorization"
}
@@ -0,0 +1,95 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslistpgsettings_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistpgsettings"
)

func TestParseFromYamlPostgreslistPgSettings(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-pg-settings
                    source: my-postgres-instance
                    description: some description
                    authRequired:
                        - my-google-auth-service
                        - other-auth-service
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistpgsettings.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-pg-settings",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
                },
            },
        },
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-pg-settings
                    source: my-postgres-instance
                    description: some description
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistpgsettings.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-pg-settings",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{},
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }

}
internal/tools/postgres/postgreslistroles/postgreslistroles.go (new file, 228 lines)
@@ -0,0 +1,228 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslistroles

import (
    "context"
    "fmt"

    yaml "github.com/goccy/go-yaml"
    "github.com/googleapis/genai-toolbox/internal/sources"
    "github.com/googleapis/genai-toolbox/internal/sources/alloydbpg"
    "github.com/googleapis/genai-toolbox/internal/sources/cloudsqlpg"
    "github.com/googleapis/genai-toolbox/internal/sources/postgres"
    "github.com/googleapis/genai-toolbox/internal/tools"
    "github.com/googleapis/genai-toolbox/internal/util/parameters"
    "github.com/jackc/pgx/v5/pgxpool"
)

const kind string = "postgres-list-roles"

const listRolesStatement = `
WITH RoleDetails AS (
  SELECT
    r.rolname AS role_name,
    r.oid AS oid,
    r.rolconnlimit AS connection_limit,
    r.rolsuper AS is_superuser,
    r.rolinherit AS inherits_privileges,
    r.rolcreaterole AS can_create_roles,
    r.rolcreatedb AS can_create_db,
    r.rolcanlogin AS can_login,
    r.rolreplication AS is_replication_role,
    r.rolbypassrls AS bypass_rls,
    r.rolvaliduntil AS valid_until,
    -- List of roles that belong to this role (Direct Members)
    ARRAY(
      SELECT m_r.rolname
      FROM pg_auth_members pam
      JOIN pg_roles m_r ON pam.member = m_r.oid
      WHERE pam.roleid = r.oid
    ) AS direct_members,
    -- List of roles that this role belongs to (Member Of)
    ARRAY(
      SELECT g_r.rolname
      FROM pg_auth_members pam
      JOIN pg_roles g_r ON pam.roleid = g_r.oid
      WHERE pam.member = r.oid
    ) AS member_of
  FROM pg_roles r
  -- Exclude system and internal roles
  WHERE r.rolname NOT LIKE 'cloudsql%'
    AND r.rolname NOT LIKE 'alloydb_%'
    AND r.rolname NOT LIKE 'pg_%'
)
SELECT *
FROM RoleDetails
WHERE
  ($1::text IS NULL OR role_name LIKE '%' || $1 || '%')
ORDER BY role_name
LIMIT COALESCE($2::int, 50);
`

func init() {
    if !tools.Register(kind, newConfig) {
        panic(fmt.Sprintf("tool kind %q already registered", kind))
    }
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
    actual := Config{Name: name}
    if err := decoder.DecodeContext(ctx, &actual); err != nil {
        return nil, err
    }
    return actual, nil
}

type compatibleSource interface {
    PostgresPool() *pgxpool.Pool
}

// validate compatible sources are still compatible
var _ compatibleSource = &alloydbpg.Source{}
var _ compatibleSource = &cloudsqlpg.Source{}
var _ compatibleSource = &postgres.Source{}

var compatibleSources = [...]string{alloydbpg.SourceKind, cloudsqlpg.SourceKind, postgres.SourceKind}

type Config struct {
    Name         string   `yaml:"name" validate:"required"`
    Kind         string   `yaml:"kind" validate:"required"`
    Source       string   `yaml:"source" validate:"required"`
    Description  string   `yaml:"description"`
    AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
    return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
    // verify source exists
    rawS, ok := srcs[cfg.Source]
    if !ok {
        return nil, fmt.Errorf("no source named %q configured", cfg.Source)
    }

    // verify the source is compatible
    s, ok := rawS.(compatibleSource)
    if !ok {
        return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
    }

    allParameters := parameters.Parameters{
        parameters.NewStringParameterWithDefault("role_name", "", "Optional: a text to filter results by role name. The input is used within a LIKE clause."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return. Default is 50"),
    }

    description := cfg.Description
    if description == "" {
        description = "Lists all the user-created roles in the instance. It returns the role name, Object ID, the maximum number of concurrent connections the role can make, along with boolean indicators for: superuser status, privilege inheritance from member roles, ability to create roles, ability to create databases, ability to log in, replication privilege, and the ability to bypass row-level security, the password expiration timestamp, a list of direct members belonging to this role, and a list of other roles/groups that this role is a member of."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
        Config:    cfg,
        allParams: allParameters,
        pool:      s.PostgresPool(),
        manifest: tools.Manifest{
            Description:  description,
            Parameters:   allParameters.Manifest(),
            AuthRequired: cfg.AuthRequired,
        },
        mcpManifest: mcpManifest,
    }, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
    Config
    allParams   parameters.Parameters `yaml:"allParams"`
    pool        *pgxpool.Pool
    manifest    tools.Manifest
    mcpManifest tools.McpManifest
}

func (t Tool) ToConfig() tools.ToolConfig {
    return t.Config
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listRolesStatement, sliceParams...)
    if err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }
    defer results.Close()

    fields := results.FieldDescriptions()
    var out []map[string]any

    for results.Next() {
        values, err := results.Values()
        if err != nil {
            return nil, fmt.Errorf("unable to parse row: %w", err)
        }
        rowMap := make(map[string]any)
        for i, field := range fields {
            rowMap[string(field.Name)] = values[i]
        }
        out = append(out, rowMap)
    }

    // this will catch actual query execution errors
    if err := results.Err(); err != nil {
        return nil, fmt.Errorf("unable to execute query: %w", err)
    }

    return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (parameters.ParamValues, error) {
    return parameters.ParseParams(t.allParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
    return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
    return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
    return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization(resourceMgr tools.SourceProvider) bool {
    return false
}

func (t Tool) GetAuthTokenHeaderName() string {
    return "Authorization"
}
@@ -0,0 +1,95 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgreslistroles_test

import (
    "testing"

    yaml "github.com/goccy/go-yaml"
    "github.com/google/go-cmp/cmp"
    "github.com/googleapis/genai-toolbox/internal/server"
    "github.com/googleapis/genai-toolbox/internal/testutils"
    "github.com/googleapis/genai-toolbox/internal/tools/postgres/postgreslistroles"
)

func TestParseFromYamlPostgresListRoles(t *testing.T) {
    ctx, err := testutils.ContextWithNewLogger()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    tcs := []struct {
        desc string
        in   string
        want server.ToolConfigs
    }{
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-roles
                    source: my-postgres-instance
                    description: some description
                    authRequired:
                        - my-google-auth-service
                        - other-auth-service
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistroles.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-roles",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{"my-google-auth-service", "other-auth-service"},
                },
            },
        },
        {
            desc: "basic example",
            in: `
            tools:
                example_tool:
                    kind: postgres-list-roles
                    source: my-postgres-instance
                    description: some description
            `,
            want: server.ToolConfigs{
                "example_tool": postgreslistroles.Config{
                    Name:         "example_tool",
                    Kind:         "postgres-list-roles",
                    Source:       "my-postgres-instance",
                    Description:  "some description",
                    AuthRequired: []string{},
                },
            },
        },
    }
    for _, tc := range tcs {
        t.Run(tc.desc, func(t *testing.T) {
            got := struct {
                Tools server.ToolConfigs `yaml:"tools"`
            }{}
            // Parse contents
            err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
            if err != nil {
                t.Fatalf("unable to unmarshal: %s", err)
            }
            if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
                t.Fatalf("incorrect parse: diff %v", diff)
            }
        })
    }

}
@@ -32,28 +32,28 @@ const kind string = "postgres-list-schemas"

const listSchemasStatement = `
WITH
  schema_grants AS (
    SELECT schema_oid, jsonb_object_agg(grantee, privileges) AS grants
    FROM
      (
        SELECT
          n.oid AS schema_oid,
          CASE
            WHEN p.grantee = 0 THEN 'PUBLIC'
            ELSE pg_catalog.pg_get_userbyid(p.grantee)
          END
          AS grantee,
          jsonb_agg(p.privilege_type ORDER BY p.privilege_type) AS privileges
        FROM pg_catalog.pg_namespace n, aclexplode(n.nspacl) p
        WHERE n.nspacl IS NOT NULL
        GROUP BY n.oid, grantee
      ) permissions_by_grantee
    GROUP BY schema_oid
  ),
  all_schemas AS (
    SELECT
      n.nspname AS schema_name,
      pg_catalog.pg_get_userbyid(n.nspowner) AS owner,
  schema_grants AS (
    SELECT schema_oid, jsonb_object_agg(grantee, privileges) AS grants
    FROM
      (
        SELECT
          n.oid AS schema_oid,
          CASE
            WHEN p.grantee = 0 THEN 'PUBLIC'
            ELSE pg_catalog.pg_get_userbyid(p.grantee)
          END
          AS grantee,
          jsonb_agg(p.privilege_type ORDER BY p.privilege_type) AS privileges
        FROM pg_catalog.pg_namespace n, aclexplode(n.nspacl) p
        WHERE n.nspacl IS NOT NULL
        GROUP BY n.oid, grantee
      ) permissions_by_grantee
    GROUP BY schema_oid
  ),
  all_schemas AS (
    SELECT
      n.nspname AS schema_name,
      pg_catalog.pg_get_userbyid(n.nspowner) AS owner,
      COALESCE(sg.grants, '{}'::jsonb) AS grants,
      (
        SELECT COUNT(*)
@@ -67,18 +67,21 @@ const listSchemasStatement = `
      ) AS views,
      (SELECT COUNT(*) FROM pg_catalog.pg_proc p WHERE p.pronamespace = n.oid)
      AS functions
    FROM pg_catalog.pg_namespace n
    LEFT JOIN schema_grants sg
      ON n.oid = sg.schema_oid
  )
    FROM pg_catalog.pg_namespace n
    LEFT JOIN schema_grants sg
      ON n.oid = sg.schema_oid
  )
SELECT *
FROM all_schemas
-- Exclude system schemas and temporary schemas created per session.
-- Exclude system schemas and temporary schemas created per session.
WHERE
  schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
  AND schema_name NOT LIKE 'pg_temp_%'
  AND ($1::text IS NULL OR schema_name LIKE '%' || $1::text || '%')
ORDER BY schema_name;
  schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
  AND schema_name NOT LIKE 'pg_temp_%'
  AND schema_name NOT LIKE 'pg_toast_temp_%'
  AND ($1::text IS NULL OR schema_name ILIKE '%' || $1::text || '%')
  AND ($2::text IS NULL OR owner ILIKE '%' || $2::text || '%')
ORDER BY schema_name
LIMIT COALESCE($3::int, NULL);
`

func init() {
@@ -136,12 +139,14 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)

    allParameters := parameters.Parameters{
        parameters.NewStringParameterWithDefault("schema_name", "", "Optional: A specific schema name pattern to search for."),
        parameters.NewStringParameterWithDefault("owner", "", "Optional: A specific schema owner name pattern to search for."),
        parameters.NewIntParameterWithDefault("limit", 10, "Optional: The maximum number of schemas to return."),
    }
    description := cfg.Description
    if description == "" {
        description = "Lists all schemas in the database ordered by schema name and excluding system and temporary schemas. It returns the schema name, schema owner, grants, number of functions, number of tables and number of views within each schema."

    if cfg.Description == "" {
        cfg.Description = "Lists all schemas in the database ordered by schema name and excluding system and temporary schemas. It returns the schema name, schema owner, grants, number of functions, number of tables and number of views within each schema."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
    mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
@@ -169,7 +174,13 @@ type Tool struct {
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    sliceParams := params.AsSlice()
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listSchemasStatement, sliceParams...)
    if err != nil {
@@ -32,9 +32,9 @@ const kind string = "postgres-list-sequences"

const listSequencesStatement = `
SELECT
    sequencename,
    schemaname,
    sequenceowner,
    sequencename as sequence_name,
    schemaname as schema_name,
    sequenceowner as sequence_owner,
    data_type,
    start_value,
    min_value,
@@ -45,7 +45,7 @@ const listSequencesStatement = `
WHERE
    ($1::text IS NULL OR schemaname LIKE '%' || $1 || '%')
    AND ($2::text IS NULL OR sequencename LIKE '%' || $2 || '%')
ORDER BY schemaname, sequencename
ORDER BY schema_name, sequence_name
LIMIT COALESCE($3::int, 50);

`
@@ -104,15 +104,15 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
    }

    allParameters := parameters.Parameters{
        parameters.NewStringParameterWithDefault("schemaname", "", "Optional: A specific schema name pattern to search for."),
        parameters.NewStringParameterWithDefault("sequencename", "", "Optional: A specific sequence name pattern to search for."),
        parameters.NewStringParameterWithDefault("schema_name", "", "Optional: A specific schema name pattern to search for."),
        parameters.NewStringParameterWithDefault("sequence_name", "", "Optional: A specific sequence name pattern to search for."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return. Default is 50"),
    }
    description := cfg.Description
    if description == "" {
        description = "Lists sequences in the database. Returns sequence name, schema name, sequence owner, data type of the sequence, starting value, minimum value, maximum value of the sequence, the value by which the sequence is incremented, and the last value generated by the sequence in the current session"

    if cfg.Description == "" {
        cfg.Description = "Lists sequences in the database. Returns sequence name, schema name, sequence owner, data type of the sequence, starting value, minimum value, maximum value of the sequence, the value by which the sequence is incremented, and the last value generated by the sequence in the current session"
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
    mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
@@ -144,7 +144,13 @@ func (t Tool) ToConfig() tools.ToolConfig {
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    sliceParams := params.AsSlice()
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listSequencesStatement, sliceParams...)
    if err != nil {
@@ -135,11 +135,11 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
        parameters.NewStringParameterWithDefault("table_name", "", "Optional: A specific table name pattern to search for."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return."),
    }
    description := cfg.Description
    if description == "" {
        description = "Lists all non-internal triggers in a database. Returns trigger name, schema name, table name, whether its enabled or disabled, timing (e.g BEFORE/AFTER of the event), the events that cause the trigger to fire such as INSERT, UPDATE, or DELETE, whether the trigger activates per ROW or per STATEMENT, the handler function executed by the trigger and full definition."

    if cfg.Description == "" {
        cfg.Description = "Lists all non-internal triggers in a database. Returns trigger name, schema name, table name, whether its enabled or disabled, timing (e.g BEFORE/AFTER of the event), the events that cause the trigger to fire such as INSERT, UPDATE, or DELETE, whether the trigger activates per ROW or per STATEMENT, the handler function executed by the trigger and full definition."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
    mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
@@ -171,7 +171,13 @@ func (t Tool) ToConfig() tools.ToolConfig {
}

func (t Tool) Invoke(ctx context.Context, resourceMgr tools.SourceProvider, params parameters.ParamValues, accessToken tools.AccessToken) (any, error) {
    sliceParams := params.AsSlice()
    paramsMap := params.AsMap()

    newParams, err := parameters.GetParams(t.allParams, paramsMap)
    if err != nil {
        return nil, fmt.Errorf("unable to extract standard params %w", err)
    }
    sliceParams := newParams.AsSlice()

    results, err := t.pool.Query(ctx, listTriggersStatement, sliceParams...)
    if err != nil {
@@ -31,13 +31,24 @@ import (
const kind string = "postgres-list-views"

const listViewsStatement = `
SELECT schemaname, viewname, viewowner
FROM pg_views
WHERE
  schemaname NOT IN ('pg_catalog', 'information_schema')
  AND ($1::text IS NULL OR viewname LIKE '%' || $1::text || '%')
ORDER BY viewname
LIMIT COALESCE($2::int, 50);
WITH list_views AS (
  SELECT
    schemaname AS schema_name,
    viewname AS view_name,
    viewowner AS owner_name,
    definition
  FROM pg_views
)
SELECT *
FROM list_views
WHERE
  schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
  AND schema_name NOT LIKE 'pg_temp_%'
  AND ($1::text IS NULL OR view_name ILIKE '%' || $1::text || '%')
  AND ($2::text IS NULL OR schema_name ILIKE '%' || $2::text || '%')
ORDER BY
  schema_name, view_name
LIMIT COALESCE($3::int, 50);
`

func init() {
@@ -94,15 +105,15 @@ func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error)
    }

    allParameters := parameters.Parameters{
        parameters.NewStringParameterWithDefault("viewname", "", "Optional: A specific view name to search for."),
        parameters.NewStringParameterWithDefault("view_name", "", "Optional: A specific view name to search for."),
        parameters.NewStringParameterWithDefault("schema_name", "", "Optional: A specific schema name to search for."),
        parameters.NewIntParameterWithDefault("limit", 50, "Optional: The maximum number of rows to return."),
    }
    paramManifest := allParameters.Manifest()
    description := cfg.Description
    if description == "" {
        description = "Lists views in the database from pg_views with a default limit of 50 rows. Returns schemaname, viewname and the ownername."
    if cfg.Description == "" {
        cfg.Description = "Lists views in the database from pg_views with a default limit of 50 rows. Returns schemaname, viewname, ownername and the definition."
    }
    mcpManifest := tools.GetMcpManifest(cfg.Name, description, cfg.AuthRequired, allParameters, nil)
    mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters, nil)

    // finish tool setup
    return Tool{
@@ -181,7 +181,7 @@ func TestAlloyDBPgToolEndpoints(t *testing.T) {

    // Run Postgres prebuilt tool tests
    tests.RunPostgresListTablesTest(t, tableNameParam, tableNameAuth, AlloyDBPostgresUser)
    tests.RunPostgresListViewsTest(t, ctx, pool, tableNameParam)
    tests.RunPostgresListViewsTest(t, ctx, pool)
    tests.RunPostgresListSchemasTest(t, ctx, pool)
    tests.RunPostgresListActiveQueriesTest(t, ctx, pool)
    tests.RunPostgresListAvailableExtensionsTest(t)
@@ -197,6 +197,9 @@ func TestAlloyDBPgToolEndpoints(t *testing.T) {
    tests.RunPostgresGetColumnCardinalityTest(t, ctx, pool)
    tests.RunPostgresListPublicationTablesTest(t, ctx, pool)
    tests.RunPostgresListTableSpacesTest(t)
    tests.RunPostgresListPgSettingsTest(t, ctx, pool)
    tests.RunPostgresListDatabaseStatsTest(t, ctx, pool)
    tests.RunPostgresListRolesTest(t, ctx, pool)
}

// Test connection with different IP type
@@ -165,7 +165,7 @@ func TestCloudSQLPgSimpleToolEndpoints(t *testing.T) {

    // Run Postgres prebuilt tool tests
    tests.RunPostgresListTablesTest(t, tableNameParam, tableNameAuth, CloudSQLPostgresUser)
    tests.RunPostgresListViewsTest(t, ctx, pool, tableNameParam)
    tests.RunPostgresListViewsTest(t, ctx, pool)
    tests.RunPostgresListSchemasTest(t, ctx, pool)
    tests.RunPostgresListActiveQueriesTest(t, ctx, pool)
    tests.RunPostgresListAvailableExtensionsTest(t)
@@ -181,6 +181,9 @@ func TestCloudSQLPgSimpleToolEndpoints(t *testing.T) {
    tests.RunPostgresGetColumnCardinalityTest(t, ctx, pool)
    tests.RunPostgresListPublicationTablesTest(t, ctx, pool)
    tests.RunPostgresListTableSpacesTest(t)
    tests.RunPostgresListPgSettingsTest(t, ctx, pool)
    tests.RunPostgresListDatabaseStatsTest(t, ctx, pool)
    tests.RunPostgresListRolesTest(t, ctx, pool)
}

// Test connection with different IP type
@@ -209,6 +209,9 @@ func AddPostgresPrebuiltConfig(t *testing.T, config map[string]any) map[string]a
    PostgresGetColumnCardinalityToolKind  = "postgres-get-column-cardinality"
    PostgresListPublicationTablesToolKind = "postgres-list-publication-tables"
    PostgresListTablespacesToolKind       = "postgres-list-tablespaces"
    PostgresListPGSettingsToolKind        = "postgres-list-pg-settings"
    PostgresListDatabaseStatsToolKind     = "postgres-list-database-stats"
    PostgresListRolesToolKind             = "postgres-list-roles"
)

tools, ok := config["tools"].(map[string]any)
@@ -225,34 +228,28 @@ func AddPostgresPrebuiltConfig(t *testing.T, config map[string]any) map[string]a
        "source":      "my-instance",
        "description": "Lists active queries in the database.",
    }

    tools["list_installed_extensions"] = map[string]any{
        "kind":        PostgresListInstalledExtensionsToolKind,
        "source":      "my-instance",
        "description": "Lists installed extensions in the database.",
    }

    tools["list_available_extensions"] = map[string]any{
        "kind":        PostgresListAvailableExtensionsToolKind,
        "source":      "my-instance",
        "description": "Lists available extensions in the database.",
    }

    tools["list_views"] = map[string]any{
        "kind":   PostgresListViewsToolKind,
        "source": "my-instance",
    }

    tools["list_schemas"] = map[string]any{
        "kind":   PostgresListSchemasToolKind,
        "source": "my-instance",
    }

    tools["database_overview"] = map[string]any{
        "kind":   PostgresDatabaseOverviewToolKind,
        "source": "my-instance",
    }

    tools["list_triggers"] = map[string]any{
        "kind":   PostgresListTriggersToolKind,
        "source": "my-instance",
@@ -261,27 +258,22 @@ func AddPostgresPrebuiltConfig(t *testing.T, config map[string]any) map[string]a
        "kind":   PostgresListIndexesToolKind,
        "source": "my-instance",
    }

    tools["list_sequences"] = map[string]any{
        "kind":   PostgresListSequencesToolKind,
        "source": "my-instance",
    }

    tools["list_publication_tables"] = map[string]any{
        "kind":   PostgresListPublicationTablesToolKind,
        "source": "my-instance",
    }

    tools["long_running_transactions"] = map[string]any{
        "kind":   PostgresLongRunningTransactionsToolKind,
        "source": "my-instance",
    }

    tools["list_locks"] = map[string]any{
        "kind":   PostgresListLocksToolKind,
        "source": "my-instance",
    }

    tools["replication_stats"] = map[string]any{
        "kind":   PostgresReplicationStatsToolKind,
        "source": "my-instance",
@@ -298,6 +290,19 @@ func AddPostgresPrebuiltConfig(t *testing.T, config map[string]any) map[string]a
        "kind":   PostgresListTablespacesToolKind,
        "source": "my-instance",
    }
    tools["list_pg_settings"] = map[string]any{
        "kind":   PostgresListPGSettingsToolKind,
        "source": "my-instance",
    }
    tools["list_database_stats"] = map[string]any{
        "kind":   PostgresListDatabaseStatsToolKind,
        "source": "my-instance",
    }

    tools["list_roles"] = map[string]any{
        "kind":   PostgresListRolesToolKind,
        "source": "my-instance",
    }
    config["tools"] = tools
    return config
}
@@ -881,7 +886,7 @@ func TestCloudSQLMySQL_IPTypeParsingFromYAML(t *testing.T) {
                project: my-project
                region: my-region
                instance: my-instance
                ipType: private
                ipType: private
                database: my_db
                user: my_user
                password: my_pass
@@ -921,7 +926,7 @@ func TestCloudSQLMySQL_IPTypeParsingFromYAML(t *testing.T) {
// Finds and drops all tables in a postgres database.
func CleanupPostgresTables(t *testing.T, ctx context.Context, pool *pgxpool.Pool) {
    query := `
        SELECT table_name FROM information_schema.tables
        SELECT table_name FROM information_schema.tables
        WHERE table_schema = 'public' AND table_type = 'BASE TABLE';`

    rows, err := pool.Query(ctx, query)
@@ -954,7 +959,7 @@ func CleanupPostgresTables(t *testing.T, ctx context.Context, pool *pgxpool.Pool
// Finds and drops all tables in a mysql database.
func CleanupMySQLTables(t *testing.T, ctx context.Context, pool *sql.DB) {
    query := `
        SELECT table_name FROM information_schema.tables
        SELECT table_name FROM information_schema.tables
        WHERE table_schema = DATABASE() AND table_type = 'BASE TABLE';`

    rows, err := pool.QueryContext(ctx, query)
tests/mariadb/mariadb_integration_test.go (new file, 351 lines)
@@ -0,0 +1,351 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package mariadb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/google/uuid"
|
||||
"github.com/googleapis/genai-toolbox/internal/testutils"
|
||||
"github.com/googleapis/genai-toolbox/tests"
|
||||
)
|
||||
|
||||
var (
|
||||
MariaDBSourceKind = "mysql"
|
||||
MariaDBToolKind = "mysql-sql"
|
||||
MariaDBDatabase = os.Getenv("MARIADB_DATABASE")
|
||||
MariaDBHost = os.Getenv("MARIADB_HOST")
|
||||
MariaDBPort = os.Getenv("MARIADB_PORT")
|
||||
MariaDBUser = os.Getenv("MARIADB_USER")
|
||||
MariaDBPass = os.Getenv("MARIADB_PASS")
|
||||
)
|
||||
|
||||
func getMariaDBVars(t *testing.T) map[string]any {
|
||||
switch "" {
|
||||
case MariaDBDatabase:
|
||||
t.Fatal("'MARIADB_DATABASE' not set")
|
||||
case MariaDBHost:
|
||||
t.Fatal("'MARIADB_HOST' not set")
|
||||
case MariaDBPort:
|
||||
t.Fatal("'MARIADB_PORT' not set")
|
||||
case MariaDBUser:
|
||||
t.Fatal("'MARIADB_USER' not set")
|
||||
case MariaDBPass:
|
||||
t.Fatal("'MARIADB_PASS' not set")
|
||||
}
|
||||
|
||||
return map[string]any{
|
||||
"kind": MariaDBSourceKind,
|
||||
"host": MariaDBHost,
|
||||
"port": MariaDBPort,
|
||||
"database": MariaDBDatabase,
|
||||
"user": MariaDBUser,
|
||||
"password": MariaDBPass,
|
||||
}
|
||||
}
|
||||
|
||||
// Copied over from mysql.go
func initMariaDB(host, port, user, pass, dbname string) (*sql.DB, error) {
dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?parseTime=true", user, pass, host, port, dbname)

// Interact with the driver directly as you normally would
pool, err := sql.Open("mysql", dsn)
if err != nil {
return nil, fmt.Errorf("sql.Open: %w", err)
}
return pool, nil
}

func TestMySQLToolEndpoints(t *testing.T) {
sourceConfig := getMariaDBVars(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

var args []string

pool, err := initMariaDB(MariaDBHost, MariaDBPort, MariaDBUser, MariaDBPass, MariaDBDatabase)
if err != nil {
t.Fatalf("unable to create MySQL connection pool: %s", err)
}

// cleanup test environment
tests.CleanupMySQLTables(t, ctx, pool)

// create table name with UUID
tableNameParam := "param_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")
tableNameAuth := "auth_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")
tableNameTemplateParam := "template_param_table_" + strings.ReplaceAll(uuid.New().String(), "-", "")

// set up data for param tool
createParamTableStmt, insertParamTableStmt, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, paramTestParams := tests.GetMySQLParamToolInfo(tableNameParam)
teardownTable1 := tests.SetupMySQLTable(t, ctx, pool, createParamTableStmt, insertParamTableStmt, tableNameParam, paramTestParams)
defer teardownTable1(t)

// set up data for auth tool
createAuthTableStmt, insertAuthTableStmt, authToolStmt, authTestParams := tests.GetMySQLAuthToolInfo(tableNameAuth)
teardownTable2 := tests.SetupMySQLTable(t, ctx, pool, createAuthTableStmt, insertAuthTableStmt, tableNameAuth, authTestParams)
defer teardownTable2(t)

// Write config into a file and pass it to command
toolsFile := tests.GetToolsConfig(sourceConfig, MariaDBToolKind, paramToolStmt, idParamToolStmt, nameParamToolStmt, arrayToolStmt, authToolStmt)
toolsFile = tests.AddMySqlExecuteSqlConfig(t, toolsFile)
tmplSelectCombined, tmplSelectFilterCombined := tests.GetMySQLTmplToolStatement()
toolsFile = tests.AddTemplateParamConfig(t, toolsFile, MariaDBToolKind, tmplSelectCombined, tmplSelectFilterCombined, "")

toolsFile = tests.AddMySQLPrebuiltToolConfig(t, toolsFile)

cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
if err != nil {
t.Fatalf("command initialization returned an error: %s", err)
}
defer cleanup()

waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
out, err := testutils.WaitForString(waitCtx, regexp.MustCompile(`Server ready to serve`), cmd.Out)
if err != nil {
t.Logf("toolbox command logs: \n%s", out)
t.Fatalf("toolbox didn't start successfully: %s", err)
}

// Get configs for tests
select1Want, mcpMyFailToolWant, createTableStatement, mcpSelect1Want := GetMariaDBWants()

// Run tests
tests.RunToolGetTest(t)
tests.RunToolInvokeTest(t, select1Want, tests.DisableArrayTest())
tests.RunMCPToolCallMethod(t, mcpMyFailToolWant, mcpSelect1Want)
tests.RunExecuteSqlToolInvokeTest(t, createTableStatement, select1Want)
tests.RunToolInvokeWithTemplateParameters(t, tableNameTemplateParam)

// Run specific MySQL tool tests
RunMariDBListTablesTest(t, MariaDBDatabase, tableNameParam, tableNameAuth)
tests.RunMySQLListActiveQueriesTest(t, ctx, pool)
tests.RunMySQLListTablesMissingUniqueIndexes(t, ctx, pool, MariaDBDatabase)
tests.RunMySQLListTableFragmentationTest(t, MariaDBDatabase, tableNameParam, tableNameAuth)
}

// RunMariDBListTablesTest runs tests against the mysql-list-tables tool
func RunMariDBListTablesTest(t *testing.T, databaseName, tableNameParam, tableNameAuth string) {
type tableInfo struct {
ObjectName string `json:"object_name"`
SchemaName string `json:"schema_name"`
ObjectDetails string `json:"object_details"`
}

type column struct {
DataType string `json:"data_type"`
ColumnName string `json:"column_name"`
ColumnComment string `json:"column_comment"`
ColumnDefault any `json:"column_default"`
IsNotNullable bool `json:"is_not_nullable"`
OrdinalPosition int `json:"ordinal_position"`
}

type objectDetails struct {
Owner any `json:"owner"`
Columns []column `json:"columns"`
Comment string `json:"comment"`
Indexes []any `json:"indexes"`
Triggers []any `json:"triggers"`
Constraints []any `json:"constraints"`
ObjectName string `json:"object_name"`
ObjectType string `json:"object_type"`
SchemaName string `json:"schema_name"`
}

paramTableWant := objectDetails{
ObjectName: tableNameParam,
SchemaName: databaseName,
ObjectType: "TABLE",
Columns: []column{
{DataType: "int(11)", ColumnName: "id", IsNotNullable: true, OrdinalPosition: 1, ColumnDefault: nil},
{DataType: "varchar(255)", ColumnName: "name", OrdinalPosition: 2, ColumnDefault: "NULL"},
},
Indexes: []any{map[string]any{"index_columns": []any{"id"}, "index_name": "PRIMARY", "is_primary": true, "is_unique": true}},
Triggers: []any{},
Constraints: []any{map[string]any{"constraint_columns": []any{"id"}, "constraint_name": "PRIMARY", "constraint_type": "PRIMARY KEY", "foreign_key_referenced_columns": any(nil), "foreign_key_referenced_table": any(nil), "constraint_definition": ""}},
}

authTableWant := objectDetails{
ObjectName: tableNameAuth,
SchemaName: databaseName,
ObjectType: "TABLE",
Columns: []column{
{DataType: "int(11)", ColumnName: "id", IsNotNullable: true, OrdinalPosition: 1, ColumnDefault: nil},
{DataType: "varchar(255)", ColumnName: "name", OrdinalPosition: 2, ColumnDefault: "NULL"},
{DataType: "varchar(255)", ColumnName: "email", OrdinalPosition: 3, ColumnDefault: "NULL"},
},
Indexes: []any{map[string]any{"index_columns": []any{"id"}, "index_name": "PRIMARY", "is_primary": true, "is_unique": true}},
Triggers: []any{},
Constraints: []any{map[string]any{"constraint_columns": []any{"id"}, "constraint_name": "PRIMARY", "constraint_type": "PRIMARY KEY", "foreign_key_referenced_columns": any(nil), "foreign_key_referenced_table": any(nil), "constraint_definition": ""}},
}

invokeTcs := []struct {
name string
requestBody io.Reader
wantStatusCode int
want any
isSimple bool
isAllTables bool
}{
{
name: "invoke list_tables for all tables detailed output",
requestBody: bytes.NewBufferString(`{"table_names":""}`),
wantStatusCode: http.StatusOK,
want: []objectDetails{authTableWant, paramTableWant},
isAllTables: true,
},
{
name: "invoke list_tables detailed output",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s"}`, tableNameAuth)),
wantStatusCode: http.StatusOK,
want: []objectDetails{authTableWant},
},
{
name: "invoke list_tables simple output",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s", "output_format": "simple"}`, tableNameAuth)),
wantStatusCode: http.StatusOK,
want: []map[string]any{{"name": tableNameAuth}},
isSimple: true,
},
{
name: "invoke list_tables with multiple table names",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s,%s"}`, tableNameParam, tableNameAuth)),
wantStatusCode: http.StatusOK,
want: []objectDetails{authTableWant, paramTableWant},
},
{
name: "invoke list_tables with one existing and one non-existent table",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"table_names": "%s,non_existent_table"}`, tableNameAuth)),
wantStatusCode: http.StatusOK,
want: []objectDetails{authTableWant},
},
{
name: "invoke list_tables with non-existent table",
requestBody: bytes.NewBufferString(`{"table_names": "non_existent_table"}`),
wantStatusCode: http.StatusOK,
want: nil,
},
}
for _, tc := range invokeTcs {
t.Run(tc.name, func(t *testing.T) {
const api = "http://127.0.0.1:5000/api/tool/list_tables/invoke"
resp, body := tests.RunRequest(t, http.MethodPost, api, tc.requestBody, nil)
if resp.StatusCode != tc.wantStatusCode {
t.Fatalf("wrong status code: got %d, want %d, body: %s", resp.StatusCode, tc.wantStatusCode, string(body))
}
if tc.wantStatusCode != http.StatusOK {
return
}

var bodyWrapper struct {
Result json.RawMessage `json:"result"`
}
if err := json.Unmarshal(body, &bodyWrapper); err != nil {
t.Fatalf("error decoding response wrapper: %v", err)
}

var resultString string
if err := json.Unmarshal(bodyWrapper.Result, &resultString); err != nil {
resultString = string(bodyWrapper.Result)
}

var got any
if tc.isSimple {
var tables []tableInfo
if err := json.Unmarshal([]byte(resultString), &tables); err != nil {
t.Fatalf("failed to unmarshal outer JSON array into []tableInfo: %v", err)
}
var details []map[string]any
for _, table := range tables {
var d map[string]any
if err := json.Unmarshal([]byte(table.ObjectDetails), &d); err != nil {
t.Fatalf("failed to unmarshal nested ObjectDetails string: %v", err)
}
details = append(details, d)
}
got = details
} else {
if resultString == "null" {
got = nil
} else {
var tables []tableInfo
if err := json.Unmarshal([]byte(resultString), &tables); err != nil {
t.Fatalf("failed to unmarshal outer JSON array into []tableInfo: %v", err)
}
var details []objectDetails
for _, table := range tables {
var d objectDetails
if err := json.Unmarshal([]byte(table.ObjectDetails), &d); err != nil {
t.Fatalf("failed to unmarshal nested ObjectDetails string: %v", err)
}
details = append(details, d)
}
got = details
}
}

opts := []cmp.Option{
cmpopts.SortSlices(func(a, b objectDetails) bool { return a.ObjectName < b.ObjectName }),
cmpopts.SortSlices(func(a, b column) bool { return a.ColumnName < b.ColumnName }),
cmpopts.SortSlices(func(a, b map[string]any) bool { return a["name"].(string) < b["name"].(string) }),
}

// Checking only the current database where the test tables are created to avoid brittle tests.
if tc.isAllTables {
var filteredGot []objectDetails
if got != nil {
for _, item := range got.([]objectDetails) {
if item.SchemaName == databaseName {
filteredGot = append(filteredGot, item)
}
}
}
if len(filteredGot) == 0 {
got = nil
} else {
got = filteredGot
}
}

if diff := cmp.Diff(tc.want, got, opts...); diff != "" {
t.Errorf("Unexpected result: got %#v, want: %#v", got, tc.want)
}
})
}
}

// GetMariaDBWants returns the expected wants for MariaDB
func GetMariaDBWants() (string, string, string, string) {
select1Want := `[{"1":1}]`
mcpMyFailToolWant := `{"jsonrpc":"2.0","id":"invoke-fail-tool","result":{"content":[{"type":"text","text":"unable to execute query: Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELEC 1' at line 1"}],"isError":true}}`
createTableStatement := `"CREATE TABLE t (id INT AUTO_INCREMENT PRIMARY KEY, name TEXT)"`
mcpSelect1Want := `{"jsonrpc":"2.0","id":"invoke my-auth-required-tool","result":{"content":[{"type":"text","text":"{\"1\":1}"}]}}`
return select1Want, mcpMyFailToolWant, createTableStatement, mcpSelect1Want
}
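A recurring pattern in the MariaDB test above, and in the Postgres helpers later in this diff, is unwrapping the tool server's response twice: the HTTP body is a JSON object with a `result` field, and `result` itself is a JSON-encoded string holding the actual rows. A minimal sketch of that double decode, using made-up sample data (only the wrapper shape is taken from the tests; everything else is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Example body in the shape the tests expect: {"result": "<json string>"}.
	body := []byte(`{"result": "[{\"1\":1}]"}`)

	var wrapper struct {
		Result json.RawMessage `json:"result"`
	}
	if err := json.Unmarshal(body, &wrapper); err != nil {
		panic(err)
	}

	// First try to decode result as a JSON string; fall back to the raw bytes.
	var resultString string
	if err := json.Unmarshal(wrapper.Result, &resultString); err != nil {
		resultString = string(wrapper.Result)
	}

	var rows []map[string]any
	if err := json.Unmarshal([]byte(resultString), &rows); err != nil {
		panic(err)
	}
	fmt.Println(rows) // [map[1:1]]
}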
@@ -102,9 +102,10 @@ func TestMongoDBToolEndpoints(t *testing.T) {
// Get configs for tests
select1Want := `[{"_id":3,"id":3,"name":"Sid"}]`
myToolId3NameAliceWant := `[{"_id":5,"id":3,"name":"Alice"}]`
myToolById4Want := `[{"_id":4,"id":4,"name":null}]`
myToolById4Want := `null`
mcpMyFailToolWant := `invalid JSON input: missing colon after key `
mcpMyToolId3NameAliceWant := `{"jsonrpc":"2.0","id":"my-simple-tool","result":{"content":[{"type":"text","text":"{\"_id\":5,\"id\":3,\"name\":\"Alice\"}"}]}}`
mcpMyToolId3NameAliceWant := `{"jsonrpc":"2.0","id":"my-tool","result":{"content":[{"type":"text","text":"{\"_id\":5,\"id\":3,\"name\":\"Alice\"}"}]}}`
mcpAuthRequiredWant := `{"jsonrpc":"2.0","id":"invoke my-auth-required-tool","result":{"content":[{"type":"text","text":"{\"_id\":3,\"id\":3,\"name\":\"Sid\"}"}]}}`

// Run tests
tests.RunToolGetTest(t)
@@ -115,13 +116,14 @@ func TestMongoDBToolEndpoints(t *testing.T) {
)
tests.RunMCPToolCallMethod(t, mcpMyFailToolWant, select1Want,
tests.WithMcpMyToolId3NameAliceWant(mcpMyToolId3NameAliceWant),
tests.WithMcpSelect1Want(mcpAuthRequiredWant),
)

delete1Want := "1"
deleteManyWant := "2"
runToolDeleteInvokeTest(t, delete1Want, deleteManyWant)

insert1Want := `["68666e1035bb36bf1b4d47fb"]`
insert1Want := `"68666e1035bb36bf1b4d47fb"`
insertManyWant := `["68667a6436ec7d0363668db7","68667a6436ec7d0363668db8","68667a6436ec7d0363668db9"]`
runToolInsertInvokeTest(t, insert1Want, insertManyWant)

@@ -444,12 +446,15 @@ func runToolAggregateInvokeTest(t *testing.T, aggregate1Want string, aggregateMa
func setupMongoDB(t *testing.T, ctx context.Context, database *mongo.Database) func(*testing.T) {
collectionName := "test_collection"

if err := database.Collection(collectionName).Drop(ctx); err != nil {
t.Logf("Warning: failed to drop collection before setup: %v", err)
}

documents := []map[string]any{
{"_id": 1, "id": 1, "name": "Alice", "email": ServiceAccountEmail},
{"_id": 1, "id": 2, "name": "FakeAlice", "email": "fakeAlice@gmail.com"},
{"_id": 14, "id": 2, "name": "FakeAlice", "email": "fakeAlice@gmail.com"},
{"_id": 2, "id": 2, "name": "Jane"},
{"_id": 3, "id": 3, "name": "Sid"},
{"_id": 4, "id": 4, "name": nil},
{"_id": 5, "id": 3, "name": "Alice", "email": "alice@gmail.com"},
{"_id": 6, "id": 100, "name": "ToBeDeleted", "email": "bob@gmail.com"},
{"_id": 7, "id": 101, "name": "ToBeDeleted", "email": "bob1@gmail.com"},
@@ -498,8 +503,6 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
"filterParams": []any{},
"projectPayload": `{ "_id": 1, "id": 1, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 1,
"sort": `{ "id": 1 }`,
},
"my-tool": map[string]any{
"kind": toolKind,
@@ -522,6 +525,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
},
"projectPayload": `{ "_id": 1, "id": 1, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 10,
},
"my-tool-by-id": map[string]any{
"kind": toolKind,
@@ -539,6 +543,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
},
"projectPayload": `{ "_id": 1, "id": 1, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 10,
},
"my-tool-by-name": map[string]any{
"kind": toolKind,
@@ -546,7 +551,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
"description": "Tool to test invocation with params.",
"authRequired": []string{},
"collection": "test_collection",
"filterPayload": `{ "name" : {{ .name }} }`,
"filterPayload": `{ "name" : {{json .name }} }`,
"filterParams": []map[string]any{
{
"name": "name",
@@ -557,6 +562,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
},
"projectPayload": `{ "_id": 1, "id": 1, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 10,
},
"my-array-tool": map[string]any{
"kind": toolKind,
@@ -564,7 +570,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
"description": "Tool to test invocation with array.",
"authRequired": []string{},
"collection": "test_collection",
"filterPayload": `{ "name": { "$in": {{json .nameArray}} }, "_id": 5 })`,
"filterPayload": `{ "name": { "$in": {{json .nameArray}} }, "_id": 5 }`,
"filterParams": []map[string]any{
{
"name": "nameArray",
@@ -578,6 +584,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
},
"projectPayload": `{ "_id": 1, "id": 1, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 10,
},
"my-auth-tool": map[string]any{
"kind": toolKind,
@@ -601,6 +608,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
},
"projectPayload": `{ "_id": 0, "name" : 1 }`,
"database": MongoDbDatabase,
"limit": 10,
},
"my-auth-required-tool": map[string]any{
"kind": toolKind,
@@ -613,6 +621,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
"filterPayload": `{ "_id": 3, "id": 3 }`,
"filterParams": []any{},
"database": MongoDbDatabase,
"limit": 10,
},
"my-fail-tool": map[string]any{
"kind": toolKind,
@@ -623,6 +632,7 @@ func getMongoDBToolsConfig(sourceConfig map[string]any, toolKind string) map[str
"filterPayload": `{ "id" ; 1 }"}`,
"filterParams": []any{},
"database": MongoDbDatabase,
"limit": 10,
},
"my-delete-one-tool": map[string]any{
"kind": "mongodb-delete-one",

@@ -119,6 +119,7 @@ func EnableClientAuthTest() InvokeTestOption {
// MCPTestConfig represents the various configuration options for mcp tool call tests.
type MCPTestConfig struct {
myToolId3NameAliceWant string
mcpSelect1Want string
supportClientAuth bool
supportSelect1Auth bool
}
@@ -149,6 +150,12 @@ func DisableMcpSelect1AuthTest() McpTestOption {
}
}

func WithMcpSelect1Want(want string) McpTestOption {
return func(c *MCPTestConfig) {
c.mcpSelect1Want = want
}
}

/* Configurations for RunExecuteSqlToolInvokeTest() */

// ExecuteSqlTestConfig represents the various configuration options for RunExecuteSqlToolInvokeTest()

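The `WithMcpSelect1Want` helper above follows Go's functional-options pattern: each option is a function that mutates a shared config struct, and the test runner applies whatever options the caller passes after setting defaults. A minimal sketch of the pattern in the same naming style (the apply loop and default values are not shown in this hunk, so they are assumptions here, not the library's actual code):

package main

import "fmt"

type MCPTestConfig struct {
	mcpSelect1Want     string
	supportSelect1Auth bool
}

type McpTestOption func(*MCPTestConfig)

func WithMcpSelect1Want(want string) McpTestOption {
	return func(c *MCPTestConfig) { c.mcpSelect1Want = want }
}

func DisableMcpSelect1AuthTest() McpTestOption {
	return func(c *MCPTestConfig) { c.supportSelect1Auth = false }
}

// runWithOptions is a hypothetical stand-in for the test runner's option handling:
// defaults first, then apply caller-supplied options in order.
func runWithOptions(options ...McpTestOption) MCPTestConfig {
	cfg := MCPTestConfig{mcpSelect1Want: "default", supportSelect1Auth: true}
	for _, opt := range options {
		opt(&cfg)
	}
	return cfg
}

func main() {
	cfg := runWithOptions(WithMcpSelect1Want(`[{"1":1}]`), DisableMcpSelect1AuthTest())
	fmt.Printf("%+v\n", cfg)
}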
@@ -144,7 +144,7 @@ func TestPostgres(t *testing.T) {

// Run Postgres prebuilt tool tests
tests.RunPostgresListTablesTest(t, tableNameParam, tableNameAuth, PostgresUser)
tests.RunPostgresListViewsTest(t, ctx, pool, tableNameParam)
tests.RunPostgresListViewsTest(t, ctx, pool)
tests.RunPostgresListSchemasTest(t, ctx, pool)
tests.RunPostgresListActiveQueriesTest(t, ctx, pool)
tests.RunPostgresListAvailableExtensionsTest(t)
@@ -160,4 +160,7 @@ func TestPostgres(t *testing.T) {
tests.RunPostgresGetColumnCardinalityTest(t, ctx, pool)
tests.RunPostgresListPublicationTablesTest(t, ctx, pool)
tests.RunPostgresListTableSpacesTest(t)
tests.RunPostgresListPgSettingsTest(t, ctx, pool)
tests.RunPostgresListDatabaseStatsTest(t, ctx, pool)
tests.RunPostgresListRolesTest(t, ctx, pool)
}

486 tests/tool.go
@@ -791,6 +791,7 @@ func RunMCPToolCallMethod(t *testing.T, myFailToolWant, select1Want string, opti
// Default values for MCPTestConfig
configs := &MCPTestConfig{
myToolId3NameAliceWant: `{"jsonrpc":"2.0","id":"my-tool","result":{"content":[{"type":"text","text":"{\"id\":1,\"name\":\"Alice\"}"},{"type":"text","text":"{\"id\":3,\"name\":\"Sid\"}"}]}}`,
mcpSelect1Want: select1Want,
supportClientAuth: false,
supportSelect1Auth: true,
}
@@ -920,7 +921,7 @@ func RunMCPToolCallMethod(t *testing.T, myFailToolWant, select1Want string, opti
},
},
wantStatusCode: http.StatusOK,
wantBody: select1Want,
wantBody: configs.mcpSelect1Want,
},
{
name: "MCP Invoke my-auth-required-tool with invalid auth token",
@@ -1259,8 +1260,8 @@ func RunPostgresListTablesTest(t *testing.T, tableNameParam, tableNameAuth, user
}
}

func setUpPostgresViews(t *testing.T, ctx context.Context, pool *pgxpool.Pool, viewName, tableName string) func() {
createView := fmt.Sprintf("CREATE VIEW %s AS SELECT name FROM %s", viewName, tableName)
func setUpPostgresViews(t *testing.T, ctx context.Context, pool *pgxpool.Pool, viewName string) func() {
createView := fmt.Sprintf("CREATE VIEW %s AS SELECT 1 AS col", viewName)
_, err := pool.Exec(ctx, createView)
if err != nil {
t.Fatalf("failed to create view: %v", err)
@@ -1274,9 +1275,10 @@ func setUpPostgresViews(t *testing.T, ctx context.Context, pool *pgxpool.Pool, v
}
}

func RunPostgresListViewsTest(t *testing.T, ctx context.Context, pool *pgxpool.Pool, tableName string) {
viewName1 := "test_view_1" + strings.ReplaceAll(uuid.New().String(), "-", "")
dropViewfunc1 := setUpPostgresViews(t, ctx, pool, viewName1, tableName)
func RunPostgresListViewsTest(t *testing.T, ctx context.Context, pool *pgxpool.Pool) {
//adding this line temporarily
viewName := "test_view_" + strings.ReplaceAll(uuid.New().String(), "-", "")
dropViewfunc1 := setUpPostgresViews(t, ctx, pool, viewName)
defer dropViewfunc1()

invokeTcs := []struct {
@@ -1287,13 +1289,13 @@ func RunPostgresListViewsTest(t *testing.T, ctx context.Context, pool *pgxpool.P
}{
{
name: "invoke list_views with newly created view",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"viewname": "%s"}`, viewName1))),
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"view_name": "%s"}`, viewName))),
wantStatusCode: http.StatusOK,
want: fmt.Sprintf(`[{"schemaname":"public","viewname":"%s","viewowner":"postgres"}]`, viewName1),
want: fmt.Sprintf(`[{"schema_name":"public","view_name":"%s","owner_name":"postgres","definition":" SELECT 1 AS col;"}]`, viewName),
},
{
name: "invoke list_views with non-existent_view",
requestBody: bytes.NewBuffer([]byte(`{"viewname": "non_existent_view"}`)),
requestBody: bytes.NewBuffer([]byte(`{"view_name": "non_existent_view"}`)),
wantStatusCode: http.StatusOK,
want: `null`,
},
@@ -1349,6 +1351,7 @@ func RunPostgresListSchemasTest(t *testing.T, ctx context.Context, pool *pgxpool
requestBody io.Reader
wantStatusCode int
want []map[string]any
compareSubset bool
}{
{
name: "invoke list_schemas with schema_name",
@@ -1356,6 +1359,19 @@ func RunPostgresListSchemasTest(t *testing.T, ctx context.Context, pool *pgxpool
wantStatusCode: http.StatusOK,
want: []map[string]any{wantSchema},
},
{
name: "invoke list_schemas with owner name",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"owner": "%s"}`, "postgres"))),
wantStatusCode: http.StatusOK,
want: []map[string]any{wantSchema},
compareSubset: true,
},
{
name: "invoke list_schemas with limit 1",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"schema_name": "%s","limit": 1}`, schemaName))),
wantStatusCode: http.StatusOK,
want: []map[string]any{wantSchema},
},
{
name: "invoke list_schemas with non-existent schema",
requestBody: bytes.NewBuffer([]byte(`{"schema_name": "non_existent_schema"}`)),
@@ -1391,8 +1407,25 @@ func RunPostgresListSchemasTest(t *testing.T, ctx context.Context, pool *pgxpool
t.Fatalf("failed to unmarshal nested result string: %v", err)
}

if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("Unexpected result (-want +got):\n%s", diff)
if tc.compareSubset {
// Assert that the 'wantSchema' entry is present in the 'got' list.
found := false
for _, resultSchema := range got {
if resultSchema["schema_name"] == wantSchema["schema_name"] {
found = true
if diff := cmp.Diff(wantSchema, resultSchema); diff != "" {
t.Errorf("Mismatch in fields for the expected trigger (-want +got):\n%s", diff)
}
break
}
}
if !found {
t.Errorf("Expected schema '%s' not found in the list of all schemas.", wantSchema)
}
} else {
if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("Unexpected result (-want +got):\n%s", diff)
}
}
})
}
@@ -2178,15 +2211,15 @@ func RunPostgresListSequencesTest(t *testing.T, ctx context.Context, pool *pgxpo
defer teardown(t)

wantSequence := map[string]any{
"sequencename": sequenceName,
"schemaname": "public",
"sequenceowner": "postgres",
"data_type": "bigint",
"start_value": float64(1),
"min_value": float64(1),
"max_value": float64(9223372036854775807),
"increment_by": float64(1),
"last_value": nil,
"sequence_name": sequenceName,
"schema_name": "public",
"sequence_owner": "postgres",
"data_type": "bigint",
"start_value": float64(1),
"min_value": float64(1),
"max_value": float64(9223372036854775807),
"increment_by": float64(1),
"last_value": nil,
}

invokeTcs := []struct {
@@ -2198,13 +2231,13 @@ func RunPostgresListSequencesTest(t *testing.T, ctx context.Context, pool *pgxpo
}{
{
name: "invoke list_sequences",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"sequencename": "%s"}`, sequenceName)),
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"sequence_name": "%s"}`, sequenceName)),
wantStatusCode: http.StatusOK,
want: []map[string]any{wantSequence},
},
{
name: "invoke list_sequences with non-existent sequence",
requestBody: bytes.NewBufferString(`{"sequencename": "non_existent_sequence"}`),
requestBody: bytes.NewBufferString(`{"sequence_name": "non_existent_sequence"}`),
wantStatusCode: http.StatusOK,
want: nil,
},
@@ -2271,6 +2304,415 @@ func RunPostgresListTableSpacesTest(t *testing.T) {
}
}

func RunPostgresListPgSettingsTest(t *testing.T, ctx context.Context, pool *pgxpool.Pool) {
targetSetting := "maintenance_work_mem"
var name, setting, unit, shortDesc, source, contextVal string

// We query the raw pg_settings to get the data needed to reconstruct the logic
// defined in the listPgSettingQuery.
err := pool.QueryRow(ctx, `
SELECT name, setting, unit, short_desc, source, context
FROM pg_settings
WHERE name = $1
`, targetSetting).Scan(&name, &setting, &unit, &shortDesc, &source, &contextVal)

if err != nil {
t.Fatalf("Setup failed: could not fetch postgres setting '%s': %v", targetSetting, err)
}

// Replicate the SQL CASE logic for 'requires_restart' field
requiresRestart := "No"
switch contextVal {
case "postmaster":
requiresRestart = "Yes"
case "sighup":
requiresRestart = "No (Reload sufficient)"
}

expectedObject := map[string]interface{}{
"name": name,
"current_value": setting,
"unit": unit,
"short_desc": shortDesc,
"source": source,
"requires_restart": requiresRestart,
}
expectedJSON, _ := json.Marshal([]interface{}{expectedObject})

invokeTcs := []struct {
name string
requestBody io.Reader
wantStatusCode int
want string
}{
{
name: "invoke list_pg_settings with specific setting",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"setting_name": "%s"}`, targetSetting))),
wantStatusCode: http.StatusOK,
want: string(expectedJSON),
},
{
name: "invoke list_pg_settings with non-existent setting",
requestBody: bytes.NewBuffer([]byte(`{"setting_name": "non_existent_config_xyz"}`)),
wantStatusCode: http.StatusOK,
want: `null`,
},
}

for _, tc := range invokeTcs {
t.Run(tc.name, func(t *testing.T) {
const api = "http://127.0.0.1:5000/api/tool/list_pg_settings/invoke"
resp, body := RunRequest(t, http.MethodPost, api, tc.requestBody, nil)

if resp.StatusCode != tc.wantStatusCode {
t.Fatalf("wrong status code: got %d, want %d, body: %s", resp.StatusCode, tc.wantStatusCode, string(body))
}
if tc.wantStatusCode != http.StatusOK {
return
}

var bodyWrapper struct {
Result json.RawMessage `json:"result"`
}
if err := json.Unmarshal(body, &bodyWrapper); err != nil {
t.Fatalf("error decoding response wrapper: %v", err)
}

var resultString string
if err := json.Unmarshal(bodyWrapper.Result, &resultString); err != nil {
resultString = string(bodyWrapper.Result)
}

var got, want any
if err := json.Unmarshal([]byte(resultString), &got); err != nil {
t.Fatalf("failed to unmarshal nested result string: %v", err)
}
if err := json.Unmarshal([]byte(tc.want), &want); err != nil {
t.Fatalf("failed to unmarshal want string: %v", err)
}

if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("Unexpected result (-want +got):\n%s", diff)
}
})
}
}

// RunPostgresListDatabaseStatsTest tests the database_stats tool by comparing API results
// against a direct query to the database.
func RunPostgresListDatabaseStatsTest(t *testing.T, ctx context.Context, pool *pgxpool.Pool) {
dbName1 := "test_db_stats_1"
dbOwner1 := "test_user1"
dbName2 := "test_db_stats_2"
dbOwner2 := "test_user2"

cleanup1 := setUpDatabase(t, ctx, pool, dbName1, dbOwner1)
defer cleanup1()
cleanup2 := setUpDatabase(t, ctx, pool, dbName2, dbOwner2)
defer cleanup2()

requiredKeys := map[string]bool{
"database_name": true,
"database_owner": true,
"default_tablespace": true,
"is_connectable": true,
}

db1Want := map[string]interface{}{
"database_name": dbName1,
"database_owner": dbOwner1,
"default_tablespace": "pg_default",
"is_connectable": true,
}

db2Want := map[string]interface{}{
"database_name": dbName2,
"database_owner": dbOwner2,
"default_tablespace": "pg_default",
"is_connectable": true,
}

invokeTcs := []struct {
name string
requestBody io.Reader
wantStatusCode int
want []map[string]interface{}
}{
{
name: "invoke database_stats filtering by specific database name",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"database_name": "%s"}`, dbName1))),
wantStatusCode: http.StatusOK,
want: []map[string]interface{}{db1Want},
},
{
name: "invoke database_stats filtering by specific owner",
requestBody: bytes.NewBuffer([]byte(fmt.Sprintf(`{"database_owner": "%s"}`, dbOwner2))),
wantStatusCode: http.StatusOK,
want: []map[string]interface{}{db2Want},
},
{
name: "filter by tablespace",
requestBody: bytes.NewBuffer([]byte(`{"default_tablespace": "pg_default"}`)),
wantStatusCode: http.StatusOK,
want: []map[string]interface{}{db1Want, db2Want},
},
{
name: "sort by size (desc)",
requestBody: bytes.NewBuffer([]byte(`{"sort_by": "size"}`)),
wantStatusCode: http.StatusOK,
want: []map[string]interface{}{db1Want, db2Want},
},
}

for _, tc := range invokeTcs {
t.Run(tc.name, func(t *testing.T) {
const api = "http://127.0.0.1:5000/api/tool/list_database_stats/invoke"
resp, body := RunRequest(t, http.MethodPost, api, tc.requestBody, nil)

if resp.StatusCode != tc.wantStatusCode {
t.Fatalf("wrong status code: got %d, want %d, body: %s", resp.StatusCode, tc.wantStatusCode, string(body))
}

var bodyWrapper struct {
Result json.RawMessage `json:"result"`
}
if err := json.Unmarshal(body, &bodyWrapper); err != nil {
t.Fatalf("error decoding response wrapper: %v", err)
}

var resultString string
if err := json.Unmarshal(bodyWrapper.Result, &resultString); err != nil {
resultString = string(bodyWrapper.Result)
}

var got []map[string]interface{}
if err := json.Unmarshal([]byte(resultString), &got); err != nil {
t.Fatalf("failed to unmarshal nested result string: %v", err)
}

// Configuration for comparison
opts := []cmp.Option{
// Ensure consistent order based on name for comparison
cmpopts.SortSlices(func(a, b map[string]interface{}) bool {
return a["database_name"].(string) < b["database_name"].(string)
}),

// Ignore Volatile Keys which change in every run and only compare the keys in 'requiredKeys'
cmpopts.IgnoreMapEntries(func(key string, _ interface{}) bool {
return !requiredKeys[key]
}),

// Ignore Irrelevant Databases
cmpopts.IgnoreSliceElements(func(v map[string]interface{}) bool {
name, ok := v["database_name"].(string)
if !ok {
return true
}
return name != dbName1 && name != dbName2
}),
}

if diff := cmp.Diff(tc.want, got, opts...); diff != "" {
t.Errorf("Unexpected result (-want +got):\n%s", diff)
}
})
}
}

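The comparison options in the database-stats test above come from go-cmp's cmpopts package: SortSlices makes element order irrelevant, IgnoreMapEntries drops volatile keys, and IgnoreSliceElements filters out rows the test does not own. A small self-contained sketch of the same combination on toy data (illustrative only; the key names merely mirror the test above):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	want := []map[string]any{{"database_name": "db_a", "database_owner": "alice"}}
	got := []map[string]any{
		{"database_name": "db_b", "database_owner": "bob", "size_bytes": 123},   // filtered out by IgnoreSliceElements
		{"database_name": "db_a", "database_owner": "alice", "size_bytes": 456}, // size_bytes ignored as a volatile key
	}

	opts := []cmp.Option{
		cmpopts.SortSlices(func(a, b map[string]any) bool {
			return a["database_name"].(string) < b["database_name"].(string)
		}),
		cmpopts.IgnoreMapEntries(func(k string, _ any) bool { return k == "size_bytes" }),
		cmpopts.IgnoreSliceElements(func(v map[string]any) bool {
			return v["database_name"] != "db_a"
		}),
	}

	fmt.Println(cmp.Diff(want, got, opts...) == "") // true
}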
func setUpDatabase(t *testing.T, ctx context.Context, pool *pgxpool.Pool, dbName, dbOwner string) func() {
_, err := pool.Exec(ctx, fmt.Sprintf("CREATE ROLE %s LOGIN PASSWORD 'password';", dbOwner))
if err != nil {
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP ROLE %s;", dbOwner))
t.Fatalf("failed to create %s: %v", dbOwner, err)
}
_, err = pool.Exec(ctx, fmt.Sprintf("GRANT %s TO current_user;", dbOwner))
if err != nil {
t.Fatalf("failed to grant %s to current_user: %v", dbOwner, err)
}
_, err = pool.Exec(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s;", dbName, dbOwner))
if err != nil {
t.Fatalf("failed to create %s: %v", dbName, err)
}
return func() {
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s;", dbName))
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %s;", dbOwner))
}
}

func setupPostgresRoles(t *testing.T, ctx context.Context, pool *pgxpool.Pool) (string, string, string, func(t *testing.T)) {
t.Helper()
suffix := strings.ReplaceAll(uuid.New().String(), "-", "")

adminUser := "test_role_admin_" + suffix
superUser := "test_role_super_" + suffix
normalUser := "test_role_normal_" + suffix

createAdminStmt := fmt.Sprintf("CREATE ROLE %s NOLOGIN;", adminUser)
if _, err := pool.Exec(ctx, createAdminStmt); err != nil {
t.Fatalf("unable to create role %s: %v", adminUser, err)
}

createSuperUserStmt := fmt.Sprintf("CREATE ROLE %s LOGIN CREATEDB;", superUser)
if _, err := pool.Exec(ctx, createSuperUserStmt); err != nil {
t.Fatalf("unable to create role %s: %v", superUser, err)
}

createNormalUserStmt := fmt.Sprintf("CREATE ROLE %s LOGIN;", normalUser)
if _, err := pool.Exec(ctx, createNormalUserStmt); err != nil {
t.Fatalf("unable to create role %s: %v", normalUser, err)
}

// Establish Relationships (Admin -> Superuser -> Normal)
if _, err := pool.Exec(ctx, fmt.Sprintf("GRANT %s TO %s;", adminUser, superUser)); err != nil {
t.Fatalf("unable to grant %s to %s: %v", adminUser, superUser, err)
}
if _, err := pool.Exec(ctx, fmt.Sprintf("GRANT %s TO %s;", superUser, normalUser)); err != nil {
t.Fatalf("unable to grant %s to %s: %v", superUser, normalUser, err)
}

return adminUser, superUser, normalUser, func(t *testing.T) {
t.Helper()
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %s;", normalUser))
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %s;", superUser))
_, _ = pool.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %s;", adminUser))
}
}

func RunPostgresListRolesTest(t *testing.T, ctx context.Context, pool *pgxpool.Pool) {
adminUser, superUser, normalUser, cleanup := setupPostgresRoles(t, ctx, pool)
defer cleanup(t)

wantAdmin := map[string]any{
"role_name": adminUser,
"connection_limit": float64(-1),
"is_superuser": false,
"inherits_privileges": true,
"can_create_roles": false,
"can_create_db": false,
"can_login": false,
"is_replication_role": false,
"bypass_rls": false,
"direct_members": []any{superUser},
"member_of": []any{},
}

wantSuperUser := map[string]any{
"role_name": superUser,
"connection_limit": float64(-1),
"is_superuser": false,
"inherits_privileges": true,
"can_create_roles": false,
"can_create_db": true,
"can_login": true,
"is_replication_role": false,
"bypass_rls": false,
"direct_members": []any{normalUser},
"member_of": []any{adminUser},
}

wantNormalUser := map[string]any{
"role_name": normalUser,
"connection_limit": float64(-1),
"is_superuser": false,
"inherits_privileges": true,
"can_create_roles": false,
"can_create_db": false,
"can_login": true,
"is_replication_role": false,
"bypass_rls": false,
"direct_members": []any{},
"member_of": []any{superUser},
}

invokeTcs := []struct {
name string
requestBody io.Reader
wantStatusCode int
want []map[string]any
}{
{
name: "list_roles with filter for created roles",
requestBody: bytes.NewBufferString(`{"role_name": "test_role_"}`),
wantStatusCode: http.StatusOK,
want: []map[string]any{wantAdmin, wantNormalUser, wantSuperUser},
},
{
name: "list_roles filter specific role",
requestBody: bytes.NewBufferString(fmt.Sprintf(`{"role_name": "%s"}`, superUser)),
wantStatusCode: http.StatusOK,
want: []map[string]any{wantSuperUser},
},
{
name: "list_roles non-existent role",
requestBody: bytes.NewBufferString(`{"role_name": "non_existent_role_xyz"}`),
wantStatusCode: http.StatusOK,
want: nil,
},
}

for _, tc := range invokeTcs {
t.Run(tc.name, func(t *testing.T) {
const api = "http://127.0.0.1:5000/api/tool/list_roles/invoke"

resp, respBody := RunRequest(t, http.MethodPost, api, tc.requestBody, nil)
if resp.StatusCode != tc.wantStatusCode {
t.Fatalf("wrong status code: got %d, want %d, body: %s", resp.StatusCode, tc.wantStatusCode, string(respBody))
}
if tc.wantStatusCode != http.StatusOK {
return
}

var bodyWrapper struct {
Result json.RawMessage `json:"result"`
}
if err := json.Unmarshal(respBody, &bodyWrapper); err != nil {
t.Fatalf("error decoding response wrapper: %v", err)
}

var resultString string
if err := json.Unmarshal(bodyWrapper.Result, &resultString); err != nil {
resultString = string(bodyWrapper.Result)
}

var got []map[string]any
if err := json.Unmarshal([]byte(resultString), &got); err != nil {
t.Fatalf("failed to unmarshal nested result string: %v, resultString: %s", err, resultString)
}

gotMap := make(map[string]map[string]any)
for _, role := range got {
// Remove fields that change every run
delete(role, "oid")
delete(role, "valid_until")

if name, ok := role["role_name"].(string); ok {
gotMap[name] = role
}
}

// Check that every role in 'want' exists in 'got' and matches
for _, wantRole := range tc.want {
roleName, _ := wantRole["role_name"].(string)

gotRole, exists := gotMap[roleName]
if !exists {
t.Errorf("Expected role %q was not found in the response", roleName)
continue
}

if diff := cmp.Diff(wantRole, gotRole); diff != "" {
t.Errorf("Role %q mismatch (-want +got):\n%s", roleName, diff)
}
}

// Verify that if want is nil/empty, got is also empty
if len(tc.want) == 0 && len(got) != 0 {
t.Errorf("Expected empty result, but got %d roles", len(got))
}
})
}
}

// RunMySQLListTablesTest runs tests against the mysql-list-tables tool
func RunMySQLListTablesTest(t *testing.T, databaseName, tableNameParam, tableNameAuth, expectedOwner string) {
var ownerWant any