From 97d816c0571b8219fe1366ca37487f4a8f99cd60 Mon Sep 17 00:00:00 2001 From: Ilya Kompaniets Date: Wed, 9 Apr 2025 23:42:54 +0300 Subject: [PATCH] Added all unit-tests to CI --- .github/workflows/go.yml | 196 +++++++++++++++++- .../basic-scenarios/common_test.go | 2 +- benchmark/helpers_unix.go | 10 + benchmark/helpers_unix_test.go | 6 - db/es/elasticsearch.go | 8 +- db/es/es.go | 3 +- db/es/es_test.go | 38 +++- db/es/maintenance.go | 10 +- db/es/maintenance_test.go | 2 +- db/es/opensearch.go | 33 ++- db/es/vector_test.go | 11 + db/helpers.go | 4 + db/pgmbed/pgmbed_test.go | 59 +++++- db/sql/insert.go | 12 ++ db/sql/insert_test.go | 4 +- db/sql/query_patch_test.go | 20 +- db/sql/search.go | 26 ++- db/sql/sql_test.go | 62 ++++-- db/testing/docker/mssql/docker-compose.yaml | 27 ++- db/testing/docker/mssql/init.sh | 35 ++++ .../docker/opensearch/docker-compose.yaml | 11 +- 21 files changed, 507 insertions(+), 72 deletions(-) create mode 100755 db/testing/docker/mssql/init.sh diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 53495b4..456df86 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -13,6 +13,120 @@ jobs: build: runs-on: ubuntu-latest + + services: + # Database service containers + mariadb-vector: + image: mariadb:11.7.2 + env: + MARIADB_DATABASE: perfkit_db_ci + MARIADB_USER: user + MARIADB_PASSWORD: password # example value of a secret + MARIADB_ROOT_PASSWORD: password # example value of a secret + ports: + - 3306:3306 + # Additional options to handle GitHub Actions environment limitations + options: >- + --health-cmd="healthcheck.sh --connect --innodb_initialized" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + + postgres: + image: ankane/pgvector:v0.5.1 + env: + POSTGRES_USER: root + POSTGRES_PASSWORD: password # example value of a secret + POSTGRES_DB: perfkit_pg_vector_db_ci + ports: + - 5432:5432 + # Health check to wait until postgres is ready + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + mssql: + image: mcr.microsoft.com/mssql/server:2019-latest + env: + ACCEPT_EULA: 'Y' + MSSQL_SA_PASSWORD: MyP@ssw0rd123 # example value of a secret compliant with MS SQL Server password policy + MSSQL_PID: Developer + MSSQL_TCP_PORT: 1433 + MSSQL_COLLATION: SQL_Latin1_General_CP1_CI_AS + MSSQL_DATA_DIR: /var/opt/mssql/data + MSSQL_LOG_DIR: /var/opt/mssql/log + MSSQL_BACKUP_DIR: /var/opt/mssql/backup + MSSQL_AGENT_ENABLED: true + ports: + - 1433:1433 + + cassandra: + image: cassandra:4.0 + env: + CASSANDRA_USER: admin + CASSANDRA_PASSWORD: password # example value of a secret + ports: + - "9042:9042" + options: >- + --health-cmd="cqlsh -u cassandra -p cassandra 127.0.0.1 9042 --execute='describe keyspaces'" + --health-interval=20s + --health-timeout=10s + --health-retries=15 + --health-start-period=60s + + clickhouse: + image: clickhouse/clickhouse-server:24.10-alpine + env: + CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: 1 + CLICKHOUSE_DB: perfkit_db_ci + CLICKHOUSE_USER: username + CLICKHOUSE_PASSWORD: password # example value of a secret + ports: + - "8123:8123" + - "9000:9000" + + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.15.1 + env: + node.name: es-test + cluster.name: es-docker-cluster + bootstrap.memory_lock: true + discovery.type: single-node + ES_JAVA_OPTS: -Xms1g -Xmx1g + xpack.security.enabled: false + xpack.security.http.ssl.enabled: false + xpack.security.transport.ssl.enabled: false + action.auto_create_index: true + ports: + - 
9200:9200 + # Health check for Elasticsearch + options: >- + --health-cmd "curl -s http://127.0.0.1:9200/_cluster/health?wait_for_status=yellow&timeout=30s || exit 1" + --health-interval 10s + --health-timeout 5s + --health-retries 10 + + opensearch: + image: opensearchproject/opensearch:2.18.0 + env: + node.name: os-test + discovery.type: single-node + OPENSEARCH_JAVA_OPTS: -Xms512m -Xmx512m + OPENSEARCH_INITIAL_ADMIN_PASSWORD: bgnYFGR2RhN3SCX # example value of a secret compliant with OpenSearch password policy + plugins.security.ssl.http.enabled: false + ports: + - 9201:9200 + - 9600:9600 + # Health check for OpenSearch + options: >- + --health-cmd "curl -s -u admin:bgnYFGR2RhN3SCX http://127.0.0.1:9201/_cluster/health?wait_for_status=yellow&timeout=30s || exit 1" + --health-interval 20s + --health-timeout 30s + --health-retries 15 + --health-start-period 60s + steps: - uses: actions/checkout@v4 @@ -20,6 +134,84 @@ jobs: uses: actions/setup-go@v4 with: go-version: '1.22' + + - name: Install database clients + run: | + # Install PostgreSQL client + sudo apt-get update + sudo apt-get install -y postgresql-client + # Install SQL Server tools + curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - + curl https://packages.microsoft.com/config/ubuntu/20.04/prod.list | sudo tee /etc/apt/sources.list.d/msprod.list + sudo apt-get update + sudo ACCEPT_EULA=Y apt-get install -y mssql-tools unixodbc-dev - - name: Test - run: go test -v ./benchmark/... + - name: Create SQL Server user and database + run: | + # Wait for SQL Server to be ready (max 30 attempts) + for i in {1..30}; do + if /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P MyP@ssw0rd123 -Q "SELECT 1" -b -o /dev/null; then + echo "SQL Server is ready!" + break + fi + echo "Waiting for SQL Server... (attempt $i/30)"; + sleep 5; + done + + # Create database and user + /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P MyP@ssw0rd123 -Q " + IF NOT EXISTS (SELECT * FROM sys.databases WHERE name = 'perfkit_db_ci') + BEGIN + CREATE DATABASE perfkit_db_ci; + END + GO + USE perfkit_db_ci; + GO + IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = 'perfkit_db_runner') + BEGIN + CREATE LOGIN perfkit_db_runner WITH PASSWORD = 'MyP@ssw0rd123'; + END + GO + IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'perfkit_db_runner') + BEGIN + CREATE USER perfkit_db_runner FOR LOGIN perfkit_db_runner; + END + GO + ALTER ROLE db_owner ADD MEMBER perfkit_db_runner; + GO + " + echo "Database and user created successfully" + + - name: Create vector extension + run: PGPASSWORD=password psql -h localhost -U root -d perfkit_pg_vector_db_ci -c "CREATE EXTENSION vector;" + + - name: Create Cassandra keyspace + run: | + # Wait for Cassandra to be ready (max 30 attempts) + for i in {1..30}; do + if printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/9042; then + echo "Cassandra is ready!" + break + fi + echo "Waiting for cassandra... (attempt $i/30)"; + sleep 5; + done + + echo "Creating keyspace..." + docker exec ${{ job.services.cassandra.id }} cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE IF NOT EXISTS perfkit_db_ci WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};" + echo "Keyspace created" + + - name: Test with Coverage + run: | + go test -v -coverprofile=benchmark_coverage.txt -covermode=atomic ./benchmark/... + go test -v -coverprofile=logger_coverage.txt -covermode=atomic ./logger/... + go test -v -coverprofile=restrelay_coverage.txt -covermode=atomic ./acronis-restrelay-bench/... 
+ go test -v -coverprofile=db_coverage.txt -covermode=atomic ./db/... + go test -v -coverprofile=db_bench_coverage.txt -covermode=atomic ./acronis-db-bench/... + + - name: Upload results to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: benchmark_coverage.txt,logger_coverage.txt,restrelay_coverage.txt,db_coverage.txt,db_bench_coverage.txt + fail_ci_if_error: true diff --git a/acronis-db-bench/test-groups/basic-scenarios/common_test.go b/acronis-db-bench/test-groups/basic-scenarios/common_test.go index 4450338..e95a793 100644 --- a/acronis-db-bench/test-groups/basic-scenarios/common_test.go +++ b/acronis-db-bench/test-groups/basic-scenarios/common_test.go @@ -15,7 +15,7 @@ import ( const ( sqliteConnString = "sqlite://:memory:" mariaDBConnString = "mysql://user:password@tcp(localhost:3306)/perfkit_db_ci" - postgresqlConnString = "postgresql://root:password@localhost:5432/perfkit_db_ci?sslmode=disable" + postgresqlConnString = "postgresql://root:password@localhost:5432/perfkit_pg_vector_db_ci?sslmode=disable" ) type TestingSuite struct { diff --git a/benchmark/helpers_unix.go b/benchmark/helpers_unix.go index 4c0b505..2a67bf5 100644 --- a/benchmark/helpers_unix.go +++ b/benchmark/helpers_unix.go @@ -6,14 +6,24 @@ package benchmark import ( "bytes" "fmt" + "os" "os/exec" "strconv" "strings" "syscall" ) +func isGitHubCI() bool { + return os.Getenv("GITHUB_ACTIONS") == "true" +} + // adjustFilenoUlimit adjusts file descriptor limits on Linux and Darwin func (b *Benchmark) adjustFilenoUlimit() int { + // Skip adjustment in GitHub Actions CI + if isGitHubCI() { + return 0 + } + var rLimit syscall.Rlimit fileno := uint64(1048576) diff --git a/benchmark/helpers_unix_test.go b/benchmark/helpers_unix_test.go index a7200fd..2e6e99a 100644 --- a/benchmark/helpers_unix_test.go +++ b/benchmark/helpers_unix_test.go @@ -4,7 +4,6 @@ package benchmark import ( - "os" "os/exec" "runtime" "syscall" @@ -15,11 +14,6 @@ import ( type LogLevel int -// isGitHubCI returns true if running in GitHub Actions CI environment -func isGitHubCI() bool { - return os.Getenv("GITHUB_ACTIONS") == "true" -} - func TestAdjustFilenoUlimit(t *testing.T) { // Skip test in GitHub Actions if isGitHubCI() { diff --git a/db/es/elasticsearch.go b/db/es/elasticsearch.go index d04cc96..99f9de4 100644 --- a/db/es/elasticsearch.go +++ b/db/es/elasticsearch.go @@ -13,6 +13,7 @@ import ( "os" "strconv" "strings" + "time" es8 "github.com/elastic/go-elasticsearch/v8" "github.com/elastic/go-elasticsearch/v8/esapi" @@ -39,6 +40,10 @@ func (d *elasticSearchDialect) name() db.DialectName { return db.ELASTICSEARCH } +func (d *elasticSearchDialect) getVectorType() fieldType { + return "dense_vector" +} + // nolint:gocritic //TODO refactor unnamed returns func elasticCredentialsAndConnString(cs string, tlsEnabled bool) (string, string, string, error) { var u, err = url.Parse(cs) @@ -261,7 +266,8 @@ func (q *esQuerier) insert(ctx context.Context, idxName indexName, query *BulkIn var res, err = q.es.Bulk(query.Reader(), q.es.Bulk.WithContext(ctx), q.es.Bulk.WithIndex(string(idxName)), - q.es.Bulk.WithRefresh("wait_for")) + q.es.Bulk.WithRefresh("wait_for"), + q.es.Bulk.WithTimeout(30*time.Second)) if err != nil { return nil, 0, fmt.Errorf("error from elasticsearch while performing bulk insert: %v", err) } else if res.IsError() { diff --git a/db/es/es.go b/db/es/es.go index 2950daa..a3c5bdc 100644 --- a/db/es/es.go +++ b/db/es/es.go @@ -78,7 +78,7 @@ func (d *esDatabase) TableExists(tableName string) 
(bool, error) { } func (d *esDatabase) CreateTable(tableName string, tableDefinition *db.TableDefinition, tableMigrationDDL string) error { - return createIndex(d.mig, tableName, tableDefinition, tableMigrationDDL) + return createIndex(d.dialect, d.mig, tableName, tableDefinition, tableMigrationDDL) } func (d *esDatabase) DropTable(name string) error { @@ -213,4 +213,5 @@ func (tq timedQuerier) count(ctx context.Context, idxName indexName, request *Co type dialect interface { name() db.DialectName + getVectorType() fieldType } diff --git a/db/es/es_test.go b/db/es/es_test.go index 98c6bca..3665ad0 100644 --- a/db/es/es_test.go +++ b/db/es/es_test.go @@ -2,6 +2,7 @@ package es import ( "context" + "fmt" "testing" "time" @@ -13,8 +14,8 @@ import ( ) const ( - esConnString = "es://0.0.0.0:9200" - openSearchConnString = "opensearch://admin:%22ScoRpi0n$%22@0.0.0.0:9200" // example value of a secret compliant with OpenSearch password requirements + esConnString = "es://localhost:9200" + openSearchConnString = "opensearch://admin:bgnYFGR2RhN3SCX@localhost:9201" // example value of a secret compliant with OpenSearch password requirements ) type TestingSuite struct { @@ -26,13 +27,10 @@ func TestDatabaseSuiteElasticSearch(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: esConnString}) } -/* func TestDatabaseSuiteOpenSearch(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: openSearchConnString}) } -*/ - type testLogger struct { t *testing.T } @@ -62,7 +60,7 @@ func (suite *TestingSuite) makeTestSession() (db.Database, db.Session, *db.Conte dbo, err := db.Open(db.Config{ ConnString: suite.ConnString, MaxOpenConns: 16, - MaxConnLifetime: 100 * time.Millisecond, + MaxConnLifetime: 1000 * time.Millisecond, QueryLogger: logger, }) @@ -70,6 +68,8 @@ func (suite *TestingSuite) makeTestSession() (db.Database, db.Session, *db.Conte var tableSpec = testTableDefinition() + time.Sleep(1 * time.Second) + if err = dbo.CreateTable("perf_table", tableSpec, ""); err != nil { require.NoError(suite.T(), err, "init scheme") } @@ -95,8 +95,34 @@ func logDbTime(t *testing.T, c *db.Context) { func cleanup(t *testing.T, dbo db.Database) { t.Helper() + exists, err := dbo.TableExists("perf_table") + if err != nil { + t.Error("check table exists", err) + return + } + + if !exists { + return + } + if err := dbo.DropTable("perf_table"); err != nil { t.Error("drop table", err) return } } + +func dbDialect(connString string) (dialect, error) { + scheme, _, err := db.ParseScheme(connString) + if err != nil { + return nil, fmt.Errorf("cannot parse connection string scheme '%v', error: %v", connString, err) + } + + switch scheme { + case "es", "elastic", "elasticsearch": + return &elasticSearchDialect{}, nil + case "os", "opensearch": + return &openSearchDialect{}, nil + default: + return nil, fmt.Errorf("db: unsupported backend '%v'", scheme) + } +} diff --git a/db/es/maintenance.go b/db/es/maintenance.go index 92d3b4d..d277302 100644 --- a/db/es/maintenance.go +++ b/db/es/maintenance.go @@ -72,7 +72,7 @@ type fieldSpec struct { Indexed bool } -func convertToEsType(t db.TableRow) fieldSpec { +func convertToEsType(d dialect, t db.TableRow) fieldSpec { var spec = fieldSpec{ Indexed: t.Indexed, } @@ -91,10 +91,10 @@ func convertToEsType(t db.TableRow) fieldSpec { case db.DataTypeBoolean: spec.Type = fieldTypeBoolean case db.DataTypeVector3Float32: - spec.Type = fieldTypeDenseVector + spec.Type = d.getVectorType() spec.Dims = 3 case db.DataTypeVector768Float32: - spec.Type = fieldTypeDenseVector + spec.Type = 
d.getVectorType() spec.Dims = 768 default: spec.Type = fieldTypeKeyword @@ -149,7 +149,7 @@ func indexExists(mig migrator, tableName string) (bool, error) { return mig.checkILMPolicyExists(ilmPolicyName) } -func createIndex(mig migrator, indexName string, indexDefinition *db.TableDefinition, tableMigrationDDL string) error { +func createIndex(d dialect, mig migrator, indexName string, indexDefinition *db.TableDefinition, tableMigrationDDL string) error { if err := createSearchQueryBuilder(indexName, indexDefinition.TableRows); err != nil { return err } @@ -217,7 +217,7 @@ func createIndex(mig migrator, indexName string, indexDefinition *db.TableDefini continue } - mp[row.Name] = convertToEsType(row) + mp[row.Name] = convertToEsType(d, row) } if err := mig.initComponentTemplate(mappingTemplateName, componentTemplate{ diff --git a/db/es/maintenance_test.go b/db/es/maintenance_test.go index 650063c..74d9c4d 100644 --- a/db/es/maintenance_test.go +++ b/db/es/maintenance_test.go @@ -10,7 +10,7 @@ func (suite *TestingSuite) TestElasticSearchSchemaInit() { dbo, err := db.Open(db.Config{ ConnString: suite.ConnString, MaxOpenConns: 16, - MaxConnLifetime: 100 * time.Millisecond, + MaxConnLifetime: 1000 * time.Millisecond, }) if err != nil { diff --git a/db/es/opensearch.go b/db/es/opensearch.go index a762174..fa5dec9 100644 --- a/db/es/opensearch.go +++ b/db/es/opensearch.go @@ -9,6 +9,7 @@ import ( "fmt" "net" "net/http" + "net/url" "os" "github.com/opensearch-project/opensearch-go/v4" @@ -33,6 +34,36 @@ func (d *openSearchDialect) name() db.DialectName { return db.OPENSEARCH } +func (d *openSearchDialect) getVectorType() fieldType { + return "knn_vector" +} + +// nolint:gocritic //TODO refactor unnamed returns +func openSearchCredentialsAndConnString(cs string, tlsEnabled bool) (string, string, string, error) { + var u, err = url.Parse(cs) + if err != nil { + return "", "", "", fmt.Errorf("cannot parse connection url %v, err: %v", cs, err) + } + + var username = u.User.Username() + var password, _ = u.User.Password() + + var scheme string + if tlsEnabled { + scheme = "https" + } else { + scheme = "http" + } + + var finalURL = url.URL{ + Scheme: scheme, + Host: u.Host, + } + cs = finalURL.String() + + return username, password, cs, nil +} + type openSearchConnector struct{} func (c *openSearchConnector) ConnectionPool(cfg db.Config) (db.Database, error) { @@ -41,7 +72,7 @@ func (c *openSearchConnector) ConnectionPool(cfg db.Config) (db.Database, error) var err error if s := os.Getenv(magicEsEnvVar); s == "" { - username, password, cs, err = elasticCredentialsAndConnString(cfg.ConnString, cfg.TLSEnabled) + username, password, cs, err = openSearchCredentialsAndConnString(cfg.ConnString, cfg.TLSEnabled) if err != nil { return nil, fmt.Errorf("db: openSearch: %v", err) } diff --git a/db/es/vector_test.go b/db/es/vector_test.go index 61f7dbc..6fb0369 100644 --- a/db/es/vector_test.go +++ b/db/es/vector_test.go @@ -57,6 +57,17 @@ func testVectorTableDefinition(dia db.DialectName) *db.TableDefinition { } func (suite *TestingSuite) TestVectorSearch() { + var actualDialect, err = dbDialect(suite.ConnString) + if err != nil { + suite.T().Error(err) + return + } + + if actualDialect.name() != db.ELASTICSEARCH { + suite.T().Skip("only ElasticSearch supports vector search") + return + } + d, s, c := suite.makeVectorTestSession() defer logDbTime(suite.T(), c) defer vectorCleanup(suite.T(), d) diff --git a/db/helpers.go b/db/helpers.go index 67b5898..70a02e9 100644 --- a/db/helpers.go +++ b/db/helpers.go @@ 
-262,6 +262,10 @@ func ParseFuncMultipleArgs(s string, sep string) (fName string, args []string, e return "", nil, fmt.Errorf("bad function '%v', closing bracket placed before opening bracket", s) } + if argOpen+1 == argClose { + return s[:argOpen], nil, nil + } + return s[:argOpen], strings.Split(s[argOpen+1:argClose], sep), nil } diff --git a/db/pgmbed/pgmbed_test.go b/db/pgmbed/pgmbed_test.go index 00228e3..2db938d 100644 --- a/db/pgmbed/pgmbed_test.go +++ b/db/pgmbed/pgmbed_test.go @@ -6,12 +6,65 @@ import ( "testing" _ "github.com/lib/pq" + + "github.com/acronis/perfkit/logger" +) + +type LogLevel = logger.LogLevel +type LogMessage = logger.LogMessage + +const ( + LevelError = logger.LevelError + LevelWarn = logger.LevelWarn + LevelInfo = logger.LevelInfo + LevelDebug = logger.LevelDebug + LevelTrace = logger.LevelTrace ) -type testLogger struct{} +type testLogger struct { + level LogLevel + lastMessage *LogMessage +} + +func (l *testLogger) Log(level LogLevel, message string, args ...interface{}) { + l.lastMessage = &LogMessage{Level: level, Message: fmt.Sprintf(message, args...)} + fmt.Printf(message, args...) +} + +func (l *testLogger) Error(format string, args ...interface{}) { + l.Log(LevelError, format, args...) +} + +func (l *testLogger) Warn(format string, args ...interface{}) { + l.Log(LevelWarn, format, args...) +} + +func (l *testLogger) Info(format string, args ...interface{}) { + l.Log(LevelInfo, format, args...) +} + +func (l *testLogger) Debug(format string, args ...interface{}) { + l.Log(LevelDebug, format, args...) +} + +func (l *testLogger) Trace(format string, args ...interface{}) { + l.Log(LevelTrace, format, args...) +} + +func (l *testLogger) GetLevel() LogLevel { + return l.level +} + +func (l *testLogger) SetLevel(level LogLevel) { + l.level = level +} + +func (l *testLogger) GetLastMessage() *LogMessage { + return l.lastMessage +} -func (l *testLogger) Log(format string, args ...interface{}) { - fmt.Printf(format, args...) +func (l *testLogger) Clone() logger.Logger { + return &testLogger{level: l.level} } func TestLaunch(t *testing.T) { diff --git a/db/sql/insert.go b/db/sql/insert.go index 01686b8..c499e6b 100644 --- a/db/sql/insert.go +++ b/db/sql/insert.go @@ -47,6 +47,18 @@ func (g *sqlGateway) bulkInsertParameterized(tableName string, rows [][]interfac var parametersPlaceholder = strings.Join(ret, ",") valuesReference = append(valuesReference, fmt.Sprintf("(%s)", parametersPlaceholder)) } + } else if g.dialect.name() == db.MSSQL { + // SQL Server uses @p1, @p2, etc. for parameter placeholders + var i = 0 + for j := 0; j < len(rows); j++ { + var ret = make([]string, len(columnNames)) + for k := 0; k < len(columnNames); k++ { + ret[k] = fmt.Sprintf("@p%d", i+1) + i++ + } + var parametersPlaceholder = strings.Join(ret, ",") + valuesReference = append(valuesReference, fmt.Sprintf("(%s)", parametersPlaceholder)) + } } else { // Other SQL databases use ? 
for parameter placeholders for j := 0; j < len(rows); j++ { diff --git a/db/sql/insert_test.go b/db/sql/insert_test.go index 86600f4..0327942 100644 --- a/db/sql/insert_test.go +++ b/db/sql/insert_test.go @@ -1,6 +1,8 @@ package sql -import "github.com/acronis/perfkit/db" +import ( + "github.com/acronis/perfkit/db" +) func (suite *TestingSuite) TestInsert() { d, s, c := suite.makeTestSession() diff --git a/db/sql/query_patch_test.go b/db/sql/query_patch_test.go index fd6c953..3e31d07 100644 --- a/db/sql/query_patch_test.go +++ b/db/sql/query_patch_test.go @@ -8,7 +8,7 @@ import ( func TestDefaultCreateQueryPatchFuncWithMySQL(t *testing.T) { var table = "test_table" - var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$ascii})" + var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$varchar})" var dia = &sqlDialect{dia: &mysqlDialect{ sqlEngine: "xpand-allnodes", @@ -22,7 +22,7 @@ func TestDefaultCreateQueryPatchFuncWithMySQL(t *testing.T) { return } - var expected = "CREATE TABLE test_table (id BIGINT AUTO_INCREMENT PRIMARY KEY, name character set ascii)" + var expected = "CREATE TABLE test_table (id BIGINT AUTO_INCREMENT PRIMARY KEY, name VARCHAR)" if result != expected { t.Errorf("DefaultCreateQueryPatchFunc() got = %v, want %v", result, expected) } @@ -30,7 +30,7 @@ func TestDefaultCreateQueryPatchFuncWithMySQL(t *testing.T) { func TestDefaultCreateQueryPatchFuncWithSQLite(t *testing.T) { var table = "test_table" - var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$ascii})" + var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$varchar})" var dia = &sqlDialect{dia: &sqliteDialect{}} @@ -42,7 +42,7 @@ func TestDefaultCreateQueryPatchFuncWithSQLite(t *testing.T) { return } - var expected = "CREATE TABLE test_table (id INTEGER PRIMARY KEY AUTOINCREMENT, name )" + var expected = "CREATE TABLE test_table (id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR)" if result != expected { t.Errorf("DefaultCreateQueryPatchFunc() got = %v, want %v", result, expected) } @@ -50,7 +50,7 @@ func TestDefaultCreateQueryPatchFuncWithSQLite(t *testing.T) { func TestDefaultCreateQueryPatchFuncWithPostgres(t *testing.T) { var table = "test_table" - var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$ascii})" + var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$varchar})" var dia = &sqlDialect{dia: &pgDialect{}} @@ -62,7 +62,7 @@ func TestDefaultCreateQueryPatchFuncWithPostgres(t *testing.T) { return } - var expected = "CREATE TABLE test_table (id BIGSERIAL PRIMARY KEY, name )" + var expected = "CREATE TABLE test_table (id BIGSERIAL PRIMARY KEY, name VARCHAR)" if result != expected { t.Errorf("DefaultCreateQueryPatchFunc() got = %v, want %v", result, expected) } @@ -70,7 +70,7 @@ func TestDefaultCreateQueryPatchFuncWithPostgres(t *testing.T) { func TestDefaultCreateQueryPatchFuncWithMSSQL(t *testing.T) { var table = "test_table" - var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$ascii})" + var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$varchar})" var dia = &sqlDialect{dia: &msDialect{}} @@ -82,7 +82,7 @@ func TestDefaultCreateQueryPatchFuncWithMSSQL(t *testing.T) { return } - var expected = "CREATE TABLE test_table (id BIGINT IDENTITY(1,1) PRIMARY KEY, name )" + var expected = "CREATE TABLE test_table (id BIGINT IDENTITY(1,1) PRIMARY KEY, name VARCHAR)" if result != expected { t.Errorf("DefaultCreateQueryPatchFunc() got = %v, want %v", result, expected) } @@ -90,7 +90,7 @@ func 
TestDefaultCreateQueryPatchFuncWithMSSQL(t *testing.T) { func TestDefaultCreateQueryPatchFuncWithCassandra(t *testing.T) { var table = "test_table" - var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$ascii})" + var query = "CREATE TABLE {table} (id {$bigint_autoinc_pk}, name {$varchar})" var dia = &sqlDialect{dia: &cassandraDialect{}} @@ -102,7 +102,7 @@ func TestDefaultCreateQueryPatchFuncWithCassandra(t *testing.T) { return } - var expected = "CREATE TABLE test_table (id bigint PRIMARY KEY, name )" + var expected = "CREATE TABLE test_table (id bigint primary key, name varchar)" if result != expected { t.Errorf("DefaultCreateQueryPatchFunc() got = %v, want %v", result, expected) } diff --git a/db/sql/search.go b/db/sql/search.go index f2ff63e..25a69e6 100644 --- a/db/sql/search.go +++ b/db/sql/search.go @@ -82,6 +82,7 @@ type selectBuilder struct { // sqlOrder generates the ORDER BY clause // Parameters: +// - d: SQL dialect // - fields: columns to select // - values: order specifications (e.g. "asc(field)", "desc(field)") // Returns the ORDER BY clause or error @@ -225,7 +226,14 @@ func (b selectBuilder) sqlConditions(d dialect, optimizeConditions bool, fields } } - fmts, args, err := condgen(d, optimizeConditions, fmt.Sprintf("%v.%v", b.tableName, c.Col), c.Vals) + var fieldName string + if d.name() == db.CASSANDRA { + fieldName = c.Col + } else { + fieldName = fmt.Sprintf("%v.%v", b.tableName, c.Col) + } + + fmts, args, err := condgen(d, optimizeConditions, fieldName, c.Vals) if err != nil { return "", nil, false, err } @@ -358,14 +366,15 @@ func (b selectBuilder) sql(d dialect, c *db.SelectCtrl) (string, bool, error) { } if c.Page.Limit > 0 { - if d.name() != db.MSSQL { - limit = fmt.Sprintf("LIMIT %v OFFSET %v", c.Page.Limit, c.Page.Offset) - } else { - if order == "" { - order = "ORDER BY id DESC" - } else { + switch d.name() { + case db.CASSANDRA: + limit = fmt.Sprintf("LIMIT %v", c.Page.Limit) + case db.MSSQL: + if len(order) != 0 { limit = fmt.Sprintf("OFFSET %v ROWS FETCH NEXT %v ROWS ONLY", c.Page.Offset, c.Page.Limit) } + default: + limit = fmt.Sprintf("LIMIT %v OFFSET %v", c.Page.Limit, c.Page.Offset) } } @@ -923,6 +932,9 @@ func (g *sqlGateway) Select(tableName string, sc *db.SelectCtrl) (db.Rows, error var rows *sql.Rows rows, err = g.rw.queryContext(g.ctx, query) + if err != nil { + return nil, err + } if g.explain && g.explainLogger != nil { if err = logExplainResults(g.explainLogger, g.dialect.name(), rows, query); err != nil { diff --git a/db/sql/sql_test.go b/db/sql/sql_test.go index d30b14b..a9b4f80 100644 --- a/db/sql/sql_test.go +++ b/db/sql/sql_test.go @@ -13,6 +13,7 @@ import ( "github.com/gocql/gocql" "github.com/acronis/perfkit/db" + "github.com/acronis/perfkit/logger" ) const ( @@ -20,7 +21,6 @@ const ( mariaDBConnString = "mysql://user:password@tcp(localhost:3306)/perfkit_db_ci" // example value of a secret sqlServerConnString = "sqlserver://perfkit_db_runner:MyP%40ssw0rd123@localhost:1433?database=perfkit_db_ci" // example value of a secret - postgresqlConnString = "postgresql://root:password@localhost:5432/perfkit_db_ci?sslmode=disable" // example value of a secret pgVectorConnString = "postgresql://root:password@localhost:5432/perfkit_pg_vector_db_ci?sslmode=disable" // example value of a secret clickHouseConnString = "clickhouse://username:password@localhost:9000/perfkit_db_ci" // example value of a secret cassandraConnString = "cql://admin:password@localhost:9042?keyspace=perfkit_db_ci" // example value of a secret @@ -31,7 +31,6 @@ type 
TestingSuite struct { ConnString string } -/* func TestDatabaseSuiteSQLite(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: sqliteConnString}) } @@ -44,16 +43,10 @@ func TestDatabaseSuiteSQLServer(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: sqlServerConnString}) } -func TestDatabaseSuitePG(t *testing.T) { - suite.Run(t, &TestingSuite{ConnString: postgresqlConnString}) -} -*/ - func TestDatabaseSuitePGVector(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: pgVectorConnString}) } -/* func TestDatabaseSuiteClickHouse(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: clickHouseConnString}) } @@ -62,14 +55,55 @@ func TestDatabaseSuiteCassandra(t *testing.T) { suite.Run(t, &TestingSuite{ConnString: cassandraConnString}) } -*/ - type testLogger struct { - t *testing.T + t *testing.T + level logger.LogLevel + lastMessage *logger.LogMessage +} + +func newTestLogger(t *testing.T, level logger.LogLevel) logger.Logger { + return &testLogger{t: t, level: level} +} + +func (l *testLogger) Log(level logger.LogLevel, message string, args ...interface{}) { + l.lastMessage = &logger.LogMessage{Level: level, Message: fmt.Sprintf(message, args...)} + l.t.Logf(message, args...) +} + +func (l *testLogger) Error(format string, args ...interface{}) { + l.Log(logger.LevelError, format, args...) +} + +func (l *testLogger) Warn(format string, args ...interface{}) { + l.Log(logger.LevelWarn, format, args...) +} + +func (l *testLogger) Info(format string, args ...interface{}) { + l.Log(logger.LevelInfo, format, args...) +} + +func (l *testLogger) Debug(format string, args ...interface{}) { + l.Log(logger.LevelDebug, format, args...) +} + +func (l *testLogger) Trace(format string, args ...interface{}) { + l.Log(logger.LevelTrace, format, args...) +} + +func (l *testLogger) GetLevel() logger.LogLevel { + return l.level +} + +func (l *testLogger) SetLevel(level logger.LogLevel) { + l.level = level +} + +func (l *testLogger) GetLastMessage() *logger.LogMessage { + return l.lastMessage } -func (l *testLogger) Log(format string, args ...interface{}) { - l.t.Logf(format, args...) 
+func (l *testLogger) Clone() logger.Logger { + return &testLogger{level: l.level} } func testTableDefinition(dia db.DialectName) *db.TableDefinition { @@ -103,7 +137,7 @@ func testTableDefinition(dia db.DialectName) *db.TableDefinition { } func (suite *TestingSuite) makeTestSession() (db.Database, db.Session, *db.Context) { - var logger = &testLogger{t: suite.T()} + var logger = &testLogger{t: suite.T(), level: logger.LevelDebug} dbo, err := db.Open(db.Config{ ConnString: suite.ConnString, diff --git a/db/testing/docker/mssql/docker-compose.yaml b/db/testing/docker/mssql/docker-compose.yaml index 9cbf851..211297b 100644 --- a/db/testing/docker/mssql/docker-compose.yaml +++ b/db/testing/docker/mssql/docker-compose.yaml @@ -8,16 +8,31 @@ x-logging: services: mssql: - image: mcmoe/mssqldocker:v1.0.0 + image: mcr.microsoft.com/mssql/server:2019-latest environment: - ACCEPT_EULA: Y - SA_PASSWORD: MyP@ssw0rd123 # example value of a secret compliant with MS SQL Server password policy - MSSQL_DB: perfkit_db_ci - MSSQL_USER: perfkit_db_runner - MSSQL_PASSWORD: MyP@ssw0rd123 # example value of a secret compliant with MS SQL Server password policy + ACCEPT_EULA: 'Y' + MSSQL_SA_PASSWORD: MyP@ssw0rd123 # example value of a secret compliant with MS SQL Server password policy + MSSQL_PID: Developer + MSSQL_TCP_PORT: 1433 + MSSQL_COLLATION: SQL_Latin1_General_CP1_CI_AS + MSSQL_DATA_DIR: /var/opt/mssql/data + MSSQL_LOG_DIR: /var/opt/mssql/log + MSSQL_BACKUP_DIR: /var/opt/mssql/backup + MSSQL_AGENT_ENABLED: true logging: *logging ports: - "1433:1433" + volumes: + - mssql:/var/opt/mssql + + init: + image: mcr.microsoft.com/mssql-tools + depends_on: + - mssql + restart: "no" + entrypoint: ["/bin/bash", "-c", "chmod +x /init.sh && /init.sh"] + volumes: + - ./init.sh:/init.sh volumes: mssql: diff --git a/db/testing/docker/mssql/init.sh b/db/testing/docker/mssql/init.sh new file mode 100755 index 0000000..0e33c01 --- /dev/null +++ b/db/testing/docker/mssql/init.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Wait for SQL Server to be ready (max 30 attempts) +for i in {1..30}; do + if /opt/mssql-tools/bin/sqlcmd -S mssql -U sa -P MyP@ssw0rd123 -Q "SELECT 1" -b -o /dev/null; then + echo "SQL Server is ready!" + break + fi + echo "Waiting for SQL Server... 
(attempt $i/30)"; + sleep 5; +done + +# Create database and user +/opt/mssql-tools/bin/sqlcmd -S mssql -U sa -P MyP@ssw0rd123 -Q " + IF NOT EXISTS (SELECT * FROM sys.databases WHERE name = 'perfkit_db_ci') + BEGIN + CREATE DATABASE perfkit_db_ci; + END + GO + USE perfkit_db_ci; + GO + IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = 'perfkit_db_runner') + BEGIN + CREATE LOGIN perfkit_db_runner WITH PASSWORD = 'MyP@ssw0rd123'; + END + GO + IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'perfkit_db_runner') + BEGIN + CREATE USER perfkit_db_runner FOR LOGIN perfkit_db_runner; + END + GO + ALTER ROLE db_owner ADD MEMBER perfkit_db_runner; + GO +" +echo "Database and user created successfully" \ No newline at end of file diff --git a/db/testing/docker/opensearch/docker-compose.yaml b/db/testing/docker/opensearch/docker-compose.yaml index f3245bc..0833288 100644 --- a/db/testing/docker/opensearch/docker-compose.yaml +++ b/db/testing/docker/opensearch/docker-compose.yaml @@ -8,21 +8,18 @@ x-logging: services: opensearch: - image: opensearchproject/opensearch:2.17.0 + image: opensearchproject/opensearch:2.18.0 environment: - node.name=os-test - discovery.type=single-node # uses only for test clusters - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM - - OPENSEARCH_INITIAL_ADMIN_PASSWORD="ScoRpi0n$" # example value of a secret compliant with OpenSearch password policy - ulimits: - memlock: - soft: -1 - hard: -1 + - OPENSEARCH_INITIAL_ADMIN_PASSWORD=bgnYFGR2RhN3SCX # example value of a secret compliant with OpenSearch password policy + - plugins.security.ssl.http.enabled=false logging: *logging volumes: - opensearch-data:/usr/share/opensearch/data ports: - - 9200:9200 + - 9201:9200 - 9600:9600 volumes:
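
Note on the coverage artifacts: the files produced by the "Test with Coverage" step are plain Go cover profiles, so they can also be inspected locally without Codecov. A minimal sketch, assuming the same package list as the workflow above (the profile file name is just the one used in CI):

    # produce a profile the same way the CI step does
    go test -v -coverprofile=db_coverage.txt -covermode=atomic ./db/...

    # per-function coverage summary in the terminal
    go tool cover -func=db_coverage.txt

    # annotated per-line HTML report in the browser
    go tool cover -html=db_coverage.txt
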