From 6d2d444fbb66b09da3d1025f07840eb8bd7212e3 Mon Sep 17 00:00:00 2001
From: Aditya Maru
Date: Fri, 20 Nov 2020 12:37:20 -0500
Subject: [PATCH] cli: remove cockroach dump from CRDB

Previously we maintained both BACKUP and dump because BACKUP was
enterprise-only. In 20.2+, basic BACKUP, which can do at least as much
as dump, is free, so this is no longer a reason to keep dump. A more
detailed explanation is at #54040.

Fixes: #56405
---
 pkg/cli/BUILD.bazel | 17 -
 pkg/cli/cli.go | 1 -
 pkg/cli/cli_test.go | 15 -
 pkg/cli/dump.go | 1164 ---------------------------------
 pkg/cli/dump_test.go | 1053 -----------------------------
 pkg/cli/flags.go | 6 -
 pkg/cli/flags_test.go | 4 +-
 pkg/cli/sql_util.go | 18 -
 pkg/cmd/roachtest/BUILD.bazel | 1 -
 pkg/cmd/roachtest/cluster.go | 26 -
 pkg/cmd/roachtest/dump.go | 90 ---
 pkg/cmd/roachtest/registry.go | 1 -
 12 files changed, 2 insertions(+), 2394 deletions(-)
 delete mode 100644 pkg/cli/dump.go
 delete mode 100644 pkg/cli/dump_test.go
 delete mode 100644 pkg/cmd/roachtest/dump.go

diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel index 4e87ff1560ea..5c956cf30934 100644 --- a/pkg/cli/BUILD.bazel +++ b/pkg/cli/BUILD.bazel @@ -18,7 +18,6 @@ go_library( "demo_cluster.go", "demo_telemetry.go", "doctor.go", - "dump.go", "error.go", "examples.go", "flags.go", @@ -115,7 +114,6 @@ go_library( "//pkg/sql/catalog/catconstants", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", - "//pkg/sql/catalog/typedesc", "//pkg/sql/doctor", "//pkg/sql/execinfrapb", "//pkg/sql/lex", @@ -127,9 +125,7 @@ go_library( "//pkg/sql/row", "//pkg/sql/sem/builtins", "//pkg/sql/sem/tree", - "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", - "//pkg/sql/types", "//pkg/sqlmigrations", "//pkg/storage", "//pkg/storage/cloud", @@ -138,7 +134,6 @@ go_library( "//pkg/ts/tspb", "//pkg/util", "//pkg/util/contextutil", - "//pkg/util/ctxgroup", "//pkg/util/encoding", "//pkg/util/encoding/csv", "//pkg/util/envutil", @@ -161,8 +156,6 @@ go_library( "//pkg/util/stop", "//pkg/util/syncutil", "//pkg/util/sysutil", - "//pkg/util/timeofday", - "//pkg/util/timetz", "//pkg/util/timeutil", "//pkg/util/tracing", "//pkg/util/uuid", @@ -194,7 +187,6 @@ go_library( "//vendor/github.com/kr/pretty", "//vendor/github.com/lib/pq", "//vendor/github.com/lib/pq/auth/kerberos", - "//vendor/github.com/lib/pq/oid", "//vendor/github.com/mattn/go-isatty", "//vendor/github.com/olekukonko/tablewriter", "//vendor/github.com/spf13/cobra", @@ -265,7 +257,6 @@ go_test( "demo_locality_test.go", "demo_test.go", "doctor_test.go", - "dump_test.go", "error_test.go", "flags_test.go", "haproxy_test.go", @@ -318,22 +309,14 @@ go_test( "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util", - "//pkg/util/bitarray", - "//pkg/util/duration", - "//pkg/util/ipaddr", - "//pkg/util/json", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/log/logflags", "//pkg/util/log/severity", "//pkg/util/protoutil", - "//pkg/util/randutil", "//pkg/util/stop", "//pkg/util/timeutil", - "//pkg/util/timeutil/pgdate", - "//pkg/util/uuid", "//pkg/workload/examples", - "//vendor/github.com/cockroachdb/apd/v2:apd", "//vendor/github.com/cockroachdb/datadriven", "//vendor/github.com/cockroachdb/errors", "//vendor/github.com/cockroachdb/pebble", diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go index 90103f17bddc..efd4bf33a122 100644 --- a/pkg/cli/cli.go +++ b/pkg/cli/cli.go @@ -198,7 +198,6 @@ func init() { stmtDiagCmd, authCmd, nodeCmd, - dumpCmd, nodeLocalCmd, userFileCmd, importCmd, diff --git a/pkg/cli/cli_test.go b/pkg/cli/cli_test.go index
586d18255a76..60416b15d906 100644 --- a/pkg/cli/cli_test.go +++ b/pkg/cli/cli_test.go @@ -2042,21 +2042,6 @@ func Example_sqlfmt() { // SELECT (1 + 2) + 3 } -func Example_dump_no_visible_columns() { - c := newCLITest(cliTestParams{}) - defer c.cleanup() - - c.RunWithArgs([]string{"sql", "-e", "create table t(x int); set sql_safe_updates=false; alter table t drop x"}) - c.RunWithArgs([]string{"dump", "defaultdb"}) - - // Output: - // sql -e create table t(x int); set sql_safe_updates=false; alter table t drop x - // ALTER TABLE - // dump defaultdb - // CREATE TABLE public.t (FAMILY "primary" (rowid) - // ); -} - // Example_read_from_file tests the -f parameter. // The input file contains a mix of client-side and // server-side commands to ensure that both are supported with -f. diff --git a/pkg/cli/dump.go b/pkg/cli/dump.go deleted file mode 100644 index c20a88536e6b..000000000000 --- a/pkg/cli/dump.go +++ /dev/null @@ -1,1164 +0,0 @@ -// Copyright 2016 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - "database/sql/driver" - "fmt" - "io" - "os" - "sort" - "strings" - "time" - - "github.com/cockroachdb/cockroach/pkg/cli/cliflags" - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" - "github.com/cockroachdb/cockroach/pkg/sql/lex" - "github.com/cockroachdb/cockroach/pkg/sql/parser" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" - "github.com/cockroachdb/cockroach/pkg/util/timeofday" - "github.com/cockroachdb/cockroach/pkg/util/timetz" - "github.com/cockroachdb/cockroach/pkg/util/version" - "github.com/cockroachdb/errors" - "github.com/lib/pq" - "github.com/lib/pq/oid" - "github.com/spf13/cobra" -) - -// dumpCmd dumps SQL tables. -var dumpCmd = &cobra.Command{ - Use: "dump [options] <database> [<table> [<table>
...]]", - Short: "dump sql tables\n", - Long: ` -Dump SQL tables of a cockroach database. If the table name -is omitted, dump all tables in the database. -`, - RunE: MaybeDecorateGRPCError(runDump), - Deprecated: "cockroach dump will be removed in a subsequent release.\n" + - "For details, see: https://github.com/cockroachdb/cockroach/issues/54040", -} - -// We accept versions that are strictly newer than v2.1.0-alpha.20180416 -// (hence the "-0" at the end). -var verDump = version.MustParse("v2.1.0-alpha.20180416-0") - -// databasesNamesExtractor extracts list of available databases to dump -func databasesNamesExtractor(conn *sqlConn) ([]string, error) { - var dbNames []string - - maxReservedDescID, err := driver.Int32.ConvertValue(keys.MaxReservedDescID) - if err != nil { - return nil, err - } - - rows, err := conn.Query(`SELECT name FROM system.namespace WHERE id > $1 AND "parentID" = 0`, []driver.Value{maxReservedDescID}) - if err != nil { - return nil, err - } - - vals := make([]driver.Value, 1) - for { - if err := rows.Next(vals); err == io.EOF { - break - } - - if name, ok := vals[0].(string); ok { - dbNames = append(dbNames, name) - } else { - return nil, fmt.Errorf("unexpected value: %T", name) - } - } - - // sort to get deterministic output of ordered database names - sort.Strings(dbNames) - - return dbNames, nil -} - -// runDumps performs a dump of a table or database. -// -// The approach here and its current flaws are summarized -// in https://github.com/cockroachdb/cockroach/issues/28948. -func runDump(cmd *cobra.Command, args []string) error { - conn, err := makeSQLClient("cockroach dump", useDefaultDb) - if err != nil { - return err - } - defer conn.Close() - - if err := conn.requireServerVersion(verDump); err != nil { - return err - } - - var dbNames []string - if dumpCtx.dumpAll && len(args) != 0 { - return fmt.Errorf("cannot specify --%s and a specific database at the same time", cliflags.DumpAll.Name) - } - - if len(args) != 0 { - dbNames = append(dbNames, args[0]) - } else if dumpCtx.dumpAll { - dbNames, err = databasesNamesExtractor(conn) - if err != nil { - return err - } - } - - var tableNames []string - if len(args) > 1 { - tableNames = args[1:] - } - - // Get the cluster timestamp at which to dump at. - clusterTS, err := getAsOf(conn, dumpCtx.asOf) - if err != nil { - return err - } - - var fullMds []basicMetadata - w := os.Stdout - - for _, dbName := range dbNames { - // Collect all user defined types present in this database. - typContext, err := collectUserDefinedTypes(conn, dbName, clusterTS) - if err != nil { - return err - } - // Following pg_dump, we only dump types when dumping the database - // is requested, not when specific tables are requested to be dumped. - shouldDumpTypes := len(tableNames) == 0 - - // Collect any user defined schemas in the database. - schemas, err := collectUserDefinedSchemas(conn, dbName, clusterTS) - if err != nil { - return err - } - // As with types, we only dump schema create statements when dumping - // the full database, not when specific tables are requested. - shouldDumpSchemas := len(tableNames) == 0 - - mds, err := getDumpMetadata(conn, dbName, tableNames, clusterTS) - if err != nil { - return err - } - - if len(mds) == 0 { - continue - } - - byID := make(map[int64]basicMetadata) - for _, md := range mds { - byID[md.ID] = md - } - - // First sort by name to guarantee stable output. 
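- // Sorting by name first makes the dependency walk below visit tables in a deterministic order, so the final output is reproducible across runs.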
- sort.Slice(mds, func(i, j int) bool { - return mds[i].name.String() < mds[j].name.String() - }) - - // Collect transitive dependencies in topological order into collected. - // The topological order is essential here since it captures dependencies - // for views and sequences creation, hence simple alphabetical sort won't - // be enough. - var collected []int64 - seen := make(map[int64]bool) - for _, md := range mds { - collect(md.ID, byID, seen, &collected) - } - // collectOrder maps a table ID to its collection index. This is needed - // instead of just using range over collected because collected may contain - // table IDs not present in the dump spec. It is simpler to sort mds correctly - // to skip over these referenced-but-not-dumped tables. - collectOrder := make(map[int64]int) - for i, id := range collected { - collectOrder[id] = i - } - - // Second sort dumped tables by dependency order. - sort.SliceStable(mds, func(i, j int) bool { - return collectOrder[mds[i].ID] < collectOrder[mds[j].ID] - }) - - if dumpCtx.dumpAll && dumpCtx.dumpMode != dumpDataOnly { - if _, err := fmt.Fprintf(w, "\nCREATE DATABASE IF NOT EXISTS %s;\nUSE %s;\n\n", dbName, dbName); err != nil { - return err - } - } - - // Dump schema create statements, if any. If connecting to a cockroach version - // before 20.2 the list of schemas will be empty, so nothing will be emitted. - if shouldDumpSchemas && dumpCtx.dumpMode != dumpDataOnly { - for _, schema := range schemas { - if _, err := fmt.Fprintf(w, "CREATE SCHEMA %s;\n\n", tree.Name(schema)); err != nil { - return err - } - } - } - - // Dump any type creation statements. - if shouldDumpTypes && dumpCtx.dumpMode != dumpDataOnly { - for _, stmt := range typContext.createStatements { - if _, err := fmt.Fprintf(w, "%s;\n\n", stmt); err != nil { - return err - } - } - } - - if dumpCtx.dumpMode != dumpDataOnly { - for i, md := range mds { - if i > 0 { - fmt.Fprintln(w) - } - if err := dumpCreateTable(w, md); err != nil { - return err - } - } - } - if dumpCtx.dumpMode != dumpSchemaOnly { - for _, md := range mds { - switch md.kind { - case "table": - if err := dumpTableData(w, conn, typContext, md); err != nil { - return err - } - case "sequence": - if err := dumpSequenceData(w, conn, md); err != nil { - return err - } - case "view": - continue - default: - panic("unknown descriptor type: " + md.kind) - } - } - } - fullMds = append(fullMds, mds...) - } - - // Put FK ALTERs at the end. - if dumpCtx.dumpMode != dumpDataOnly { - hasRefs := false - for _, md := range fullMds { - for _, alter := range md.alter { - if !hasRefs { - hasRefs = true - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - } - fmt.Fprintf(w, "%s;\n", alter) - } - } - if hasRefs { - const alterValidateMessage = `-- Validate foreign key constraints. These can fail if there was unvalidated data during the dump.` - if _, err := w.Write([]byte("\n" + alterValidateMessage + "\n")); err != nil { - return err - } - for _, md := range fullMds { - for _, validate := range md.validate { - fmt.Fprintf(w, "%s;\n", validate) - } - } - } - } - return nil -} - -func collect(tid int64, byID map[int64]basicMetadata, seen map[int64]bool, collected *[]int64) { - // has this table already been collected previously? - if seen[tid] { - return - } - // no: mark it as seen. - seen[tid] = true - for _, dep := range byID[tid].dependsOn { - // depth-first collection of dependencies - collect(dep, byID, seen, collected) - } - // Only add it after its dependencies. 
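- // This post-order append is what makes collected a topological order: every dependency of tid is appended before tid itself.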
- *collected = append(*collected, tid) -} - -type basicMetadata struct { - ID int64 - name *tree.TableName - createStmt string - dependsOn []int64 - kind string // "string", "table", or "view" - alter []string - validate []string - ts string -} - -// tableMetadata describes one table to dump. -type tableMetadata struct { - basicMetadata - - columnNames string - columnTypes map[string]tree.ResolvableTypeReference -} - -// dumpTypeContext acts as a collection of user defined types to resolve -// references to user defined types in the dump process. -type dumpTypeContext struct { - typMap map[types.UserDefinedTypeName]*types.T - createStatements []string -} - -// ResolveType implements the tree.TypeReferenceResolver interface. -func (d *dumpTypeContext) ResolveType( - _ context.Context, name *tree.UnresolvedObjectName, -) (*types.T, error) { - key := types.UserDefinedTypeName{ - Name: name.Object(), - Schema: name.Schema(), - } - typ, ok := d.typMap[key] - if !ok { - return nil, errors.Newf("type %s not found", name.String()) - } - return typ, nil -} - -// ResolveTypeByOID implements the tree.TypeReferenceResolver interface. -func (d *dumpTypeContext) ResolveTypeByOID(context.Context, oid.Oid) (*types.T, error) { - return nil, errors.AssertionFailedf("cannot resolve types in dump by OID") -} - -func collectUserDefinedSchemas(conn *sqlConn, dbName string, ts string) ([]string, error) { - query := ` -SELECT - schema_name -FROM - %s.information_schema.schemata -AS OF SYSTEM TIME %s -WHERE - crdb_is_user_defined = 'YES' AND - catalog_name = $1 -` - rows, err := conn.Query(fmt.Sprintf(query, tree.NameString(dbName), lex.EscapeSQLString(ts)), []driver.Value{dbName}) - if err != nil { - // On versions before 20.2, the cluster won't have the crdb_is_user_defined - // column. If we can't find it, then continue with an empty set of user - // defined schemas. - if pqErr := (*pq.Error)(nil); errors.As(err, &pqErr) { - if pgcode.MakeCode(string(pqErr.Code)) == pgcode.UndefinedColumn { - return nil, nil - } - } - return nil, err - } - vals := make([]driver.Value, 1) - var schemas []string - for { - if err := rows.Next(vals); err == io.EOF { - break - } else if err != nil { - return nil, err - } - // Get the schema name from the row. - schemaI := vals[0] - schema, ok := schemaI.(string) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", schemaI) - } - schemas = append(schemas, schema) - } - return schemas, nil -} - -// collectUserDefinedTypes constructs a dumpTypeContext consisting of all user -// defined types in the requested database. -func collectUserDefinedTypes(conn *sqlConn, dbName string, ts string) (*dumpTypeContext, error) { - query := ` -SELECT - descriptor_id, schema_name, descriptor_name, create_statement, enum_members -FROM - "".crdb_internal.create_type_statements -AS OF SYSTEM TIME %s -WHERE - database_name = $1 -` - rows, err := conn.Query(fmt.Sprintf(query, lex.EscapeSQLString(ts)), []driver.Value{dbName}) - if err != nil { - // On versions before 20.2, the cluster won't have the - // crdb_internal.create_type_statements table. If we can't find it, - // continue with an empty type context. 
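- // The missing table surfaces as a pq error with code UndefinedTable, which is what the check below looks for.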
- if pqErr := (*pq.Error)(nil); errors.As(err, &pqErr) { - if pgcode.MakeCode(string(pqErr.Code)) == pgcode.UndefinedTable { - return &dumpTypeContext{}, nil - } - } - return nil, err - } - vals := make([]driver.Value, 5) - var createStatements []string - typContext := &dumpTypeContext{ - typMap: make(map[types.UserDefinedTypeName]*types.T), - } - for { - if err := rows.Next(vals); err == io.EOF { - break - } else if err != nil { - return nil, err - } - - // Pull out the needed values from the query. - idI := vals[0] - id, ok := idI.(int64) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", idI) - } - scNameI := vals[1] - scName, ok := scNameI.(string) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", scName) - } - nameI := vals[2] - name, ok := nameI.(string) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", name) - } - createStatementI := vals[3] - createStatement, ok := createStatementI.(string) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", createStatementI) - } - // Remember all of the create statements. - createStatements = append(createStatements, createStatement) - enumMembersI := vals[4] - enumMembersS, ok := enumMembersI.([]byte) - if !ok { - return nil, errors.AssertionFailedf("unexpected value %T", enumMembersI) - } - var enumMembers []string - if enumMembersS != nil { - // The driver sends back arrays as bytes, so we have to parse the array - // if we want to access its elements. - arr, _, err := tree.ParseDArrayFromString( - tree.NewTestingEvalContext(serverCfg.Settings), string(enumMembersS), types.String) - if err != nil { - return nil, err - } - for _, d := range arr.Array { - enumMembers = append(enumMembers, string(tree.MustBeDString(d))) - } - } - - // Based on the retrieved information, construct an entry of the correct - // type kind in the typing context. - switch { - case len(enumMembers) != 0: - typ := types.MakeEnum(typedesc.TypeIDToOID(descpb.ID(id)), 0 /* arrayTypeOID */) - typ.TypeMeta = types.UserDefinedTypeMetadata{ - Name: &types.UserDefinedTypeName{ - Name: name, - Schema: scName, - }, - EnumData: &types.EnumMetadata{ - LogicalRepresentations: enumMembers, - // Make an array of empty physical representations for the enum. - // We won't need the physical representations while dumping, but enum - // internals expect that there are as many physical representations - // as there are logical representations. - PhysicalRepresentations: make([][]byte, len(enumMembers)), - IsMemberReadOnly: make([]bool, len(enumMembers)), - }, - } - typContext.typMap[*typ.TypeMeta.Name] = typ - default: - // If we don't know about this type kind, then return an error. - return nil, errors.Newf("%q is not a supported type kind", name) - } - } - typContext.createStatements = createStatements - return typContext, nil -} - -// getAsOf converts the input AS OF argument into a usable cluster timestamp, -// or returns a default if the argument was not specified. -func getAsOf(conn *sqlConn, asOf string) (string, error) { - var clusterTS string - if asOf == "" { - vals, err := conn.QueryRow("SELECT cluster_logical_timestamp()", nil) - if err != nil { - return "", err - } - clusterTS = string(vals[0].([]byte)) - } else { - // Validate the timestamp. This prevents SQL injection. 
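- // The validated string is later interpolated into AS OF SYSTEM TIME clauses via lex.EscapeSQLString, so only values that parse as a timestamp are allowed through.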
- if _, _, err := tree.ParseDTimestamp(nil, asOf, time.Nanosecond); err != nil { - return "", err - } - clusterTS = asOf - } - return clusterTS, nil -} - -type dumpTable struct { - schema string - table string -} - -// getDumpMetadata retrieves the table information for the specified table(s). -// It also retrieves the cluster timestamp at which the metadata was -// retrieved. -func getDumpMetadata( - conn *sqlConn, dbName string, tableNames []string, clusterTS string, -) (mds []basicMetadata, err error) { - var dumpTables []dumpTable - if len(tableNames) == 0 { - var err error - dumpTables, err = getTableNames(conn, dbName, clusterTS) - if err != nil { - return nil, err - } - } else { - // Try and resolve the input table names. - for _, table := range tableNames { - // Attempt to parse the input table name. Note that we use - // ParseTableNameWithQualifiedNames here because input table names to - // dump are not necessarily going to have quoted identifiers. - tableName, err := parser.ParseTableNameWithQualifiedNames(table) - if err != nil { - return nil, err - } - dt := dumpTable{table: tableName.Object()} - switch tableName.NumParts { - case 1: - // If there is no qualification, then the table is assumed to be in - // the public schema. - dt.schema = tree.PublicSchema - case 2: - dt.schema = tableName.Schema() - default: - return nil, errors.Newf("cannot qualify name with database: %s", tableName) - } - dumpTables = append(dumpTables, dt) - } - } - - mds = make([]basicMetadata, len(dumpTables)) - for i, dumpTable := range dumpTables { - basicMD, err := getBasicMetadata(conn, dbName, dumpTable, clusterTS) - if err != nil { - return nil, err - } - mds[i] = basicMD - } - - return mds, nil -} - -// getTableNames retrieves all tables names in the given database. Following -// pg_dump, we ignore all descriptors which are part of the temp schema. This -// includes tables, views and sequences. -func getTableNames(conn *sqlConn, dbName string, ts string) (tableNames []dumpTable, err error) { - rows, err := conn.Query(fmt.Sprintf(` - SELECT schema_name, descriptor_name - FROM "".crdb_internal.create_statements - AS OF SYSTEM TIME %s - WHERE database_name = $1 AND schema_name NOT LIKE $2 - `, lex.EscapeSQLString(ts)), []driver.Value{dbName, sessiondata.PgTempSchemaName + "%"}) - if err != nil { - return nil, err - } - - vals := make([]driver.Value, 2) - for { - if err := rows.Next(vals); err == io.EOF { - break - } else if err != nil { - return nil, err - } - schemaI := vals[0] - schema, ok := schemaI.(string) - if !ok { - return nil, fmt.Errorf("unexpected value: %T", schemaI) - } - nameI := vals[1] - name, ok := nameI.(string) - if !ok { - return nil, fmt.Errorf("unexpected value: %T", nameI) - } - tableNames = append(tableNames, dumpTable{table: name, schema: schema}) - } - - if err := rows.Close(); err != nil { - return nil, err - } - - return tableNames, nil -} - -func getBasicMetadata( - conn *sqlConn, dbName string, table dumpTable, ts string, -) (basicMetadata, error) { - tn := tree.MakeTableNameWithSchema(tree.Name(dbName), tree.Name(table.schema), tree.Name(table.table)) - // Fetch table ID. 
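- // Along with the ID, the query fetches the schema, CREATE statement, descriptor kind, and the ALTER/VALIDATE statements in a single round trip.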
- dbNameEscaped := tree.NameString(dbName) - vals, err := conn.QueryRow(fmt.Sprintf(` - SELECT - schema_name, - descriptor_id, - create_nofks, - descriptor_type, - alter_statements, - validate_statements - FROM %s.crdb_internal.create_statements - AS OF SYSTEM TIME %s - WHERE database_name = $1 - AND schema_name = $2 - AND descriptor_name = $3 - `, dbNameEscaped, lex.EscapeSQLString(ts)), []driver.Value{dbName, table.schema, table.table}) - if err != nil { - if err == io.EOF { - return basicMetadata{}, errors.Wrap( - errors.Errorf("relation %s does not exist", tree.ErrString(&tn)), - "getBasicMetadata", - ) - } - return basicMetadata{}, errors.Wrap(err, "getBasicMetadata") - } - - // Check the schema to disallow dumping temp tables, views and sequences. This - // will only be triggered if a user explicitly specifies a temp construct as - // one of the arguments to the `cockroach dump` command. When no table names - // are specified on the CLI, we ignore temp tables at the stage where we read - // all table names in getTableNames. - schemaNameI := vals[0] - schemaName, ok := schemaNameI.(string) - if !ok { - return basicMetadata{}, fmt.Errorf("unexpected value: %T", schemaNameI) - } - if strings.HasPrefix(schemaName, sessiondata.PgTempSchemaName) { - return basicMetadata{}, errors.Newf("cannot dump temp table %s", tn.String()) - } - - idI := vals[1] - id, ok := idI.(int64) - if !ok { - return basicMetadata{}, fmt.Errorf("unexpected value: %T", idI) - } - createStatementI := vals[2] - createStatement, ok := createStatementI.(string) - if !ok { - return basicMetadata{}, fmt.Errorf("unexpected value: %T", createStatementI) - } - kindI := vals[3] - kind, ok := kindI.(string) - if !ok { - return basicMetadata{}, fmt.Errorf("unexpected value: %T", kindI) - } - alterStatements, err := extractArray(vals[4]) - if err != nil { - return basicMetadata{}, err - } - validateStatements, err := extractArray(vals[5]) - if err != nil { - return basicMetadata{}, err - } - - // Get dependencies. - rows, err := conn.Query(fmt.Sprintf(` - SELECT dependson_id - FROM %s.crdb_internal.backward_dependencies - AS OF SYSTEM TIME %s - WHERE descriptor_id = $1 - `, dbNameEscaped, lex.EscapeSQLString(ts)), []driver.Value{id}) - if err != nil { - return basicMetadata{}, err - } - vals = make([]driver.Value, 1) - - var refs []int64 - for { - if err := rows.Next(vals); err == io.EOF { - break - } else if err != nil { - return basicMetadata{}, err - } - id := vals[0].(int64) - refs = append(refs, id) - } - if err := rows.Close(); err != nil { - return basicMetadata{}, err - } - - md := basicMetadata{ - ID: id, - name: &tn, - createStmt: createStatement, - dependsOn: refs, - kind: kind, - alter: alterStatements, - validate: validateStatements, - ts: ts, - } - - return md, nil -} - -func extractArray(val interface{}) ([]string, error) { - b, ok := val.([]byte) - if !ok { - return nil, fmt.Errorf("unexpected value: %T", b) - } - evalCtx := tree.NewTestingEvalContext(serverCfg.Settings) - arr, _, err := tree.ParseDArrayFromString(evalCtx, string(b), types.String) - if err != nil { - return nil, err - } - res := make([]string, len(arr.Array)) - for i, v := range arr.Array { - res[i] = string(*v.(*tree.DString)) - } - return res, nil -} - -func makeMetadataQuery(md basicMetadata, columnName string) string { - // This query is parameterized by the column name because of - // 2.0/2.1beta/2.1 trans-version compatibility requirements. See - // below for details. 
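- // On newer servers columnName is CRDB_SQL_TYPE; older servers only offer DATA_TYPE, and fetchColumnsNamesAndTypes falls back accordingly.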
- return fmt.Sprintf(` - SELECT COLUMN_NAME, %s - FROM %s.information_schema.columns - AS OF SYSTEM TIME %s - WHERE TABLE_CATALOG = $1 - AND TABLE_SCHEMA = $2 - AND TABLE_NAME = $3 - AND GENERATION_EXPRESSION = '' - `, columnName, &md.name.CatalogName, lex.EscapeSQLString(md.ts)) -} - -func fetchColumnsNamesAndTypes(conn *sqlConn, md basicMetadata, noHidden bool) (*sqlRows, error) { - query := makeMetadataQuery(md, "CRDB_SQL_TYPE") - if noHidden { - query = query + ` AND IS_HIDDEN = 'NO'` - } - rows, err := conn.Query(query, - []driver.Value{md.name.Catalog(), md.name.Schema(), md.name.Table()}) - if err != nil { - // IS_HIDDEN was introduced in the first 2.1 beta. CRDB_SQL_TYPE - // some time after that. To ensure `cockroach dump` works across - // versions we must try the previous forms if the first form - // fails. - // - // TODO(knz): Remove this fallback logic post-2.2. - if strings.Contains(err.Error(), "column \"crdb_sql_type\" does not exist") { - // Pre-2.1 CRDB_SQL_HIDDEN did not exist in - // information_schema.columns. When it does not exist, - // information_schema.columns.data_type contains a usable SQL - // type name instead. Use that. - query := makeMetadataQuery(md, "DATA_TYPE") - if noHidden { - query = query + ` AND IS_HIDDEN = 'NO'` - } - rows, err = conn.Query(query, - []driver.Value{md.name.Catalog(), md.name.Schema(), md.name.Table()}) - } - if strings.Contains(err.Error(), "column \"is_hidden\" does not exist") { - // Pre-2.1 IS_HIDDEN did not exist in information_schema.columns. - // When it does not exist, information_schema.columns only returns - // non-hidden columns so we can still use that. - rows, err = conn.Query(makeMetadataQuery(md, "DATA_TYPE"), - []driver.Value{md.name.Catalog(), md.name.Schema(), md.name.Table()}) - } - if err != nil { - return nil, err - } - } - return rows, err -} - -func constructTableMetadata(rows *sqlRows, md basicMetadata) (tableMetadata, error) { - vals := make([]driver.Value, 2) - coltypes := make(map[string]tree.ResolvableTypeReference) - colnames := tree.NewFmtCtx(tree.FmtSimple) - defer colnames.Close() - for { - if err := rows.Next(vals); err == io.EOF { - break - } else if err != nil { - return tableMetadata{}, err - } - nameI, typI := vals[0], vals[1] - name, ok := nameI.(string) - if !ok { - return tableMetadata{}, fmt.Errorf("unexpected value: %T", nameI) - } - typ, ok := typI.(string) - if !ok { - return tableMetadata{}, fmt.Errorf("unexpected value: %T", typI) - } - - // Transform the type name to an internal coltype. - sql := fmt.Sprintf("CREATE TABLE woo (x %s)", typ) - stmt, err := parser.ParseOne(sql) - if err != nil { - return tableMetadata{}, fmt.Errorf("type %s is not a valid CockroachDB type", typ) - } - coltypes[name] = stmt.AST.(*tree.CreateTable).Defs[0].(*tree.ColumnTableDef).Type - if colnames.Len() > 0 { - colnames.WriteString(", ") - } - colnames.FormatName(name) - } - if err := rows.Close(); err != nil { - return tableMetadata{}, err - } - - return tableMetadata{ - basicMetadata: md, - - columnNames: colnames.String(), - columnTypes: coltypes, - }, nil -} - -func getMetadataForTable(conn *sqlConn, md basicMetadata) (tableMetadata, error) { - // Fetch column types. - // - // TODO(knz): this approach is flawed, see #28948. 
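- // Column types are recovered by parsing the textual type name from information_schema back through the SQL grammar (see constructTableMetadata), which is the flaw the TODO refers to.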
- - rows, err := fetchColumnsNamesAndTypes(conn, md, true) - if err != nil { - return tableMetadata{}, err - } - - metadata, err := constructTableMetadata(rows, md) - if err != nil { - return tableMetadata{}, err - } - - if len(metadata.columnNames) == 0 { - rows, err := fetchColumnsNamesAndTypes(conn, md, false) - if err != nil { - return tableMetadata{}, err - } - - return constructTableMetadata(rows, md) - } - return metadata, err -} - -// dumpCreateTable dumps the CREATE statement of the specified table to w. -func dumpCreateTable(w io.Writer, md basicMetadata) error { - if _, err := w.Write([]byte(md.createStmt)); err != nil { - return err - } - if _, err := w.Write([]byte(";\n")); err != nil { - return err - } - return nil -} - -const ( - // insertRows is the number of rows per INSERT statement. - insertRows = 100 -) - -func dumpSequenceData(w io.Writer, conn *sqlConn, bmd basicMetadata) error { - // Get sequence value. - vals, err := conn.QueryRow(fmt.Sprintf( - "SELECT last_value FROM %s AS OF SYSTEM TIME %s", - bmd.name, lex.EscapeSQLString(bmd.ts), - ), nil) - if err != nil { - return err - } - seqVal := vals[0].(int64) - - // Get sequence increment. - // TODO(knz,vilterp): This could use a shortcut via crdb_internal. - vals2, err := conn.QueryRow(fmt.Sprintf( - `SELECT inc - FROM (SELECT s.seqincrement AS inc - FROM %[1]s.pg_catalog.pg_namespace n, %[1]s.pg_catalog.pg_class c, %[1]s.pg_catalog.pg_sequence s - WHERE n.nspname = %[2]s - AND n.oid = c.relnamespace - AND c.relname = %[3]s - AND c.oid = s.seqrelid) - AS OF SYSTEM TIME %[4]s`, - &bmd.name.CatalogName, - lex.EscapeSQLString(bmd.name.Schema()), - lex.EscapeSQLString(bmd.name.Table()), - lex.EscapeSQLString(bmd.ts), - ), nil) - if err != nil { - return err - } - seqInc := vals2[0].(int64) - - fmt.Fprintln(w) - - // Dump `setval(name, val + inc, false)`. This will cause the value to be - // set to `(val + inc) - inc = val`, so that the next value given out by the - // sequence will be `val`. This also avoids the minval check -- a sequence with - // a minval of 1 will have its value saved in KV as 0, so that the next value - // given out is 1. - fmt.Fprintf( - w, "SELECT setval(%s, %d, false);\n", - lex.EscapeSQLString(tree.NameString(bmd.name.Table())), seqVal+seqInc, - ) - - return nil -} - -// dumpTableData dumps the data of the specified table to w. -func dumpTableData( - w io.Writer, conn *sqlConn, typContext tree.TypeReferenceResolver, bmd basicMetadata, -) error { - md, err := getMetadataForTable(conn, bmd) - if err != nil { - return err - } - var collationEnv tree.CollationEnvironment - bs := fmt.Sprintf("SELECT %s FROM %s AS OF SYSTEM TIME %s ORDER BY PRIMARY KEY %[2]s", - md.columnNames, - md.name, - lex.EscapeSQLString(md.ts), - ) - inserts := make([]string, 0, insertRows) - rows, err := conn.Query(bs, nil) - if err != nil { - return err - } - cols := rows.Columns() - // Make 2 []driver.Values and alternate sending them on the chan. This is - // needed so val encoding can proceed at the same time as fetching a new - // row. There's no benefit to having more than 2 because that's all we can - // encode at once if we want to preserve the select order. - var valArray [2][]driver.Value - for i := range valArray { - valArray[i] = make([]driver.Value, len(cols)) - } - g := ctxgroup.WithContext(context.Background()) - valsCh := make(chan []driver.Value) - // stringsCh receives VALUES lines and batches them before writing to the - // output. Buffering this chan allows the val encoding to proceed during - // writes. 
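- // The overall pipeline is: rows.Next -> valsCh -> datum encoding -> stringsCh -> batched INSERT writer.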
- stringsCh := make(chan string, insertRows) - - g.GoCtx(func(ctx context.Context) error { - // Fetch SQL rows and put them onto valsCh. - defer close(valsCh) - done := ctx.Done() - for i := 0; ; i++ { - vals := valArray[i%len(valArray)] - if err := rows.Next(vals); err == io.EOF { - return rows.Close() - } else if err != nil { - return err - } - select { - case <-done: - return ctx.Err() - case valsCh <- vals: - } - } - }) - g.GoCtx(func(ctx context.Context) error { - // Convert SQL rows into VALUE strings. - defer close(stringsCh) - f := tree.NewFmtCtx(tree.FmtParsableNumerics) - defer f.Close() - done := ctx.Done() - for vals := range valsCh { - f.Reset() - // Values need to be correctly encoded for INSERT statements in a text file. - for si, sv := range vals { - if si > 0 { - f.WriteString(", ") - } - ct, err := tree.ResolveType(ctx, md.columnTypes[cols[si]], typContext) - if err != nil { - return err - } - var d tree.Datum - // TODO(knz): this approach is brittle+flawed, see #28948. - // TODO(mjibson): can we use tree.ParseDatumStringAs here? - switch t := sv.(type) { - case nil: - d = tree.DNull - case bool: - d = tree.MakeDBool(tree.DBool(t)) - case int64: - d = tree.NewDInt(tree.DInt(t)) - case float64: - d = tree.NewDFloat(tree.DFloat(t)) - case string: - switch ct.Family() { - case types.StringFamily: - d = tree.NewDString(t) - case types.CollatedStringFamily: - d, err = tree.NewDCollatedString(t, ct.Locale(), &collationEnv) - if err != nil { - return err - } - default: - return errors.AssertionFailedf("unknown string type %s", ct) - } - case []byte: - // TODO(knz): this approach is brittle+flawed, see #28948. - switch ct.Family() { - case types.IntervalFamily: - d, err = tree.ParseDInterval(string(t)) - if err != nil { - return err - } - case types.BytesFamily: - d = tree.NewDBytes(tree.DBytes(t)) - case types.UuidFamily: - d, err = tree.ParseDUuidFromString(string(t)) - if err != nil { - return err - } - case types.INetFamily: - d, err = tree.ParseDIPAddrFromINetString(string(t)) - if err != nil { - return err - } - case types.Box2DFamily: - d, err = tree.ParseDBox2D(string(t)) - if err != nil { - return err - } - case types.GeographyFamily: - d, err = tree.ParseDGeography(string(t)) - if err != nil { - return err - } - case types.GeometryFamily: - d, err = tree.ParseDGeometry(string(t)) - if err != nil { - return err - } - case types.JsonFamily: - d, err = tree.ParseDJSON(string(t)) - if err != nil { - return err - } - case types.ArrayFamily: - // We can only observe ARRAY types by their [] suffix. - d, _, err = tree.ParseDArrayFromString( - tree.NewTestingEvalContext(serverCfg.Settings), string(t), ct.ArrayContents()) - if err != nil { - return err - } - case types.StringFamily: - // STRING types can have optional length suffixes, so only - // examine the prefix of the type. - d = tree.NewDString(string(t)) - case types.DecimalFamily: - // DECIMAL types can have optional length suffixes, so only - // examine the prefix of the type. - d, err = tree.ParseDDecimal(string(t)) - if err != nil { - return err - } - case types.OidFamily: - var i *tree.DInt - i, err = tree.ParseDInt(string(t)) - if err != nil { - return err - } - d = tree.NewDOid(*i) - case types.EnumFamily: - // Enum values are streamed back in their logical representation. 
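- // That is, the member's name rather than its physical encoding, which is why collectUserDefinedTypes only needed the logical representations.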
- d, err = tree.MakeDEnumFromLogicalRepresentation(ct, string(t)) - if err != nil { - return err - } - case types.BitFamily: - d, err = tree.ParseDBitArray(string(t)) - if err != nil { - return err - } - default: - return errors.Errorf("unknown []byte type: %s, %v: %s", t, cols[si], md.columnTypes[cols[si]]) - } - case time.Time: - switch ct.Family() { - case types.DateFamily: - d, err = tree.NewDDateFromTime(t) - if err != nil { - return err - } - case types.TimeFamily: - // pq awkwardly represents TIME as a time.Time with date 0000-01-01. - d = tree.MakeDTime(timeofday.FromTimeAllow2400(t)) - case types.TimeTZFamily: - d = tree.NewDTimeTZ(timetz.MakeTimeTZFromTimeAllow2400(t)) - case types.TimestampFamily: - d, err = tree.MakeDTimestamp(t, time.Nanosecond) - if err != nil { - return err - } - case types.TimestampTZFamily: - d, err = tree.MakeDTimestampTZ(t, time.Nanosecond) - if err != nil { - return err - } - default: - return errors.Errorf("unknown timestamp type: %s, %v: %s", t, cols[si], md.columnTypes[cols[si]]) - } - default: - return errors.Errorf("unknown field type: %T (%s)", t, cols[si]) - } - d.Format(f) - } - select { - case <-done: - return ctx.Err() - case stringsCh <- f.String(): - } - } - return nil - }) - g.Go(func() error { - // Batch SQL strings into groups and write to output. - for s := range stringsCh { - inserts = append(inserts, s) - if len(inserts) == cap(inserts) { - writeInserts(w, md, inserts) - inserts = inserts[:0] - } - } - if len(inserts) != 0 { - writeInserts(w, md, inserts) - inserts = inserts[:0] - } - return nil - }) - return g.Wait() -} - -func writeInserts(w io.Writer, tmd tableMetadata, inserts []string) { - // Ensure that the table name gets formatted with its schema. - tn := tree.MakeTableNameWithSchema(tmd.name.CatalogName, tmd.name.SchemaName, tmd.name.ObjectName) - tn.ExplicitCatalog = false - fmt.Fprintf(w, "\nINSERT INTO %s (%s) VALUES", &tn, tmd.columnNames) - for idx, values := range inserts { - if idx > 0 { - fmt.Fprint(w, ",") - } - fmt.Fprintf(w, "\n\t(%s)", values) - } - fmt.Fprintln(w, ";") -} diff --git a/pkg/cli/dump_test.go b/pkg/cli/dump_test.go deleted file mode 100644 index 0aa8d6fa82da..000000000000 --- a/pkg/cli/dump_test.go +++ /dev/null @@ -1,1053 +0,0 @@ -// Copyright 2016 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package cli - -import ( - "bytes" - "context" - gosql "database/sql" - "database/sql/driver" - "fmt" - "io" - "net/url" - "path/filepath" - "reflect" - "strings" - "testing" - "time" - "unicode/utf8" - - "github.com/cockroachdb/apd/v2" - "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/testutils/skip" - "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" - "github.com/cockroachdb/cockroach/pkg/util/bitarray" - "github.com/cockroachdb/cockroach/pkg/util/duration" - "github.com/cockroachdb/cockroach/pkg/util/ipaddr" - "github.com/cockroachdb/cockroach/pkg/util/json" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/randutil" - "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" - "github.com/cockroachdb/cockroach/pkg/util/uuid" - "github.com/cockroachdb/datadriven" - "github.com/spf13/pflag" - "github.com/stretchr/testify/require" -) - -// TestDumpData uses the testdata/dump directory to execute SQL statements -// and compare dump output with expected output. File format is from the -// datadriven package. -// -// The commands supported in the data files are: -// -// sql: execute the commands in the input section; no arguments supported. -// -// dump: runs the CLI dump command with the given arguments, using its -// output as the expected result. Then loads the data back into an empty -// server and dumps it again to ensure the dump is roundtrippable. If the -// input section is equal to `noroundtrip` the roundtrip step is skipped -// (i.e., only the first dump is done). After a roundtripped dump, the tmp -// database may be examined to verify correctness. 
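- //
- // A data file might look like this (hypothetical sketch of the
- // datadriven format: command, input, "----", expected output):
- //
- //   sql
- //   CREATE TABLE d.t (i INT)
- //   ----
- //   CREATE TABLE
- //
- //   dump d t
- //   ----
- //   CREATE TABLE public.t (...);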
-func TestDumpData(t *testing.T) { - defer leaktest.AfterTest(t)() - skip.UnderRace(t, "takes >1min under race") - - datadriven.Walk(t, filepath.Join("testdata", "dump"), func(t *testing.T, path string) { - c := newCLITest(cliTestParams{t: t}) - c.omitArgs = true - defer c.cleanup() - - datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { - args := []string{d.Cmd} - switch d.Cmd { - case "sql": - args = append(args, "-e", d.Input) - case "dump": - for _, a := range d.CmdArgs { - args = append(args, a.String()) - } - default: - d.Fatalf(t, "unknown command: %s", d.Cmd) - } - s, err := c.RunWithCaptureArgs(args) - if err != nil { - d.Fatalf(t, "%v", err) - } - if d.Cmd == "dump" && d.Input != "noroundtrip" { - if s != d.Expected { - return s - } - - c.RunWithArgs([]string{"sql", "-e", "drop database if exists tmp; create database tmp"}) - if out, err := c.RunWithCaptureArgs([]string{"sql", "-d", "tmp", "-e", s}); err != nil { - d.Fatalf(t, "%v", err) - } else { - log.Infof(context.Background(), - "TestDumpData: executed SQL: %s\nresult: %s", s, out) - } - args[1] = "tmp" - roundtrip, err := c.RunWithCaptureArgs(args) - if err != nil { - d.Fatalf(t, "%v", err) - } - if roundtrip != s { - d.Fatalf(t, "roundtrip results unexpected: %s, expected: %s", roundtrip, s) - } - } - return s - }) - }) -} - -func dumpSingleTable(w io.Writer, conn *sqlConn, dbName string, tName string) error { - clusterTS, err := getAsOf(conn, "" /* asOf */) - if err != nil { - return err - } - mds, err := getDumpMetadata(conn, dbName, []string{tName}, clusterTS) - if err != nil { - return err - } - if err := dumpCreateTable(w, mds[0]); err != nil { - return err - } - return dumpTableData(w, conn, nil /* typContext */, mds[0]) -} - -func TestDumpBytes(t *testing.T) { - defer leaktest.AfterTest(t)() - - c := newCLITest(cliTestParams{t: t}) - defer c.cleanup() - - url, cleanup := sqlutils.PGUrl(t, c.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) - defer cleanup() - - conn := makeSQLConn(url.String()) - defer conn.Close() - - if err := conn.Exec(` - CREATE DATABASE d; - SET DATABASE = d; - CREATE TABLE t (b BYTES PRIMARY KEY); - `, nil); err != nil { - t.Fatal(err) - } - - for i := int64(0); i < 256; i++ { - if err := conn.Exec("INSERT INTO t VALUES ($1)", []driver.Value{[]byte{byte(i)}}); err != nil { - t.Fatal(err) - } - } - - var b bytes.Buffer - if err := dumpSingleTable(&b, conn, "d", "t"); err != nil { - t.Fatal(err) - } - dump := b.String() - b.Reset() - - if err := conn.Exec(` - CREATE DATABASE o; - SET DATABASE = o; - `, nil); err != nil { - t.Fatal(err) - } - if err := conn.Exec(dump, nil); err != nil { - t.Fatal(err) - } - if err := dumpSingleTable(&b, conn, "o", "t"); err != nil { - t.Fatal(err) - } - dump2 := b.String() - if dump != dump2 { - t.Fatalf("unmatching dumps:\n%s\n%s", dump, dump2) - } -} - -const durationRandom = "duration-random" - -var randomTestTime = pflag.Duration(durationRandom, time.Second, "duration for randomized dump test to run") - -func init() { - pflag.Lookup(durationRandom).Hidden = true -} - -// TestDumpRandom generates a random number of random rows with all data -// types. This data is dumped, inserted, and dumped again. The two dumps -// are compared for exactness. The data from the inserted dump is then -// SELECT'd and compared to the original generated data to ensure it is -// round-trippable. 
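- // The generated rows cover ints, floats, bools, decimals, dates, timestamps, intervals, strings, bytes, UUIDs, INET, JSON, and bit arrays.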
-func TestDumpRandom(t *testing.T) { - defer leaktest.AfterTest(t)() - - c := newCLITest(cliTestParams{t: t}) - defer c.cleanup() - - url, cleanup := sqlutils.PGUrl(t, c.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) - defer cleanup() - - conn := makeSQLConn(url.String()) - defer conn.Close() - - if err := conn.Exec(` - CREATE DATABASE d; - CREATE DATABASE o; - CREATE TABLE d.t ( - rowid int, - i int, - si smallint, - bi bigint, - f float, - fr real, - d date, - m timestamp, - mtz timestamptz, - n interval, - o bool, - e decimal, - s string, - b bytes, - u uuid, - ip inet, - j json, - single_bit bit, - var_bits varbit, - ba varbit[], - PRIMARY KEY (rowid, i, si, bi, f, fr, d, m, mtz, n, o, e, s, b, u, ip, single_bit, var_bits) - ); - SET extra_float_digits = 3; - `, nil); err != nil { - t.Fatal(err) - } - - rnd, seed := randutil.NewPseudoRand() - t.Logf("random seed: %v", seed) - - start := timeutil.Now() - - for iteration := 0; timeutil.Since(start) < *randomTestTime; iteration++ { - if err := conn.Exec(`DELETE FROM d.t`, nil); err != nil { - t.Fatal(err) - } - var generatedRows [][]driver.Value - count := rnd.Int63n(500) - t.Logf("random iteration %v: %v rows", iteration, count) - for _i := int64(0); _i < count; _i++ { - // Generate a random number of random inserts. - i := rnd.Int63() - f := rnd.Float64() - d, _ := pgdate.MakeCompatibleDateFromDisk(rnd.Int63n(10000)).ToTime() - m := timeutil.Unix(0, rnd.Int63()).Round(time.Microsecond) - sign := 1 - rnd.Int63n(2)*2 - dur := duration.MakeDuration(sign*rnd.Int63(), sign*rnd.Int63n(1000), sign*rnd.Int63n(1000)) - n := dur.String() - o := rnd.Intn(2) == 1 - e := apd.New(rnd.Int63(), rnd.Int31n(20)-10).String() - sr := make([]byte, rnd.Intn(500)) - if _, err := rnd.Read(sr); err != nil { - t.Fatal(err) - } - s := make([]byte, 0, len(sr)) - for _, b := range sr { - r := rune(b) - if !utf8.ValidRune(r) { - continue - } - s = append(s, []byte(string(r))...) - } - b := make([]byte, rnd.Intn(500)) - if _, err := rnd.Read(b); err != nil { - t.Fatal(err) - } - - uuidBytes := make([]byte, 16) - if _, err := rnd.Read(b); err != nil { - t.Fatal(err) - } - u, err := uuid.FromBytes(uuidBytes) - if err != nil { - t.Fatal(err) - } - - ip := ipaddr.RandIPAddr(rnd) - j, err := json.Random(20, rnd) - if err != nil { - t.Fatal(err) - } - singleBit := bitarray.Rand(rnd, 1) - varBits := bitarray.Rand(rnd, uint(rnd.Intn(128)) /* bit length*/).String() - // Constructing arrays of this format cannot have bitarrays of len 0. 
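- // Appending a trailing bit keeps every element non-empty; e.g. varBits "101" produces the array literal {1011,1010}.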
- varbitArray := fmt.Sprintf("{%s,%s}", varBits+"1", varBits+"0") - - vals := []driver.Value{ - _i, - i, - i & 0x7fff, // si - i, // bi - f, - f, // fr - d, - m, - m, - []byte(n), // intervals come out as `[]byte`s - o, - []byte(e), // decimals come out as `[]byte`s - string(s), - b, - []byte(u.String()), - []byte(ip.String()), - []byte(j.String()), - []byte(singleBit.String()), - []byte(varBits), - []byte(varbitArray), - } - if err := conn.Exec("INSERT INTO d.t VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)", vals); err != nil { - t.Fatal(err) - } - generatedRows = append(generatedRows, vals[1:]) - } - - check := func(table string) { - q := fmt.Sprintf("SELECT i, si, bi, f, fr, d, m, mtz, n, o, e, s, b, u, ip, j, single_bit, var_bits, ba FROM %s ORDER BY rowid", table) - nrows, err := conn.Query(q, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := nrows.Close(); err != nil { - t.Fatal(err) - } - }() - for gi, generatedRow := range generatedRows { - fetched := make([]driver.Value, len(nrows.Columns())) - if err := nrows.Next(fetched); err != nil { - t.Fatal(err) - } - - for i, fetchedVal := range fetched { - generatedVal := generatedRow[i] - if t, ok := fetchedVal.(time.Time); ok { - // dates and timestamps come out with offset zero (but - // not UTC specifically). - fetchedVal = t.UTC() - } - if !reflect.DeepEqual(fetchedVal, generatedVal) { - t.Errorf("NOT EQUAL: table %s, row %d, col %d\ngenerated (%T): %v (%s)\nselected (%T): %v (%s)\n", table, gi, i, generatedVal, generatedVal, generatedVal, fetchedVal, fetchedVal, fetchedVal) - } - } - if t.Failed() { - t.FailNow() - } - } - } - - check("d.t") - - var buf bytes.Buffer - if err := dumpSingleTable(&buf, conn, "d", "t"); err != nil { - t.Fatal(err) - } - dump := buf.String() - buf.Reset() - - if err := conn.Exec(` - SET DATABASE = o; - DROP TABLE IF EXISTS t; - `, nil); err != nil { - t.Fatal(err) - } - if err := conn.Exec(dump, nil); err != nil { - t.Fatal(err) - } - - check("o.t") - - if err := dumpSingleTable(&buf, conn, "o", "t"); err != nil { - t.Fatal(err) - } - dump2 := buf.String() - if dump != dump2 { - t.Fatalf("unmatching dumps:\nFIRST:\n%s\n\nSECOND:\n%s", dump, dump2) - } - } -} - -func TestDumpAsOf(t *testing.T) { - defer leaktest.AfterTest(t)() - - c := newCLITest(cliTestParams{t: t}) - defer c.cleanup() - - const create = ` - CREATE DATABASE d; - CREATE TABLE d.t (i int8); - INSERT INTO d.t VALUES (1); - SELECT now(); -` - - out, err := c.RunWithCaptureArgs([]string{"sql", "-e", create}) - if err != nil { - t.Fatal(err) - } - - // Last line is the timestamp. 
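- // SELECT now() is the final statement in the create script above, so its result is the last line of the captured output.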
- fs := strings.Split(strings.TrimSpace(out), "\n") - ts := fs[len(fs)-1] - - dump1, err := c.RunWithCaptureArgs([]string{"dump", "d", "t"}) - if err != nil { - t.Fatal(err) - } - - const want1 = `dump d t -CREATE TABLE public.t ( - i INT8 NULL, - FAMILY "primary" (i, rowid) -); - -INSERT INTO public.t (i) VALUES - (1); -` - if dump1 != want1 { - t.Fatalf("expected: %s\ngot: %s", want1, dump1) - } - - c.RunWithArgs([]string{"sql", "-e", ` - ALTER TABLE d.t ADD COLUMN j int8 DEFAULT 2; - INSERT INTO d.t VALUES (3, 4); - `}) - - dump2, err := c.RunWithCaptureArgs([]string{"dump", "d", "t"}) - if err != nil { - t.Fatal(err) - } - - const want2 = `dump d t -CREATE TABLE public.t ( - i INT8 NULL, - j INT8 NULL DEFAULT 2:::INT8, - FAMILY "primary" (i, rowid, j) -); - -INSERT INTO public.t (i, j) VALUES - (1, 2), - (3, 4); -` - if dump2 != want2 { - t.Fatalf("expected: %s\ngot: %s", want2, dump2) - } - - dumpAsOf, err := c.RunWithCaptureArgs([]string{"dump", "d", "t", "--as-of", ts}) - if err != nil { - t.Fatal(err) - } - // Remove the timestamp from the first line. - dumpAsOf = fmt.Sprintf("dump d t\n%s", strings.SplitN(dumpAsOf, "\n", 2)[1]) - if dumpAsOf != want1 { - t.Fatalf("expected: %s\ngot: %s", want1, dumpAsOf) - } - - if out, err := c.RunWithCaptureArgs([]string{"dump", "d", "t", "--as-of", "2000-01-01 00:00:00"}); err != nil { - t.Fatal(err) - } else if !strings.Contains(out, `database "d" does not exist`) { - t.Fatalf("unexpected output: %s", out) - } -} - -func TestDumpInterleavedTables(t *testing.T) { - defer leaktest.AfterTest(t)() - - c := newCLITest(cliTestParams{t: t}) - defer c.cleanup() - - const create = ` -CREATE DATABASE d; -CREATE TABLE d.customers (id INT PRIMARY KEY, name STRING(50)); -CREATE TABLE d.orders ( - customer INT, - id INT, - total DECIMAL(20, 5), - PRIMARY KEY (customer, id), - CONSTRAINT fk_customer FOREIGN KEY (customer) REFERENCES d.customers -) INTERLEAVE IN PARENT d.customers (customer); -CREATE INDEX i ON d.orders (customer, total) INTERLEAVE IN PARENT d.customers (customer); -` - - _, err := c.RunWithCaptureArgs([]string{"sql", "-e", create}) - if err != nil { - t.Fatal(err) - } - - dump1, err := c.RunWithCaptureArgs([]string{"dump", "d", "orders"}) - if err != nil { - t.Fatal(err) - } - - const want1 = `dump d orders -CREATE TABLE public.orders ( - customer INT8 NOT NULL, - id INT8 NOT NULL, - total DECIMAL(20,5) NULL, - CONSTRAINT "primary" PRIMARY KEY (customer ASC, id ASC), - FAMILY "primary" (customer, id, total) -) INTERLEAVE IN PARENT public.customers (customer); - -ALTER TABLE public.orders ADD CONSTRAINT fk_customer FOREIGN KEY (customer) REFERENCES public.customers(id); -CREATE INDEX i ON public.orders (customer ASC, total ASC) INTERLEAVE IN PARENT public.customers (customer); - --- Validate foreign key constraints. These can fail if there was unvalidated data during the dump. 
-ALTER TABLE public.orders VALIDATE CONSTRAINT fk_customer; -` - - if dump1 != want1 { - t.Fatalf("expected: %s\ngot: %s", want1, dump1) - } -} - -func TestDatabaseDumpCommand(t *testing.T) { - defer leaktest.AfterTest(t)() - skip.UnderRace(t, "takes >1min under race") - - tests := []struct { - name string - create string - expected string - }{ - { - name: "columnsless_table", - create: ` -CREATE DATABASE bar; -USE bar; -CREATE TABLE foo (); -`, - expected: `CREATE TABLE public.foo (FAMILY "primary" (rowid) -); -`, - }, - { - name: "table_with_columns", - create: ` -CREATE DATABASE bar; -USE bar; -CREATE TABLE foo (id int primary key, text string not null); -`, - expected: `CREATE TABLE public.foo ( - id INT8 NOT NULL, - text STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (id ASC), - FAMILY "primary" (id, text) -); -`, - }, - { - name: "autogenerate_hidden_colum", - create: ` -CREATE DATABASE bar; -USE bar; -CREATE TABLE foo(id int); -`, - expected: `CREATE TABLE public.foo ( - id INT8 NULL, - FAMILY "primary" (id, rowid) -); -`, - }, - { - name: "columns_less_table_with_data", - create: ` -CREATE DATABASE bar; -USE bar; -CREATE TABLE foo(id int); - -INSERT INTO foo(id) VALUES(1); -INSERT INTO foo(id) VALUES(2); -INSERT INTO foo(id) VALUES(3); - -ALTER TABLE foo DROP COLUMN id; -`, - expected: `CREATE TABLE public.foo (FAMILY "primary" (rowid) -); -`, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c := newCLITest(cliTestParams{t: t}) - c.omitArgs = true - defer c.cleanup() - - _, err := c.RunWithCaptureArgs([]string{"sql", "-e", test.create}) - if err != nil { - t.Fatal(err) - } - - dump, err := c.RunWithCaptureArgs([]string{"dump", "bar", "--dump-mode=schema"}) - if err != nil { - t.Fatal(err) - } - - if dump != test.expected { - t.Fatalf("expected: %s\ngot: %s", test.expected, dump) - } - - dumpWithData, err := c.RunWithCaptureArgs([]string{"dump", "bar", "--dump-mode=data"}) - if err != nil { - t.Fatal(err) - } - - // check we can actually reuse dump output - _, err = c.RunWithCaptureArgs([]string{"sql", "-e", fmt.Sprintf(`CREATE DATABASE TEST; -USE TEST; -%s -%s`, dump, dumpWithData)}) - if err != nil { - t.Fatal(err) - } - - result1, err := c.RunWithCaptureArgs([]string{"sql", "-e", "select * from bar.foo"}) - if err != nil { - t.Fatal(err) - } - - result2, err := c.RunWithCaptureArgs([]string{"sql", "-e", "select * from test.foo"}) - if err != nil { - t.Fatal(err) - } - - if result1 != result2 { - t.Fatalf("expected: %s\ngot: %s", test.expected, dump) - } - }) - } -} - -func TestDumpAllTables(t *testing.T) { - defer leaktest.AfterTest(t)() - skip.UnderRace(t, "takes >1min under race") - - tests := []struct { - name string - args []string - recreate bool - create string - expected string - clean string - }{ - { - name: " dump_all", - create: ` -CREATE DATABASE db1; -USE db1; -CREATE TABLE t1(id INT NOT NULL, pkey STRING PRIMARY KEY); - -INSERT INTO t1(id, pkey) VALUES(1, 'db1-aaaa'); -INSERT INTO t1(id, pkey) VALUES(2, 'db1-bbbb'); - -CREATE DATABASE db2; -USE db2; -CREATE TABLE t2(id INT NOT NULL, pkey STRING PRIMARY KEY); - -INSERT INTO t2(id, pkey) VALUES(1, 'db2-aaaa'); -INSERT INTO t2(id, pkey) VALUES(2, 'db2-bbbb'); -`, - expected: ` -CREATE DATABASE IF NOT EXISTS db1; -USE db1; - -CREATE TABLE public.t1 ( - id INT8 NOT NULL, - pkey STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (pkey ASC), - FAMILY "primary" (id, pkey) -); - -INSERT INTO public.t1 (id, pkey) VALUES - (1, 'db1-aaaa'), - (2, 'db1-bbbb'); - -CREATE DATABASE IF NOT 
EXISTS db2; -USE db2; - -CREATE TABLE public.t2 ( - id INT8 NOT NULL, - pkey STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (pkey ASC), - FAMILY "primary" (id, pkey) -); - -INSERT INTO public.t2 (id, pkey) VALUES - (1, 'db2-aaaa'), - (2, 'db2-bbbb'); -`, - }, - { - name: " dump_all_only_data", - args: []string{"--dump-mode=data"}, - create: ` -CREATE DATABASE db1; -USE db1; -CREATE TABLE t1(id INT NOT NULL, pkey STRING PRIMARY KEY); - -INSERT INTO t1(id, pkey) VALUES(1, 'db1-aaaa'); -INSERT INTO t1(id, pkey) VALUES(2, 'db1-bbbb'); - -CREATE DATABASE db2; -USE db2; -CREATE TABLE t2(id INT NOT NULL, pkey STRING PRIMARY KEY); - -INSERT INTO t2(id, pkey) VALUES(1, 'db2-aaaa'); -INSERT INTO t2(id, pkey) VALUES(2, 'db2-bbbb'); -`, - expected: ` -INSERT INTO public.t1 (id, pkey) VALUES - (1, 'db1-aaaa'), - (2, 'db1-bbbb'); - -INSERT INTO public.t2 (id, pkey) VALUES - (1, 'db2-aaaa'), - (2, 'db2-bbbb'); -`, - }, - { - name: "dump_cross_references", - recreate: true, - create: ` -SET CLUSTER SETTING sql.cross_db_fks.enabled = TRUE; -SET CLUSTER SETTING sql.cross_db_views.enabled = TRUE; -CREATE DATABASE dbB; -USE dbB; - -CREATE TABLE person( - id int PRIMARY KEY, - name string NOT NULL); - -INSERT INTO person(id, name) VALUES(1, 'John Smith'); -INSERT INTO person(id, name) VALUES(2, 'Joe Dow'); - -CREATE DATABASE dbA; -USE dbA; - -CREATE TABLE account( - id int PRIMARY KEY, - person_id int REFERENCES dbB.person(id), - accountNo int NOT NULL); - -INSERT INTO account(id, person_id, accountNo) VALUES(1, 1, 1111); -INSERT INTO account(id, person_id, accountNo) VALUES(2, 2, 2222); -`, - expected: ` -CREATE DATABASE IF NOT EXISTS dba; -USE dba; - -CREATE TABLE public.account ( - id INT8 NOT NULL, - person_id INT8 NULL, - accountno INT8 NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (id ASC), - FAMILY "primary" (id, person_id, accountno) -); - -INSERT INTO public.account (id, person_id, accountno) VALUES - (1, 1, 1111), - (2, 2, 2222); - -CREATE DATABASE IF NOT EXISTS dbb; -USE dbb; - -CREATE TABLE public.person ( - id INT8 NOT NULL, - name STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (id ASC), - FAMILY "primary" (id, name) -); - -INSERT INTO public.person (id, name) VALUES - (1, 'John Smith'), - (2, 'Joe Dow'); - -ALTER TABLE public.account ADD CONSTRAINT fk_person_id_ref_person FOREIGN KEY (person_id) REFERENCES dbb.public.person(id); - --- Validate foreign key constraints. These can fail if there was unvalidated data during the dump. 
-ALTER TABLE public.account VALIDATE CONSTRAINT fk_person_id_ref_person;
-`,
-			clean: `
-DROP DATABASE dba;
-DROP DATABASE dbb;
-`,
-		},
-		{
-			name: "verify_defaultdb_dump",
-			create: `
-CREATE TABLE foo(id INT NOT NULL);
-
-INSERT INTO foo(id) VALUES(1);
-INSERT INTO foo(id) VALUES(2);
-INSERT INTO foo(id) VALUES(3);
-
-CREATE DATABASE dba;
-USE dba;
-
-CREATE TABLE bar(id INT NOT NULL);
-
-INSERT INTO bar(id) VALUES(1);
-INSERT INTO bar(id) VALUES(2);
-`,
-			clean: `
-	USE defaultdb;
-	DROP TABLE foo;
-	DROP DATABASE dba;
-`,
-			recreate: true,
-			expected: `
-CREATE DATABASE IF NOT EXISTS dba;
-USE dba;
-
-CREATE TABLE public.bar (
-	id INT8 NOT NULL,
-	FAMILY "primary" (id, rowid)
-);
-
-INSERT INTO public.bar (id) VALUES
-	(1),
-	(2);
-
-CREATE DATABASE IF NOT EXISTS defaultdb;
-USE defaultdb;
-
-CREATE TABLE public.foo (
-	id INT8 NOT NULL,
-	FAMILY "primary" (id, rowid)
-);
-
-INSERT INTO public.foo (id) VALUES
-	(1),
-	(2),
-	(3);
-`,
-		},
-	}
-	for _, test := range tests {
-		tt := test
-		t.Run(tt.name, func(t *testing.T) {
-
-			c := newCLITest(cliTestParams{t: t})
-			c.omitArgs = true
-			defer c.cleanup()
-
-			_, err := c.RunWithCaptureArgs([]string{"sql", "-e", tt.create})
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			args := []string{"dump", "--dump-all"}
-			args = append(args, tt.args...)
-			dump, err := c.RunWithCaptureArgs(args)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			if dump != tt.expected {
-				t.Fatalf("expected: %s\ngot: %s", tt.expected, dump)
-			}
-
-			// Attempt to recreate from the dump if the test case defines
-			// a cleanup procedure.
-			if tt.recreate {
-				_, err := c.RunWithCaptureArgs([]string{"sql", "-e", tt.clean})
-				if err != nil {
-					t.Fatal(err)
-				}
-
-				_, err = c.RunWithCaptureArgs([]string{"sql", "-e", dump})
-				if err != nil {
-					t.Fatal(err)
-				}
-			}
-		})
-	}
-}
-
-// TestDumpTempTables tests how `cockroach dump` handles temporary tables,
-// views, and sequences.
-// This could not be written as a datadriven test because temp objects do not
-// persist across RunWithCaptureArgs() invocations. Therefore, the datadriven
-// infra cannot test our handling of temp objects, since they get deleted
-// between the sql and dump runs anyway. 
-func TestDumpTempTables(t *testing.T) { - defer leaktest.AfterTest(t)() - testInput := []struct { - name string - create string - expected string - args []string - }{ - { - name: "dump_db_only_temp", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE foo; -USE foo; -CREATE TEMP TABLE tmpbar (id INT PRIMARY KEY); -INSERT INTO tmpbar VALUES (1); - -CREATE TEMP VIEW tmpview (id) AS SELECT id FROM tmpbar; -CREATE TEMP SEQUENCE tmpseq START 1 INCREMENT 1; -`, - expected: ``, - args: []string{"foo"}, - }, - { - name: "dump_db_mix_permanent_and_temp", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE foo; -USE foo; -CREATE TABLE bar (id int primary key, text string not null); -INSERT INTO bar VALUES (1, 'a'); - -CREATE TEMP TABLE tmpbar (id int primary key); -`, - expected: `CREATE TABLE public.bar ( - id INT8 NOT NULL, - text STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (id ASC), - FAMILY "primary" (id, text) -); - -INSERT INTO public.bar (id, text) VALUES - (1, 'a'); -`, - args: []string{"foo"}, - }, - { - name: "dump_tmp_table_explicit", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE foo; -USE foo; - -CREATE TABLE bar (id INT PRIMARY KEY); - -CREATE TEMP TABLE tmpbar (id INT PRIMARY KEY); -INSERT INTO tmpbar VALUES (1); -`, - expected: "ERROR: getBasicMetadata: relation foo.public.tmpbar does not exist\n", - args: []string{"foo", "bar", "tmpbar"}, - }, - { - name: "dump_tmp_view_explicit", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE foo; -USE foo; - -CREATE TABLE bar (id INT PRIMARY KEY); - -CREATE TEMP VIEW tmpview (id) AS SELECT id FROM bar; -`, - expected: "ERROR: getBasicMetadata: relation foo.public.tmpview does not exist\n", - args: []string{"foo", "tmpview"}, - }, - { - name: "dump_tmp_sequence_explicit", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE foo; -USE foo; - -CREATE TEMP SEQUENCE tmpseq START 1 INCREMENT 1; -`, - expected: "ERROR: getBasicMetadata: relation foo.public.tmpseq does not exist\n", - args: []string{"foo", "tmpseq"}, - }, - { - name: "dump_all_with_tmp", - create: ` -SET experimental_enable_temp_tables = 'on'; -CREATE DATABASE db1; -USE db1; -CREATE TABLE t1(id INT NOT NULL, pkey STRING PRIMARY KEY); - -INSERT INTO t1(id, pkey) VALUES(1, 'db1-aaaa'); - -CREATE DATABASE db2; -USE db2; -CREATE TEMP TABLE t2(id INT NOT NULL, pkey STRING PRIMARY KEY); - -CREATE TABLE t3(id INT NOT NULL, pkey STRING PRIMARY KEY); -INSERT INTO t3(id, pkey) VALUES(1, 'db2-aaaa'); -`, - expected: ` -CREATE DATABASE IF NOT EXISTS db1; -USE db1; - -CREATE TABLE public.t1 ( - id INT8 NOT NULL, - pkey STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (pkey ASC), - FAMILY "primary" (id, pkey) -); - -INSERT INTO public.t1 (id, pkey) VALUES - (1, 'db1-aaaa'); - -CREATE DATABASE IF NOT EXISTS db2; -USE db2; - -CREATE TABLE public.t3 ( - id INT8 NOT NULL, - pkey STRING NOT NULL, - CONSTRAINT "primary" PRIMARY KEY (pkey ASC), - FAMILY "primary" (id, pkey) -); - -INSERT INTO public.t3 (id, pkey) VALUES - (1, 'db2-aaaa'); -`, - args: []string{"--dump-all"}, - }, - } - - for _, test := range testInput { - t.Run(test.name, func(t *testing.T) { - c := newCLITest(cliTestParams{t: t}) - c.omitArgs = true - defer c.cleanup() - - pgURL, cleanupFunc := sqlutils.PGUrl( - t, c.ServingSQLAddr(), t.Name(), - url.User(security.RootUser), - ) - defer cleanupFunc() - db, err := gosql.Open("postgres", pgURL.String()) - require.NoError(t, err) - - // Create the tables. 
- _, err = db.Exec(test.create) - require.NoError(t, err) - - var args []string - args = append(args, "dump") - args = append(args, test.args...) - args = append(args, "--dump-mode=both") - dump, err := c.RunWithCaptureArgs(args) - require.NoError(t, err) - if dump != test.expected { - t.Fatalf("expected: %s\ngot: %s", test.expected, dump) - } - - require.NoError(t, db.Close()) - }) - } -} diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go index b1bd1a3431b5..04838557f257 100644 --- a/pkg/cli/flags.go +++ b/pkg/cli/flags.go @@ -543,7 +543,6 @@ func init() { debugTimeSeriesDumpCmd, debugZipCmd, doctorClusterCmd, - dumpCmd, genHAProxyCmd, initCmd, quitCmd, @@ -663,14 +662,9 @@ func init() { boolFlag(f, &sqlCtx.debugMode, cliflags.CliDebugMode) } - varFlag(dumpCmd.Flags(), &dumpCtx.dumpMode, cliflags.DumpMode) - stringFlag(dumpCmd.Flags(), &dumpCtx.asOf, cliflags.DumpTime) - boolFlag(dumpCmd.Flags(), &dumpCtx.dumpAll, cliflags.DumpAll) - // Commands that establish a SQL connection. sqlCmds := []*cobra.Command{ sqlShellCmd, - dumpCmd, demoCmd, doctorClusterCmd, lsNodesCmd, diff --git a/pkg/cli/flags_test.go b/pkg/cli/flags_test.go index 5505d5e724ea..c07f561d5813 100644 --- a/pkg/cli/flags_test.go +++ b/pkg/cli/flags_test.go @@ -242,9 +242,9 @@ func TestClientURLFlagEquivalence(t *testing.T) { anyCmd := []string{"sql", "quit"} anyNonSQL := []string{"quit", "init"} - anySQL := []string{"sql", "dump"} + anySQL := []string{"sql"} sqlShell := []string{"sql"} - anyNonSQLShell := []string{"dump", "quit"} + anyNonSQLShell := []string{"quit"} testData := []struct { cmds []string diff --git a/pkg/cli/sql_util.go b/pkg/cli/sql_util.go index 9e07bfb2afae..61610a0c09d5 100644 --- a/pkg/cli/sql_util.go +++ b/pkg/cli/sql_util.go @@ -345,24 +345,6 @@ func (c *sqlConn) checkServerMetadata() error { return nil } -// requireServerVersion returns an error if the version of the connected server -// is not at least the given version. -func (c *sqlConn) requireServerVersion(required *version.Version) error { - _, versionString, _, err := c.getServerMetadata() - if err != nil { - return err - } - vers, err := version.Parse(versionString) - if err != nil { - return fmt.Errorf("unable to parse server version %q", versionString) - } - if !vers.AtLeast(required) { - return fmt.Errorf("incompatible client and server versions (detected server version: %s, required: %s)", - vers, required) - } - return nil -} - // getServerValue retrieves the first driverValue returned by the // given sql query. If the query fails or does not return a single // column, `false` is returned in the second result. diff --git a/pkg/cmd/roachtest/BUILD.bazel b/pkg/cmd/roachtest/BUILD.bazel index 83aa3b3839d3..25cc3a5972d2 100644 --- a/pkg/cmd/roachtest/BUILD.bazel +++ b/pkg/cmd/roachtest/BUILD.bazel @@ -31,7 +31,6 @@ go_library( "django.go", "django_blocklist.go", "drop.go", - "dump.go", "election.go", "encryption.go", "engine_switch.go", diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go index ed39f7ca472b..6aa7bdb64ed8 100644 --- a/pkg/cmd/roachtest/cluster.go +++ b/pkg/cmd/roachtest/cluster.go @@ -567,20 +567,6 @@ func execCmdWithBuffer(ctx context.Context, l *logger, args ...string) ([]byte, return out, nil } -// execCmdWithStdout executes the given command and returns its stdout -// output. If the return code is not 0, an error is also returned. -// l is used to log the command before running it. No output is logged. 
-func execCmdWithStdout(ctx context.Context, l *logger, args ...string) ([]byte, error) {
-	l.Printf("> %s\n", strings.Join(args, " "))
-	cmd := exec.CommandContext(ctx, args[0], args[1:]...)
-
-	out, err := cmd.Output()
-	if err != nil {
-		return out, errors.Wrapf(err, `%s`, strings.Join(args, ` `))
-	}
-	return out, nil
-}
-
 func makeGCEClusterName(name string) string {
 	name = strings.ToLower(name)
 	name = regexp.MustCompile(`[^-a-z0-9]+`).ReplaceAllString(name, "-")
@@ -2328,18 +2314,6 @@ func (c *cluster) RunWithBuffer(
 		append([]string{roachprod, "run", c.makeNodes(node), "--"}, args...)...)
 }
 
-// RunWithStdout runs a command on the specified node, returning the resulting
-// stdout.
-func (c *cluster) RunWithStdout(
-	ctx context.Context, l *logger, node nodeListOption, args ...string,
-) ([]byte, error) {
-	if err := errors.Wrap(ctx.Err(), "cluster.RunWithStdout"); err != nil {
-		return nil, err
-	}
-	return execCmdWithStdout(ctx, l,
-		append([]string{roachprod, "run", c.makeNodes(node), "--"}, args...)...)
-}
-
 // pgURL returns the Postgres endpoint for the specified node. It accepts a flag
 // specifying whether the URL should include the node's internal or external IP
 // address. In general, inter-cluster communication and should use internal IPs
diff --git a/pkg/cmd/roachtest/dump.go b/pkg/cmd/roachtest/dump.go
deleted file mode 100644
index 6ea5ae1f1cd5..000000000000
--- a/pkg/cmd/roachtest/dump.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The Cockroach Authors.
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package main
-
-import (
-	"context"
-	"strings"
-)
-
-// runDumpBackwardsCompat ensures that `cockroach dump` can be run successfully
-// against clusters on older versions. The goal is to ensure that the new
-// metadata queries that `cockroach dump` performs can handle errors when run
-// on a cluster that cannot yet handle those metadata queries.
-func runDumpBackwardsCompat(ctx context.Context, t *test, c *cluster, predecessorVersion string) {
-	expected := `CREATE TABLE t (
-	x INT8 NULL,
-	y INT8 NULL,
-	FAMILY "primary" (x, y, rowid)
-);
-
-INSERT INTO public.t (x, y) VALUES
-	(1, 1),
-	(2, 2);`
-	roachNodes := c.All()
-	// An empty string means that the cockroach binary specified by the
-	// `cockroach` flag will be used.
-	const mainVersion = ""
-	u := newVersionUpgradeTest(c,
-		uploadAndStart(roachNodes, predecessorVersion),
-		waitForUpgradeStep(roachNodes),
-		// Fill some data into the cluster.
-		fillData(),
-		// Get an upgraded version of the cockroach binary, and try to dump.
-		runDump(roachNodes, mainVersion, expected),
-	)
-
-	u.run(ctx, t)
-}
-
-func fillData() versionStep {
-	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
-		conn := u.conn(ctx, t, 1)
-		if _, err := conn.Exec(`
-CREATE DATABASE d;
-CREATE TABLE d.t (x INT, y INT);
-INSERT INTO d.t VALUES (1, 1), (2, 2);
-`); err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-func runDump(nodes nodeListOption, mainVersion, expected string) versionStep {
-	return func(ctx context.Context, t *test, u *versionUpgradeTest) {
-		// Put the new version of Cockroach onto the node. 
- u.uploadVersion(ctx, t, nodes, mainVersion) - raw, err := u.c.RunWithStdout(ctx, t.logger(), nodes, `./cockroach dump --insecure d`) - if err != nil { - t.Fatal(err) - } - output := strings.TrimSpace(string(raw)) - if output != expected { - t.Errorf("expected %s, but found %s", expected, output) - } - } -} - -func registerDumpBackwardsCompat(r *testRegistry) { - r.Add(testSpec{ - Name: "dump-backwards-compatibility", - Owner: OwnerBulkIO, - Cluster: makeClusterSpec(1), - MinVersion: "v20.2.0", - Run: func(ctx context.Context, t *test, c *cluster) { - predV, err := PredecessorVersion(r.buildVersion) - if err != nil { - t.Fatal(err) - } - runDumpBackwardsCompat(ctx, t, c, predV) - }, - }) -} diff --git a/pkg/cmd/roachtest/registry.go b/pkg/cmd/roachtest/registry.go index f563af17f001..3251fd8a536a 100644 --- a/pkg/cmd/roachtest/registry.go +++ b/pkg/cmd/roachtest/registry.go @@ -32,7 +32,6 @@ func registerTests(r *testRegistry) { registerDiskStalledDetection(r) registerDjango(r) registerDrop(r) - registerDumpBackwardsCompat(r) registerElectionAfterRestart(r) registerEncryption(r) registerEngineSwitch(r)
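
Note for scripts that previously shelled out to `cockroach dump`: the BACKUP
statement is the supported replacement. Below is a minimal sketch of that
workflow, assuming a local insecure cluster reached via `lib/pq` over the
Postgres wire protocol; the connection URL, the `defaultdb` database, the
`foo` table, and the nodelocal destination are illustrative assumptions, not
part of this patch:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the PostgreSQL wire protocol.
)

func main() {
	// Connection URL for a local insecure cluster; adjust for a real deployment.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// BACKUP covers both the schema and data roles of `cockroach dump`.
	// The nodelocal URI is one option; cloud storage URIs also work.
	if _, err := db.Exec(
		`BACKUP DATABASE defaultdb TO 'nodelocal://1/defaultdb-backup'`,
	); err != nil {
		log.Fatal(err)
	}

	// The schema-only use case (previously --dump-mode=schema) can be
	// approximated per table with SHOW CREATE TABLE, which returns the
	// columns (table_name, create_statement).
	var name, stmt string
	if err := db.QueryRow(`SHOW CREATE TABLE foo`).Scan(&name, &stmt); err != nil {
		log.Fatal(err)
	}
	log.Println(stmt)
}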