diff --git a/Dockerfile b/Dockerfile index 121ebcdcd..a821bc255 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,6 +15,7 @@ RUN dpkg --add-architecture ${TARGETARCH:-arm64} && apt update \ valgrind \ build-essential \ libedit-dev \ + libgc-dev \ libicu-dev \ libkrb5-dev \ liblz4-dev \ @@ -63,11 +64,12 @@ LABEL org.opencontainers.image.source https://github.com/dimitri/pgcopydb RUN dpkg --add-architecture ${TARGETARCH:-arm64} && apt update \ && apt install -qqy --no-install-suggests --no-install-recommends \ sudo \ - passwd \ + passwd \ ca-certificates \ + libgc1 \ libpq5 \ - libsqlite3-0 \ - lsof \ + libsqlite3-0 \ + lsof \ tmux \ watch \ psmisc \ diff --git a/debian/control b/debian/control index a6bffe2be..ce7de6f68 100644 --- a/debian/control +++ b/debian/control @@ -7,6 +7,7 @@ Uploaders: Build-Depends: debhelper-compat (= 13), libedit-dev, + libgc-dev, libkrb5-dev, liblz4-dev, libncurses-dev, diff --git a/src/bin/lib/subcommands.c/runprogram.h b/src/bin/lib/subcommands.c/runprogram.h index 3ef94387a..d8a302f0b 100644 --- a/src/bin/lib/subcommands.c/runprogram.h +++ b/src/bin/lib/subcommands.c/runprogram.h @@ -18,6 +18,9 @@ #include "pqexpbuffer.h" +/* + * Now setup internal constants + */ #define BUFSIZE 1024 #define ARGS_INCREMENT 12 @@ -51,7 +54,7 @@ typedef struct char *stdErr; } Program; -Program run_program(const char *program, ...); +Program *run_program(const char *program, ...); void initialize_program(Program *prog, char **args, bool setsid); void execute_subprogram(Program *prog); void execute_program(Program *prog); @@ -77,51 +80,55 @@ static void waitprogram(Program *prog, pid_t childPid); * the run and then return a Program struct instance with the result of running * the program. */ -Program +Program * run_program(const char *program, ...) 
{ int nb_args = 0; va_list args; const char *param; - Program prog = { 0 }; - - prog.program = strdup(program); - prog.returnCode = -1; - prog.error = 0; - prog.setsid = false; - prog.capture = true; - prog.tty = false; - prog.processBuffer = NULL; - prog.stdOutFd = -1; - prog.stdErrFd = -1; - prog.stdOut = NULL; - prog.stdErr = NULL; - - prog.args = (char **) malloc(ARGS_INCREMENT * sizeof(char *)); - prog.args[nb_args++] = prog.program; + Program *prog = (Program *) malloc(sizeof(Program)); + + if (prog == NULL) + { + return NULL; + } + + prog->program = strdup(program); + prog->returnCode = -1; + prog->error = 0; + prog->setsid = false; + prog->capture = true; + prog->tty = false; + prog->processBuffer = NULL; + prog->stdOutFd = -1; + prog->stdErrFd = -1; + prog->stdOut = NULL; + prog->stdErr = NULL; + + prog->args = (char **) malloc(ARGS_INCREMENT * sizeof(char *)); + prog->args[nb_args++] = prog->program; va_start(args, program); while ((param = va_arg(args, const char *)) != NULL) { if (nb_args % ARGS_INCREMENT == 0) { - char **newargs = (char **) malloc((ARGS_INCREMENT * - (nb_args / ARGS_INCREMENT + 1)) * - sizeof(char *)); - for (int i = 0; i < nb_args; i++) + int newcount = (ARGS_INCREMENT * (nb_args / ARGS_INCREMENT + 1)); + size_t newsize = newcount * sizeof(char *); + + prog->args = (char **) realloc(prog->args, newsize); + + if (prog->args == NULL) { - newargs[i] = prog.args[i]; + return NULL; } - free(prog.args); - - prog.args = newargs; } - prog.args[nb_args++] = strdup(param); + prog->args[nb_args++] = strdup(param); } va_end(args); - prog.args[nb_args] = NULL; + prog->args[nb_args] = NULL; - execute_subprogram(&prog); + execute_subprogram(prog); return prog; } diff --git a/src/bin/pgcopydb/Makefile b/src/bin/pgcopydb/Makefile index 929fe4432..376ef6b9f 100644 --- a/src/bin/pgcopydb/Makefile +++ b/src/bin/pgcopydb/Makefile @@ -80,6 +80,7 @@ LIBS += $(shell $(PG_CONFIG) --libs) LIBS += -lpq LIBS += -lncurses LIBS += -lsqlite3 +LIBS += -lgc all: 
$(PGCOPYDB) ; diff --git a/src/bin/pgcopydb/catalog.c b/src/bin/pgcopydb/catalog.c index 6f205618f..f4ef9622b 100644 --- a/src/bin/pgcopydb/catalog.c +++ b/src/bin/pgcopydb/catalog.c @@ -571,7 +571,6 @@ catalog_register_setup_from_specs(CopyDataSpec *copySpecs) { /* errors have already been logged */ json_free_serialized_string(json); - json_value_free(jsFilters); return false; } } @@ -688,7 +687,6 @@ catalog_register_setup_from_specs(CopyDataSpec *copySpecs) } json_free_serialized_string(json); - json_value_free(jsFilters); return true; } @@ -2504,7 +2502,6 @@ catalog_iter_s_table(DatabaseCatalog *catalog, if (!catalog_iter_s_table_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2513,7 +2510,6 @@ catalog_iter_s_table(DatabaseCatalog *catalog, if (!catalog_iter_s_table_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2524,7 +2520,6 @@ catalog_iter_s_table(DatabaseCatalog *catalog, if (!catalog_iter_s_table_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2540,7 +2535,6 @@ catalog_iter_s_table(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -2563,7 +2557,6 @@ catalog_iter_s_table_nopk(DatabaseCatalog *catalog, if (!catalog_iter_s_table_nopk_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2572,7 +2565,6 @@ catalog_iter_s_table_nopk(DatabaseCatalog *catalog, if (!catalog_iter_s_table_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2583,7 +2575,6 @@ catalog_iter_s_table_nopk(DatabaseCatalog *catalog, if (!catalog_iter_s_table_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2599,7 +2590,6 @@ catalog_iter_s_table_nopk(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -2756,7 +2746,6 @@ catalog_iter_s_table_next(SourceTableIterator *iter) if (rc == SQLITE_DONE) { - free(iter->table); iter->table = NULL; return true; @@ 
-2888,12 +2877,6 @@ catalog_iter_s_table_finish(SourceTableIterator *iter) { SQLiteQuery *query = &(iter->query); - /* in case we finish before reaching the DONE step */ - if (iter->table != NULL) - { - free(iter->table); - } - if (!catalog_sql_finalize(query)) { /* errors have already been logged */ @@ -2923,7 +2906,6 @@ catalog_iter_s_table_parts(DatabaseCatalog *catalog, if (!catalog_iter_s_table_part_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2932,7 +2914,6 @@ catalog_iter_s_table_parts(DatabaseCatalog *catalog, if (!catalog_iter_s_table_part_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2943,7 +2924,6 @@ catalog_iter_s_table_parts(DatabaseCatalog *catalog, if (!catalog_iter_s_table_part_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2959,7 +2939,6 @@ catalog_iter_s_table_parts(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -3031,7 +3010,6 @@ catalog_iter_s_table_part_next(SourceTablePartsIterator *iter) if (rc == SQLITE_DONE) { - free(iter->part); iter->part = NULL; return true; @@ -3079,12 +3057,6 @@ catalog_iter_s_table_part_finish(SourceTablePartsIterator *iter) { SQLiteQuery *query = &(iter->query); - /* in case we finish before reaching the DONE step */ - if (iter->part != NULL) - { - free(iter->part); - } - if (!catalog_sql_finalize(query)) { /* errors have already been logged */ @@ -3118,7 +3090,6 @@ catalog_s_table_fetch_attrs(DatabaseCatalog *catalog, SourceTable *table) if (!catalog_iter_s_table_attrs_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -3127,7 +3098,6 @@ catalog_s_table_fetch_attrs(DatabaseCatalog *catalog, SourceTable *table) if (!catalog_iter_s_table_attrs_next(iter)) { /* errors have already been logged */ - free(iter); return false; } } @@ -3135,11 +3105,9 @@ catalog_s_table_fetch_attrs(DatabaseCatalog *catalog, SourceTable *table) if 
(!catalog_iter_s_table_attrs_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } - free(iter); return true; } @@ -3791,7 +3759,6 @@ catalog_iter_s_index(DatabaseCatalog *catalog, if (!catalog_iter_s_index_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -3800,7 +3767,6 @@ catalog_iter_s_index(DatabaseCatalog *catalog, if (!catalog_iter_s_index_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -3811,7 +3777,6 @@ catalog_iter_s_index(DatabaseCatalog *catalog, if (!catalog_iter_s_index_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -3827,7 +3792,6 @@ catalog_iter_s_index(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -3859,7 +3823,6 @@ catalog_iter_s_index_table(DatabaseCatalog *catalog, if (!catalog_iter_s_index_table_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -3868,7 +3831,6 @@ catalog_iter_s_index_table(DatabaseCatalog *catalog, if (!catalog_iter_s_index_next(iter)) { /* errors have already been logged */ - free(iter); (void) semaphore_unlock(&(catalog->sema)); return false; } @@ -3880,7 +3842,6 @@ catalog_iter_s_index_table(DatabaseCatalog *catalog, if (!catalog_iter_s_index_finish(iter)) { /* errors have already been logged */ - free(iter); (void) semaphore_unlock(&(catalog->sema)); return false; } @@ -3893,13 +3854,11 @@ catalog_iter_s_index_table(DatabaseCatalog *catalog, { log_error("Failed to iterate over list of indexes, " "see above for details"); - free(iter); (void) semaphore_unlock(&(catalog->sema)); return false; } } - free(iter); (void) semaphore_unlock(&(catalog->sema)); return true; @@ -4029,11 +3988,6 @@ catalog_iter_s_index_next(SourceIndexIterator *iter) if (rc == SQLITE_DONE) { - free(iter->index->indexDef); - free(iter->index->indexColumns); - free(iter->index->constraintDef); - free(iter->index); - iter->index = NULL; return true; @@ -4062,11 
+4016,6 @@ catalog_iter_s_index_finish(SourceIndexIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->index != NULL) { - free(iter->index->indexDef); - free(iter->index->indexColumns); - free(iter->index->constraintDef); - free(iter->index); - iter->index = NULL; } @@ -4449,7 +4398,6 @@ catalog_iter_s_seq(DatabaseCatalog *catalog, if (!catalog_iter_s_seq_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -4458,7 +4406,6 @@ catalog_iter_s_seq(DatabaseCatalog *catalog, if (!catalog_iter_s_seq_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -4469,7 +4416,6 @@ catalog_iter_s_seq(DatabaseCatalog *catalog, if (!catalog_iter_s_seq_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -4485,7 +4431,6 @@ catalog_iter_s_seq(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -4548,7 +4493,6 @@ catalog_iter_s_seq_next(SourceSeqIterator *iter) if (rc == SQLITE_DONE) { - free(iter->seq); iter->seq = NULL; return true; @@ -4617,7 +4561,6 @@ catalog_iter_s_seq_finish(SourceSeqIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->seq != NULL) { - free(iter->seq); iter->seq = NULL; } @@ -5080,7 +5023,6 @@ catalog_iter_s_database(DatabaseCatalog *catalog, if (!catalog_iter_s_database_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5089,7 +5031,6 @@ catalog_iter_s_database(DatabaseCatalog *catalog, if (!catalog_iter_s_database_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5100,7 +5041,6 @@ catalog_iter_s_database(DatabaseCatalog *catalog, if (!catalog_iter_s_database_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5116,7 +5056,6 @@ catalog_iter_s_database(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -5178,7 +5117,6 @@ catalog_iter_s_database_next(SourceDatabaseIterator *iter) if (rc == 
SQLITE_DONE) { - free(iter->dat); iter->dat = NULL; return true; @@ -5235,7 +5173,6 @@ catalog_iter_s_database_finish(SourceDatabaseIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->dat != NULL) { - free(iter->dat); iter->dat = NULL; } @@ -5268,7 +5205,6 @@ catalog_iter_s_database_guc(DatabaseCatalog *catalog, if (!catalog_iter_s_database_guc_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5277,7 +5213,6 @@ catalog_iter_s_database_guc(DatabaseCatalog *catalog, if (!catalog_iter_s_database_guc_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5288,7 +5223,6 @@ catalog_iter_s_database_guc(DatabaseCatalog *catalog, if (!catalog_iter_s_database_guc_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5300,12 +5234,10 @@ catalog_iter_s_database_guc(DatabaseCatalog *catalog, { log_error("Failed to iterate over list of dats, " "see above for details"); - free(iter); return false; } } - free(iter); return true; } @@ -5380,8 +5312,6 @@ catalog_iter_s_database_guc_next(SourcePropertyIterator *iter) if (rc == SQLITE_DONE) { - free(iter->property->setconfig); - free(iter->property); iter->property = NULL; return true; @@ -5460,8 +5390,6 @@ catalog_iter_s_database_guc_finish(SourcePropertyIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->property != NULL) { - free(iter->property->setconfig); - free(iter->property); iter->property = NULL; } @@ -5547,7 +5475,6 @@ catalog_iter_s_coll(DatabaseCatalog *catalog, if (!catalog_iter_s_coll_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5556,7 +5483,6 @@ catalog_iter_s_coll(DatabaseCatalog *catalog, if (!catalog_iter_s_coll_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -5567,7 +5493,6 @@ catalog_iter_s_coll(DatabaseCatalog *catalog, if (!catalog_iter_s_coll_finish(iter)) { /* errors have already 
been logged */ - free(iter); return false; } @@ -5583,7 +5508,6 @@ catalog_iter_s_coll(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -5645,8 +5569,6 @@ catalog_iter_s_coll_next(SourceCollationIterator *iter) if (rc == SQLITE_DONE) { - free(iter->coll->desc); - free(iter->coll); iter->coll = NULL; return true; @@ -5720,8 +5642,6 @@ catalog_iter_s_coll_finish(SourceCollationIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->coll != NULL) { - free(iter->coll->desc); - free(iter->coll); iter->coll = NULL; } @@ -6002,7 +5922,6 @@ catalog_iter_s_extension(DatabaseCatalog *catalog, if (!catalog_iter_s_extension_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6011,7 +5930,6 @@ catalog_iter_s_extension(DatabaseCatalog *catalog, if (!catalog_iter_s_extension_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6022,7 +5940,6 @@ catalog_iter_s_extension(DatabaseCatalog *catalog, if (!catalog_iter_s_extension_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6038,7 +5955,6 @@ catalog_iter_s_extension(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -6100,7 +6016,6 @@ catalog_iter_s_extension_next(SourceExtensionIterator *iter) if (rc == SQLITE_DONE) { - free(iter->ext); iter->ext = NULL; return true; @@ -6157,7 +6072,6 @@ catalog_iter_s_extension_finish(SourceExtensionIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->ext != NULL) { - free(iter->ext); iter->ext = NULL; } @@ -6193,7 +6107,6 @@ catalog_s_ext_fetch_extconfig(DatabaseCatalog *catalog, SourceExtension *ext) if (!catalog_iter_s_ext_extconfig_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6202,7 +6115,6 @@ catalog_s_ext_fetch_extconfig(DatabaseCatalog *catalog, SourceExtension *ext) if (!catalog_iter_s_ext_extconfig_next(iter)) { /* errors have already been logged */ - free(iter); 
return false; } } @@ -6210,11 +6122,9 @@ catalog_s_ext_fetch_extconfig(DatabaseCatalog *catalog, SourceExtension *ext) if (!catalog_iter_s_ext_extconfig_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } - free(iter); return true; } @@ -6593,7 +6503,6 @@ catalog_iter_s_depend(DatabaseCatalog *catalog, if (!catalog_iter_s_depend_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6602,7 +6511,6 @@ catalog_iter_s_depend(DatabaseCatalog *catalog, if (!catalog_iter_s_depend_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6613,7 +6521,6 @@ catalog_iter_s_depend(DatabaseCatalog *catalog, if (!catalog_iter_s_depend_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6629,7 +6536,6 @@ catalog_iter_s_depend(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -6692,7 +6598,6 @@ catalog_iter_s_depend_next(SourceDependIterator *iter) if (rc == SQLITE_DONE) { - free(iter->dep); iter->dep = NULL; return true; @@ -6769,7 +6674,6 @@ catalog_iter_s_depend_finish(SourceDependIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->dep != NULL) { - free(iter->dep); iter->dep = NULL; } @@ -6927,7 +6831,6 @@ catalog_iter_s_table_in_copy(DatabaseCatalog *catalog, if (!catalog_iter_s_table_in_copy_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6936,7 +6839,6 @@ catalog_iter_s_table_in_copy(DatabaseCatalog *catalog, if (!catalog_iter_s_table_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6947,7 +6849,6 @@ catalog_iter_s_table_in_copy(DatabaseCatalog *catalog, if (!catalog_iter_s_table_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -6963,7 +6864,6 @@ catalog_iter_s_table_in_copy(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -7046,7 +6946,6 @@ 
catalog_iter_s_index_in_progress(DatabaseCatalog *catalog, if (!catalog_iter_s_index_in_progress_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -7055,7 +6954,6 @@ catalog_iter_s_index_in_progress(DatabaseCatalog *catalog, if (!catalog_iter_s_index_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -7066,7 +6964,6 @@ catalog_iter_s_index_in_progress(DatabaseCatalog *catalog, if (!catalog_iter_s_index_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -7082,7 +6979,6 @@ catalog_iter_s_index_in_progress(DatabaseCatalog *catalog, } } - free(iter); return true; } diff --git a/src/bin/pgcopydb/cli_common.c b/src/bin/pgcopydb/cli_common.c index d7830ccc4..832ce570a 100644 --- a/src/bin/pgcopydb/cli_common.c +++ b/src/bin/pgcopydb/cli_common.c @@ -144,7 +144,6 @@ cli_pprint_json(JSON_Value *js) /* free intermediate memory */ json_free_serialized_string(serialized_string); - json_value_free(js); } @@ -694,7 +693,7 @@ cli_read_one_line(const char *filename, /* make sure to use only the first line of the file, without \n */ LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, contents, true)) + if (!splitLines(&lbuf, contents)) { /* errors have already been logged */ return false; @@ -703,7 +702,6 @@ cli_read_one_line(const char *filename, if (lbuf.count != 1) { log_error("Failed to parse %s file \"%s\"", name, filename); - FreeLinesBuffer(&lbuf); return false; } @@ -715,15 +713,12 @@ cli_read_one_line(const char *filename, lbuf.lines[0], (long long) strlen(lbuf.lines[0]) + 1, (long long) size); - FreeLinesBuffer(&lbuf); return false; } /* publish the one line to the snapshot variable */ strlcpy(target, lbuf.lines[0], size); - FreeLinesBuffer(&lbuf); - return true; } diff --git a/src/bin/pgcopydb/cli_compare.c b/src/bin/pgcopydb/cli_compare.c index 64f3c8ebe..03f0e4629 100644 --- a/src/bin/pgcopydb/cli_compare.c +++ b/src/bin/pgcopydb/cli_compare.c @@ -392,7 +392,6 @@ 
cli_compare_data(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { diff --git a/src/bin/pgcopydb/cli_list.c b/src/bin/pgcopydb/cli_list.c index bd6ff66b6..98b5f9370 100644 --- a/src/bin/pgcopydb/cli_list.c +++ b/src/bin/pgcopydb/cli_list.c @@ -718,7 +718,6 @@ cli_list_extensions(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { @@ -887,7 +886,6 @@ cli_list_extension_versions(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { @@ -971,7 +969,6 @@ cli_list_extension_requirements(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { @@ -1829,7 +1826,6 @@ cli_list_schema(int argc, char **argv) } fformat(stdout, "%s\n", json); - free(json); } } @@ -1929,7 +1925,6 @@ cli_list_progress(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { diff --git a/src/bin/pgcopydb/cli_restore.c b/src/bin/pgcopydb/cli_restore.c index 1fa1d4ed7..1010a7b2b 100644 --- a/src/bin/pgcopydb/cli_restore.c +++ b/src/bin/pgcopydb/cli_restore.c @@ -585,8 +585,6 @@ cli_restore_schema_parse_list(int argc, char **argv) item->restoreListName ? 
item->restoreListName : ""); } - FreeArchiveContentArray(&contents); - exit(EXIT_CODE_QUIT); } diff --git a/src/bin/pgcopydb/cli_sentinel.c b/src/bin/pgcopydb/cli_sentinel.c index d58f527b3..ac97875f7 100644 --- a/src/bin/pgcopydb/cli_sentinel.c +++ b/src/bin/pgcopydb/cli_sentinel.c @@ -642,7 +642,6 @@ cli_sentinel_get(int argc, char **argv) fformat(stdout, "%s\n", serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } else { diff --git a/src/bin/pgcopydb/compare.c b/src/bin/pgcopydb/compare.c index 97c15e618..c0e82d498 100644 --- a/src/bin/pgcopydb/compare.c +++ b/src/bin/pgcopydb/compare.c @@ -359,7 +359,6 @@ compare_data_by_table_oid(CopyDataSpec *copySpecs, uint32_t oid) log_error("Failed to lookup for table %u in our internal catalogs", oid); - free(table); return false; } @@ -368,7 +367,6 @@ compare_data_by_table_oid(CopyDataSpec *copySpecs, uint32_t oid) log_error("Failed to find table with oid %u in our internal catalogs", oid); - free(table); return false; } @@ -558,7 +556,6 @@ compare_schemas(CopyDataSpec *copySpecs) /* * Now prepare two specifications with only the source uri. * - * We don't free() any memory here as the two CopyDataSpecs copies are * going to share pointers to memory allocated in the main copySpecs * instance. 
*/ @@ -659,7 +656,6 @@ compare_schemas_table_hook(void *ctx, SourceTable *sourceTable) sourceTable->nspname, sourceTable->relname); - free(targetTable); return false; } @@ -710,7 +706,6 @@ compare_schemas_table_hook(void *ctx, SourceTable *sourceTable) } } - free(targetTable); log_notice("Matched table %s with %d columns ok", sourceTable->qname, @@ -746,7 +741,6 @@ compare_schemas_index_hook(void *ctx, SourceIndex *sourceIndex) sourceIndex->indexNamespace, sourceIndex->indexRelname); - free(targetIndex); return false; } @@ -839,7 +833,6 @@ compare_schemas_index_hook(void *ctx, SourceIndex *sourceIndex) targetIndex->constraintDef); } - free(targetIndex); log_notice("Matched index %s ok (%s, %s)", sourceIndex->indexQname, @@ -877,7 +870,6 @@ compare_schemas_seq_hook(void *ctx, SourceSequence *sourceSeq) sourceSeq->nspname, sourceSeq->relname); - free(targetSeq); return false; } @@ -910,7 +902,6 @@ compare_schemas_seq_hook(void *ctx, SourceSequence *sourceSeq) sourceSeq->qname, (long long) sourceSeq->lastValue); - free(targetSeq); return true; } @@ -1075,7 +1066,6 @@ compare_write_checksum(SourceTable *table, const char *filename) bool success = write_file(serialized_string, len, filename); json_free_serialized_string(serialized_string); - json_value_free(js); if (!success) { @@ -1109,7 +1099,6 @@ compare_read_checksum(SourceTable *table, const char *filename) table->oid, table->qname, filename); - json_value_free(json); return false; } diff --git a/src/bin/pgcopydb/copydb.h b/src/bin/pgcopydb/copydb.h index f3aed632c..c7cab1f73 100644 --- a/src/bin/pgcopydb/copydb.h +++ b/src/bin/pgcopydb/copydb.h @@ -306,8 +306,6 @@ bool copydb_init_table_specs(CopyTableDataSpec *tableSpecs, SourceTable *source, int partNumber); -void FreeCopyTableDataSpec(CopyTableDataSpec *tableSpecs); - bool copydb_export_snapshot(TransactionSnapshot *snapshot); bool copydb_fatal_exit(void); diff --git a/src/bin/pgcopydb/defaults.h b/src/bin/pgcopydb/defaults.h index 196a23fb4..9a63dfa28 100644 
--- a/src/bin/pgcopydb/defaults.h +++ b/src/bin/pgcopydb/defaults.h @@ -6,6 +6,26 @@ #ifndef DEFAULTS_H #define DEFAULTS_H +/* + * Setup Boehm-Demers-Weiser conservative garbage collector as a garbage + * collecting replacement for malloc. See https://www.hboehm.info/gc/ + */ +#include <gc.h> + +#define malloc(n) GC_malloc(n) +#define calloc(m, n) GC_malloc((m) * (n)) +#define free(p) GC_free(p) +#define realloc(p, n) GC_realloc((p), (n)) + +/* + * The GC API can also be used as leak detector, thanks to using the following + * macro, as per https://www.hboehm.info/gc/leak.html + */ +#define CHECK_LEAKS() GC_gcollect() + +/* + * Now install the version string. + */ #include "git-version.h" /* additional version information for printing version on CLI */ diff --git a/src/bin/pgcopydb/dump_restore.c b/src/bin/pgcopydb/dump_restore.c index 35cc1c8ba..b94c0244a 100644 --- a/src/bin/pgcopydb/dump_restore.c +++ b/src/bin/pgcopydb/dump_restore.c @@ -281,8 +281,8 @@ copydb_copy_database_properties_hook(void *ctx, SourceProperty *property) PGSQL *dst = context->dst; PGconn *conn = dst->connection; - const char *t_dbname = specs->connStrings.safeTargetPGURI.uriParams.dbname; - const char *t_escaped_dbname = pgsql_escape_identifier(dst, t_dbname); + char *t_dbname = specs->connStrings.safeTargetPGURI.uriParams.dbname; + char *t_escaped_dbname = pgsql_escape_identifier(dst, t_dbname); if (t_escaped_dbname == NULL) { @@ -308,7 +308,6 @@ copydb_copy_database_properties_hook(void *ctx, SourceProperty *property) if (!catalog_lookup_s_role_by_name(targetDB, property->rolname, role)) { /* errors have already been logged */ - free(role); return false; } @@ -333,7 +332,6 @@ copydb_copy_database_properties_hook(void *ctx, SourceProperty *property) if (!pgsql_execute(dst, command->data)) { /* errors have already been logged */ - free(role); return false; } @@ -345,8 +343,6 @@ copydb_copy_database_properties_hook(void *ctx, SourceProperty *property) "does not exists on the target database", 
property->rolname); } - - free(role); } /* @@ -602,7 +598,6 @@ copydb_write_restore_list(CopyDataSpec *specs, PostgresDumpSection section) &contents)) { /* errors have already been logged */ - FreeArchiveContentArray(&contents); return false; } @@ -717,7 +712,6 @@ copydb_write_restore_list(CopyDataSpec *specs, PostgresDumpSection section) { log_error("Failed to create pg_restore list file: out of memory"); destroyPQExpBuffer(listContents); - FreeArchiveContentArray(&contents); return false; } @@ -727,12 +721,10 @@ copydb_write_restore_list(CopyDataSpec *specs, PostgresDumpSection section) { /* errors have already been logged */ destroyPQExpBuffer(listContents); - FreeArchiveContentArray(&contents); return false; } destroyPQExpBuffer(listContents); - FreeArchiveContentArray(&contents); return true; } diff --git a/src/bin/pgcopydb/extensions.c b/src/bin/pgcopydb/extensions.c index 2f0ee619b..ecb5d4bbe 100644 --- a/src/bin/pgcopydb/extensions.c +++ b/src/bin/pgcopydb/extensions.c @@ -372,7 +372,6 @@ copydb_parse_extensions_requirements(CopyDataSpec *copySpecs, char *filename) return false; } - json_value_free(schema); JSON_Array *jsReqArray = json_value_get_array(json); size_t count = json_array_get_count(jsReqArray); diff --git a/src/bin/pgcopydb/file_utils.c b/src/bin/pgcopydb/file_utils.c index 5529fecad..4af5592cf 100644 --- a/src/bin/pgcopydb/file_utils.c +++ b/src/bin/pgcopydb/file_utils.c @@ -356,14 +356,12 @@ read_file_internal(FILE *fileStream, { log_error("Failed to read file \"%s\": %m", filePath); fclose(fileStream); - free(data); return false; } if (fclose(fileStream) == EOF) { log_error("Failed to read file \"%s\"", filePath); - free(data); return false; } @@ -530,12 +528,10 @@ read_from_stream(FILE *stream, ReadFromStreamContext *context) if (bytes == -1) { log_error("Failed to read from input stream: %m"); - free(buf); return false; } else if (bytes == 0) { - free(buf); doneReading = true; continue; } @@ -547,7 +543,7 @@ read_from_stream(FILE *stream, 
ReadFromStreamContext *context) bool partialRead = buf[bytes - 1] != '\n'; LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, buf, true)) + if (!splitLines(&lbuf, buf)) { /* errors have already been logged */ return false; @@ -669,7 +665,6 @@ read_from_stream(FILE *stream, ReadFromStreamContext *context) if (!(*context->callback)(context->ctx, line, &stop)) { - FreeLinesBuffer(&lbuf); destroyPQExpBuffer(multiPartBuffer); return false; } @@ -689,8 +684,6 @@ read_from_stream(FILE *stream, ReadFromStreamContext *context) } } } - - FreeLinesBuffer(&lbuf); } /* doneReading might have been set from the user callback already */ @@ -793,7 +786,6 @@ duplicate_file(char *sourcePath, char *destinationPath) bool foundError = !write_file(fileContents, fileSize, destinationPath); - free(fileContents); if (foundError) { diff --git a/src/bin/pgcopydb/filtering.c b/src/bin/pgcopydb/filtering.c index 63288d917..9c6b5e82a 100644 --- a/src/bin/pgcopydb/filtering.c +++ b/src/bin/pgcopydb/filtering.c @@ -137,7 +137,6 @@ parse_filters(const char *filename, SourceFilters *filters) } ini_t *ini = ini_load(fileContents, NULL); - free(fileContents); /* * The index in the sections array matches the SourceFilterSection enum diff --git a/src/bin/pgcopydb/indexes.c b/src/bin/pgcopydb/indexes.c index a3fad4e73..9a789fb76 100644 --- a/src/bin/pgcopydb/indexes.c +++ b/src/bin/pgcopydb/indexes.c @@ -339,16 +339,12 @@ copydb_create_index_by_oid(CopyDataSpec *specs, PGSQL *dst, uint32_t indexOid) if (!catalog_lookup_s_index(sourceDB, indexOid, index)) { log_error("Failed to lookup index %u in our catalogs", indexOid); - free(index); - free(table); return false; } if (!catalog_lookup_s_table(sourceDB, index->tableOid, 0, table)) { log_error("Failed to lookup table %u in our catalogs", index->tableOid); - free(index); - free(table); return false; } @@ -390,8 +386,6 @@ copydb_create_index_by_oid(CopyDataSpec *specs, PGSQL *dst, uint32_t indexOid) if (!copydb_create_index(specs, dst, index, ifNotExists)) 
{ /* errors have already been logged */ - free(index); - free(table); return false; } @@ -410,8 +404,6 @@ copydb_create_index_by_oid(CopyDataSpec *specs, PGSQL *dst, uint32_t indexOid) &constraintsAreBeingBuilt)) { /* errors have already been logged */ - free(index); - free(table); return false; } @@ -428,8 +420,6 @@ copydb_create_index_by_oid(CopyDataSpec *specs, PGSQL *dst, uint32_t indexOid) { log_error("Failed to create constraints for table %s", table->qname); - free(index); - free(table); return false; } @@ -440,15 +430,11 @@ copydb_create_index_by_oid(CopyDataSpec *specs, PGSQL *dst, uint32_t indexOid) log_error("Failed to queue VACUUM ANALYZE %s [%u]", table->qname, table->oid); - free(index); - free(table); return false; } } } - free(index); - free(table); return true; } @@ -1060,7 +1046,6 @@ copydb_create_constraints(CopyDataSpec *specs, PGSQL *dst, SourceTable *table) { log_error("Failed to count indexes for table %s in our target catalog", targetTable->qname); - free(targetTable); return false; } @@ -1083,7 +1068,6 @@ copydb_create_constraints(CopyDataSpec *specs, PGSQL *dst, SourceTable *table) table->qname); } - free(targetTable); /* * Now iterate over the source database catalog list of indexes attached to diff --git a/src/bin/pgcopydb/ld_apply.c b/src/bin/pgcopydb/ld_apply.c index 9eade42cf..5bc913f06 100644 --- a/src/bin/pgcopydb/ld_apply.c +++ b/src/bin/pgcopydb/ld_apply.c @@ -448,7 +448,7 @@ stream_apply_file(StreamApplyContext *context) return false; } - if (!splitLines(&(content.lbuf), contents, true)) + if (!splitLines(&(content.lbuf), contents)) { /* errors have already been logged */ return false; @@ -484,8 +484,6 @@ stream_apply_file(StreamApplyContext *context) if (!parseSQLAction(sql, metadata)) { /* errors have already been logged */ - FreeLinesBuffer(&(content.lbuf)); - return false; } @@ -502,8 +500,6 @@ stream_apply_file(StreamApplyContext *context) (long long) i + 1, (long long) content.lbuf.count); - 
FreeLinesBuffer(&(content.lbuf)); - return false; } @@ -528,15 +524,10 @@ stream_apply_file(StreamApplyContext *context) "see above for details", content.filename); - FreeLinesBuffer(&(content.lbuf)); - return false; } } - /* free dynamic memory that's not needed anymore */ - FreeLinesBuffer(&(content.lbuf)); - /* * Each time we are done applying a file, we update our progress and * fetch new values from the pgcopydb sentinel. Errors are warning @@ -1117,12 +1108,8 @@ stream_apply_sql(StreamApplyContext *context, /* errors have already been logged */ return false; } - - free(paramValues); } - free(metadata->jsonBuffer); - json_value_free(js); break; } @@ -1443,11 +1430,9 @@ parseSQLAction(const char *query, LogicalMessageMetadata *metadata) if (!parseMessageMetadata(metadata, message, json, true)) { /* errors have already been logged */ - json_value_free(json); return false; } - json_value_free(json); return true; } @@ -1656,7 +1641,6 @@ stream_apply_find_durable_lsn(StreamApplyContext *context, uint64_t *durableLSN) while (tail != NULL) { LSNTracking *previous = tail->previous; - free(tail); tail = previous; } @@ -1746,12 +1730,9 @@ parseTxnMetadataFile(const char *filename, LogicalMessageMetadata *metadata) if (!parseMessageMetadata(metadata, txnMetadataContent, json, true)) { /* errors have already been logged */ - json_value_free(json); return false; } - json_value_free(json); - free(txnMetadataContent); if (metadata->txnCommitLSN == InvalidXLogRecPtr || metadata->xid != xid || diff --git a/src/bin/pgcopydb/ld_stream.c b/src/bin/pgcopydb/ld_stream.c index 046d0ea32..8f47f9cf3 100644 --- a/src/bin/pgcopydb/ld_stream.c +++ b/src/bin/pgcopydb/ld_stream.c @@ -658,8 +658,6 @@ streamCheckResumePosition(StreamSpecs *specs) log_notice("Resume replication from latest message: %s", latestMessage); } - FreeLinesBuffer(&(latestStreamedContent.lbuf)); - PGSQL src = { 0 }; if (!pgsql_init(&src, specs->connStrings->source_pguri, PGSQL_CONN_SOURCE)) @@ -863,7 +861,6 @@ 
stream_write_json(LogicalStreamContext *context, bool previous) } destroyPQExpBuffer(buffer); - free(metadata->jsonBuffer); /* * Maintain the transaction progress based on the BEGIN and COMMIT messages @@ -1941,7 +1938,7 @@ stream_read_file(StreamContent *content) return false; } - if (!splitLines(&(content->lbuf), contents, true)) + if (!splitLines(&(content->lbuf), contents)) { /* errors have already been logged */ return false; @@ -1976,11 +1973,8 @@ stream_read_file(StreamContent *content) if (!parseMessageMetadata(metadata, message, json, false)) { /* errors have already been logged */ - json_value_free(json); return false; } - - json_value_free(json); } return true; @@ -2143,11 +2137,9 @@ buildReplicationURI(const char *pguri, char **repl_pguri) if (!buildPostgresURIfromPieces(¶ms, repl_pguri)) { log_error("Failed to produce the replication connection string"); - freeURIParams(¶ms); return false; } - freeURIParams(¶ms); return true; } @@ -2760,33 +2752,21 @@ stream_read_context(CDCPaths *paths, if (!stringToUInt(wal_segment_size, WalSegSz)) { /* errors have already been logged */ - free(wal_segment_size); - free(tli); - free(history); return false; } if (!stringToUInt(tli, &(system->timeline))) { /* errors have already been logged */ - free(wal_segment_size); - free(tli); - free(history); return false; } if (!parseTimeLineHistory(paths->tlihistfile, history, system)) { /* errors have already been logged */ - free(wal_segment_size); - free(tli); - free(history); return false; } - free(wal_segment_size); - free(tli); - free(history); return true; } diff --git a/src/bin/pgcopydb/ld_stream.h b/src/bin/pgcopydb/ld_stream.h index b0baf6911..277094102 100644 --- a/src/bin/pgcopydb/ld_stream.h +++ b/src/bin/pgcopydb/ld_stream.h @@ -624,11 +624,6 @@ bool parseMessage(StreamContext *privateContext, char *message, JSON_Value *json bool streamLogicalTransactionAppendStatement(LogicalTransaction *txn, LogicalTransactionStatement *stmt); -void 
FreeLogicalMessage(LogicalMessage *msg); -void FreeLogicalTransaction(LogicalTransaction *tx); -void FreeLogicalMessageTupleArray(LogicalMessageTupleArray *tupleArray); -void FreeLogicalMessageRelation(LogicalMessageRelation *table); -void FreeLogicalMessageTuple(LogicalMessageTuple *tuple); bool AllocateLogicalMessageTuple(LogicalMessageTuple *tuple, int count); /* ld_test_decoding.c */ diff --git a/src/bin/pgcopydb/ld_test_decoding.c b/src/bin/pgcopydb/ld_test_decoding.c index 48627f401..79dc919fb 100644 --- a/src/bin/pgcopydb/ld_test_decoding.c +++ b/src/bin/pgcopydb/ld_test_decoding.c @@ -110,7 +110,6 @@ prepareTestDecodingMessage(LogicalStreamContext *context) return false; } - json_value_free(js); json_free_serialized_string(jsonstr); return true; @@ -627,21 +626,6 @@ SetColumnNamesAndValues(LogicalMessageTuple *tuple, TestDecodingHeader *header) return false; } - /* - * Free the TestDecodingColumns memory that we allocated: only the - * structure itself, the rest is just a bunch of pointers to parts of the - * messages. 
- */ - TestDecodingColumns *c = cols; - - for (; c != NULL;) - { - TestDecodingColumns *next = c->next; - - free(c); - c = next; - } - return true; } @@ -984,7 +968,6 @@ prepareUpdateTuppleArrays(StreamContext *privateContext, table)) { /* errors have already been logged */ - free(table); return false; } @@ -1121,7 +1104,5 @@ prepareUpdateTuppleArrays(StreamContext *privateContext, cols->values.array[0].array[c].val.str = NULL; } - (void) FreeLogicalMessageTuple(cols); - return true; } diff --git a/src/bin/pgcopydb/ld_transform.c b/src/bin/pgcopydb/ld_transform.c index b64aaa13d..8b40cf65b 100644 --- a/src/bin/pgcopydb/ld_transform.c +++ b/src/bin/pgcopydb/ld_transform.c @@ -456,8 +456,6 @@ stream_transform_write_message(StreamContext *privateContext, return false; } - (void) FreeLogicalMessage(currentMsg); - if (metadata->action == STREAM_ACTION_COMMIT || metadata->action == STREAM_ACTION_ROLLBACK) { @@ -515,7 +513,6 @@ stream_transform_message(StreamContext *privateContext, char *message) if (!parseMessageMetadata(metadata, message, json, false)) { /* errors have already been logged */ - json_value_free(json); return false; } @@ -524,11 +521,9 @@ stream_transform_message(StreamContext *privateContext, char *message) log_error("Failed to parse JSON message: %.1024s%s", message, strlen(message) > 1024 ? "..." 
: ""); - json_value_free(json); return false; } - json_value_free(json); return true; } @@ -907,7 +902,7 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) return false; } - if (!splitLines(&(content.lbuf), contents, true)) + if (!splitLines(&(content.lbuf), contents)) { /* errors have already been logged */ return false; @@ -941,7 +936,6 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) if (privateContext->sqlFile == NULL) { log_error("Failed to open file \"%s\"", tempfilename); - FreeLinesBuffer(&(content.lbuf)); return false; } @@ -970,8 +964,6 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) if (!parseMessageMetadata(metadata, message, json, false)) { /* errors have already been logged */ - json_value_free(json); - FreeLinesBuffer(&(content.lbuf)); return false; } @@ -1003,12 +995,9 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) if (!parseMessage(privateContext, message, json)) { log_error("Failed to parse JSON message: %s", message); - json_value_free(json); - FreeLinesBuffer(&(content.lbuf)); return false; } - json_value_free(json); /* * Prepare a new message when we just read the COMMIT message of an @@ -1020,7 +1009,6 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) { log_error("Failed to transform and flush the current message, " "see above for details"); - FreeLinesBuffer(&(content.lbuf)); return false; } @@ -1034,8 +1022,6 @@ stream_transform_file(StreamSpecs *specs, char *jsonfilename, char *sqlfilename) } } - FreeLinesBuffer(&(content.lbuf)); - if (fclose(privateContext->sqlFile) == EOF) { log_error("Failed to close file \"%s\"", tempfilename); @@ -1244,7 +1230,6 @@ parseMessage(StreamContext *privateContext, char *message, JSON_Value *json) /* copy the stmt over, then free the extra allocated memory */ mesg->action = metadata->action; mesg->command.switchwal = 
stmt->stmt.switchwal; - free(stmt); } break; @@ -1268,7 +1253,6 @@ parseMessage(StreamContext *privateContext, char *message, JSON_Value *json) /* copy the stmt over, then free the extra allocated memory */ mesg->action = metadata->action; mesg->command.keepalive = stmt->stmt.keepalive; - free(stmt); } break; @@ -1287,7 +1271,6 @@ parseMessage(StreamContext *privateContext, char *message, JSON_Value *json) /* copy the stmt over, then free the extra allocated memory */ mesg->action = metadata->action; mesg->command.endpos = stmt->stmt.endpos; - free(stmt); } break; @@ -1421,10 +1404,6 @@ coalesceLogicalTransactionStatement(LogicalTransaction *txn, lastValuesArray->array[lastValuesArray->count++] = newValuesArray->array[0]; newValuesArray->count = 0; - /* Deallocate the tuple and the new statement */ - FreeLogicalMessageTupleArray(&(new->stmt.insert.new)); - free(new); - return true; } @@ -1565,151 +1544,6 @@ streamLogicalTransactionAppendStatement(LogicalTransaction *txn, } -/* - * FreeLogicalMessage frees the malloc'ated memory areas of a LogicalMessage. - */ -void -FreeLogicalMessage(LogicalMessage *msg) -{ - if (msg->isTransaction) - { - FreeLogicalTransaction(&(msg->command.tx)); - } -} - - -/* - * FreeLogicalTransaction frees the malloc'ated memory areas of a - * LogicalTransaction. 
- */ -void -FreeLogicalTransaction(LogicalTransaction *tx) -{ - LogicalTransactionStatement *currentStmt = tx->first; - - for (; currentStmt != NULL;) - { - switch (currentStmt->action) - { - case STREAM_ACTION_INSERT: - { - FreeLogicalMessageRelation(&(currentStmt->stmt.insert.table)); - FreeLogicalMessageTupleArray(&(currentStmt->stmt.insert.new)); - break; - } - - case STREAM_ACTION_UPDATE: - { - FreeLogicalMessageRelation(&(currentStmt->stmt.update.table)); - FreeLogicalMessageTupleArray(&(currentStmt->stmt.update.old)); - FreeLogicalMessageTupleArray(&(currentStmt->stmt.update.new)); - break; - } - - case STREAM_ACTION_DELETE: - { - FreeLogicalMessageRelation(&(currentStmt->stmt.delete.table)); - FreeLogicalMessageTupleArray(&(currentStmt->stmt.delete.old)); - break; - } - - case STREAM_ACTION_TRUNCATE: - { - FreeLogicalMessageRelation(&(currentStmt->stmt.truncate.table)); - break; - } - - /* no malloc'ated area in a BEGIN, COMMIT, or TRUNCATE statement */ - default: - { - break; - } - } - - LogicalTransactionStatement *stmt = currentStmt; - currentStmt = currentStmt->next; - - free(stmt); - } - - tx->first = NULL; -} - - -/* - * FreeLogicalMessageRelation frees the malloc'ated memory areas of - * LogicalMessageRelation. - */ -void -FreeLogicalMessageRelation(LogicalMessageRelation *table) -{ - if (table->pqMemory) - { - /* use PQfreemem for memory allocated by PQescapeIdentifer */ - PQfreemem(table->nspname); - PQfreemem(table->relname); - } - else - { - free(table->nspname); - free(table->relname); - } -} - - -/* - * FreeLogicalMessageTupleArray frees the malloc'ated memory areas of a - * LogicalMessageTupleArray. 
- */ -void -FreeLogicalMessageTupleArray(LogicalMessageTupleArray *tupleArray) -{ - for (int s = 0; s < tupleArray->count; s++) - { - LogicalMessageTuple *tuple = &(tupleArray->array[s]); - - (void) FreeLogicalMessageTuple(tuple); - } - - free(tupleArray->array); -} - - -/* - * FreeLogicalMessageTuple frees the malloc'ated memory areas of a - * LogicalMessageTuple. - */ -void -FreeLogicalMessageTuple(LogicalMessageTuple *tuple) -{ - for (int i = 0; i < tuple->cols; i++) - { - free(tuple->columns[i]); - } - free(tuple->columns); - - for (int r = 0; r < tuple->values.count; r++) - { - LogicalMessageValues *values = &(tuple->values.array[r]); - - for (int v = 0; v < values->cols; v++) - { - LogicalMessageValue *value = &(values->array[v]); - - if ((value->oid == TEXTOID || value->oid == BYTEAOID) && - !value->isNull) - { - free(value->val.str); - } - } - - free(values->array); - } - - free(tuple->values.array); -} - - /* * allocateLogicalMessageTuple allocates memory for count columns (and values) * for the given LogicalMessageTuple. 
@@ -2267,7 +2101,6 @@ stream_write_insert(FILE *out, LogicalMessageInsert *insert) FFORMAT(out, "EXECUTE %x%s;\n", hash, serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } return true; @@ -2457,7 +2290,6 @@ stream_write_update(FILE *out, LogicalMessageUpdate *update) FFORMAT(out, "EXECUTE %x%s;\n", hash, serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } return true; @@ -2543,7 +2375,6 @@ stream_write_delete(FILE *out, LogicalMessageDelete *delete) FFORMAT(out, "EXECUTE %x%s;\n", hash, serialized_string); json_free_serialized_string(serialized_string); - json_value_free(js); } return true; diff --git a/src/bin/pgcopydb/ld_wal2json.c b/src/bin/pgcopydb/ld_wal2json.c index f32849d52..fdee7005b 100644 --- a/src/bin/pgcopydb/ld_wal2json.c +++ b/src/bin/pgcopydb/ld_wal2json.c @@ -101,7 +101,6 @@ parseWal2jsonMessageActionAndXid(LogicalStreamContext *context) metadata->xid = (uint32_t) xid; } - json_value_free(json); return true; } diff --git a/src/bin/pgcopydb/lock_utils.c b/src/bin/pgcopydb/lock_utils.c index 02c048f1f..bc9b822e1 100644 --- a/src/bin/pgcopydb/lock_utils.c +++ b/src/bin/pgcopydb/lock_utils.c @@ -253,7 +253,7 @@ semaphore_cleanup(const char *pidfile) LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, fileContents, true)) + if (!splitLines(&lbuf, fileContents)) { /* errors have already been logged */ return false; @@ -266,19 +266,15 @@ semaphore_cleanup(const char *pidfile) pidfile, (long long) lbuf.count, PIDFILE_LINE_SEM_ID); - FreeLinesBuffer(&lbuf); return false; } if (!stringToInt(lbuf.lines[PIDFILE_LINE_SEM_ID], &(semaphore.semId))) { /* errors have already been logged */ - FreeLinesBuffer(&lbuf); return false; } - FreeLinesBuffer(&lbuf); - log_trace("Read semaphore id %d from stale pidfile", semaphore.semId); return semaphore_unlink(&semaphore); diff --git a/src/bin/pgcopydb/lsn_tracking.c b/src/bin/pgcopydb/lsn_tracking.c index 18b5365f4..a42bac547 100644 --- 
a/src/bin/pgcopydb/lsn_tracking.c +++ b/src/bin/pgcopydb/lsn_tracking.c @@ -205,7 +205,6 @@ lsn_tracking_iter(DatabaseCatalog *catalog, if (!lsn_tracking_iter_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -214,7 +213,6 @@ lsn_tracking_iter(DatabaseCatalog *catalog, if (!lsn_tracking_iter_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -225,7 +223,6 @@ lsn_tracking_iter(DatabaseCatalog *catalog, if (!lsn_tracking_iter_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -241,7 +238,6 @@ lsn_tracking_iter(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -299,7 +295,6 @@ lsn_tracking_iter_next(LSNTrackingIterator *iter) if (rc == SQLITE_DONE) { - free(iter->lsnTracking); iter->lsnTracking = NULL; return true; diff --git a/src/bin/pgcopydb/main.c b/src/bin/pgcopydb/main.c index ecce186d5..855c1bb7a 100644 --- a/src/bin/pgcopydb/main.c +++ b/src/bin/pgcopydb/main.c @@ -6,12 +6,16 @@ #include #include +#include + #include "postgres.h" #if (PG_VERSION_NUM >= 120000) #include "common/logging.h" #endif +#include "parson.h" + #include "cli_root.h" #include "copydb.h" #include "defaults.h" @@ -59,6 +63,9 @@ main(int argc, char **argv) /* allows changing process title in ps/top/ptree etc */ (void) init_ps_buffer(argc, argv); + /* set memory allocation function for JSON parson lib to use libgc */ + (void) json_set_allocation_functions(GC_malloc, GC_free); + /* set our logging infrastructure */ (void) set_logger(); diff --git a/src/bin/pgcopydb/parsing_utils.c b/src/bin/pgcopydb/parsing_utils.c index 3018c48ad..05f29338b 100644 --- a/src/bin/pgcopydb/parsing_utils.c +++ b/src/bin/pgcopydb/parsing_utils.c @@ -86,7 +86,7 @@ regexp_first_match(const char *string, const char *regex) regoff_t start = m[1].rm_so; regoff_t finish = m[1].rm_eo; int length = finish - start + 1; - char *result = (char *) malloc(length * sizeof(char)); + char *result = (char *) 
calloc(length, sizeof(char)); if (result == NULL) { @@ -127,11 +127,9 @@ parse_version_number(const char *version_string, if (!parse_pg_version_string(pg_version_string, pg_version)) { /* errors have already been logged */ - free(match); return false; } - free(match); return true; } @@ -710,7 +708,6 @@ buildPostgresBareURIfromPieces(URIParams *uriParams, char **pguri) return false; } appendPQExpBuffer(uri, "%s@", escaped); - free(escaped); } if (uriParams->hostname) @@ -724,7 +721,6 @@ buildPostgresBareURIfromPieces(URIParams *uriParams, char **pguri) return false; } appendPQExpBuffer(uri, "%s", escaped); - free(escaped); } if (uriParams->port) @@ -746,7 +742,6 @@ buildPostgresBareURIfromPieces(URIParams *uriParams, char **pguri) return false; } appendPQExpBuffer(uri, "%s", escaped); - free(escaped); } if (PQExpBufferBroken(uri)) @@ -780,7 +775,6 @@ buildPostgresURIfromPieces(URIParams *uriParams, char **pguri) /* prepare the mandatory part of the Postgres URI */ appendPQExpBufferStr(uri, *pguri); - free(*pguri); /* now add optional parameters to the Postgres URI */ for (int index = 0; index < uriParams->parameters.count; index++) @@ -812,8 +806,6 @@ buildPostgresURIfromPieces(URIParams *uriParams, char **pguri) index == 0 ? "?" : "&", keyword, escapedValue); - - free(escapedValue); } else { @@ -1095,56 +1087,3 @@ bareConnectionString(const char *pguri, SafeURI *safeURI) return true; } - - -/* - * freeSafeURI frees the dynamic memory allocated for handling the safe URI. - */ -void -freeSafeURI(SafeURI *safeURI) -{ - free(safeURI->pguri); - free(safeURI->password); - freeURIParams(&(safeURI->uriParams)); - - safeURI->pguri = NULL; - safeURI->password = NULL; -} - - -/* - * freeURIParams frees the dynamic memory allocated for handling URI params. 
- */ -void -freeURIParams(URIParams *params) -{ - free(params->username); - free(params->hostname); - free(params->port); - free(params->dbname); - freeKeyVal(&(params->parameters)); - - params->username = NULL; - params->hostname = NULL; - params->port = NULL; - params->dbname = NULL; -} - - -/* - * freeKeyVal frees the dynamic memory allocated for handling KeyVal parameters - */ -void -freeKeyVal(KeyVal *parameters) -{ - for (int i = 0; i < parameters->count; i++) - { - free(parameters->keywords[i]); - free(parameters->values[i]); - - parameters->keywords[i] = NULL; - parameters->values[i] = NULL; - } - - parameters->count = 0; -} diff --git a/src/bin/pgcopydb/parsing_utils.h b/src/bin/pgcopydb/parsing_utils.h index edfc5041c..67b96d4e2 100644 --- a/src/bin/pgcopydb/parsing_utils.h +++ b/src/bin/pgcopydb/parsing_utils.h @@ -112,8 +112,4 @@ bool parse_and_scrub_connection_string(const char *pguri, SafeURI *safeURI); bool bareConnectionString(const char *pguri, SafeURI *safeURI); -void freeSafeURI(SafeURI *safeURI); -void freeURIParams(URIParams *params); -void freeKeyVal(KeyVal *parameters); - #endif /* PARSING_UTILS_H */ diff --git a/src/bin/pgcopydb/pgcmd.c b/src/bin/pgcopydb/pgcmd.c index a6d78e50f..11805aa8b 100644 --- a/src/bin/pgcopydb/pgcmd.c +++ b/src/bin/pgcopydb/pgcmd.c @@ -28,6 +28,14 @@ #include "signals.h" #include "string_utils.h" +/* + * Because we did include defaults.h previously in the same compilation unit (a + * .c file), then the runprogram.h code is linked to the Garbage-Collector: + * calls to malloc() in there are replaced by calls to GC_malloc() as per + * defaults.h #define instructions. + * + * As a result we never call free_program(). 
+ */ #define RUN_PROGRAM_IMPLEMENTATION #include "runprogram.h" @@ -40,29 +48,32 @@ static bool pg_dump_db_extension_namespace_hook(void *ctx, SourceExtension *ext) bool psql_version(PostgresPaths *pgPaths) { - Program prog = run_program(pgPaths->psql, "--version", NULL); + Program *prog = run_program(pgPaths->psql, "--version", NULL); char pg_version_string[PG_VERSION_STRING_MAX] = { 0 }; int pg_version = 0; - if (prog.returnCode != 0) + if (prog == NULL) { - errno = prog.error; + log_error(ALLOCATION_FAILED_ERROR); + return false; + } + + if (prog->returnCode != 0) + { + errno = prog->error; log_error("Failed to run \"psql --version\" using program \"%s\": %m", pgPaths->psql); - free_program(&prog); return false; } - if (!parse_version_number(prog.stdOut, + if (!parse_version_number(prog->stdOut, pg_version_string, PG_VERSION_STRING_MAX, &pg_version)) { /* errors have already been logged */ - free_program(&prog); return false; } - free_program(&prog); strlcpy(pgPaths->pg_version, pg_version_string, PG_VERSION_STRING_MAX); @@ -193,44 +204,42 @@ set_psql_from_config_bindir(PostgresPaths *pgPaths, const char *pg_config) return false; } - Program prog = run_program(pg_config, "--bindir", NULL); + Program *prog = run_program(pg_config, "--bindir", NULL); + + if (prog == NULL) + { + log_error(ALLOCATION_FAILED_ERROR); + return false; + } - if (prog.returnCode != 0) + if (prog->returnCode != 0) { - errno = prog.error; + errno = prog->error; log_error("Failed to run \"pg_config --bindir\" using program \"%s\": %m", pg_config); - free_program(&prog); return false; } LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, prog.stdOut, false) || lbuf.count != 1) + if (!splitLines(&lbuf, prog->stdOut) || lbuf.count != 1) { log_error("Unable to parse output from pg_config --bindir"); - free_program(&prog); - FreeLinesBuffer(&lbuf); return false; } char *bindir = lbuf.lines[0]; join_path_components(psql, bindir, "psql"); - /* we're now done with the Program and its output */ - 
free_program(&prog); - if (!file_exists(psql)) { log_error("Failed to find psql at \"%s\" from PG_CONFIG at \"%s\"", pgPaths->psql, pg_config); - FreeLinesBuffer(&lbuf); return false; } strlcpy(pgPaths->psql, psql, sizeof(pgPaths->psql)); - FreeLinesBuffer(&lbuf); return true; } @@ -500,12 +509,6 @@ pg_dump_db(PostgresPaths *pgPaths, (void) initialize_program(&program, args, false); program.processBuffer = &processBufferCallback; - /* free the extension namespace string values */ - for (int i = 0; i < extNamespaceCount; i++) - { - free(extNamespaces[i]); - } - /* log the exact command line we're using */ int commandSize = snprintf_program_command_line(&program, command, BUFSIZE); @@ -531,12 +534,10 @@ pg_dump_db(PostgresPaths *pgPaths, if (program.returnCode != 0) { log_error("Failed to run pg_dump: exit code %d", program.returnCode); - free_program(&program); return false; } - free_program(&program); return true; } @@ -656,12 +657,10 @@ pg_dumpall_roles(PostgresPaths *pgPaths, if (program.returnCode != 0) { log_error("Failed to run pg_dump: exit code %d", program.returnCode); - free_program(&program); return false; } - free_program(&program); return true; } @@ -708,7 +707,7 @@ pg_restore_roles(PostgresPaths *pgPaths, LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, content, true)) + if (!splitLines(&lbuf, content)) { /* errors have already been logged */ return false; @@ -776,7 +775,6 @@ pg_restore_roles(PostgresPaths *pgPaths, { log_error("Failed to parse create role statement \"%s\"", currentLine); - FreeLinesBuffer(&lbuf); return false; } @@ -793,7 +791,6 @@ pg_restore_roles(PostgresPaths *pgPaths, if (!pgsql_role_exists(&pgsql, roleName, &exists)) { /* errors have already been logged */ - FreeLinesBuffer(&lbuf); return false; } @@ -814,7 +811,6 @@ pg_restore_roles(PostgresPaths *pgPaths, if (!pgsql_execute(&pgsql, createRole)) { /* errors have already been logged */ - FreeLinesBuffer(&lbuf); return false; } } @@ -825,14 +821,11 @@ 
pg_restore_roles(PostgresPaths *pgPaths, if (!pgsql_execute(&pgsql, currentLine)) { /* errors have already been logged */ - FreeLinesBuffer(&lbuf); return false; } } } - FreeLinesBuffer(&lbuf); - if (!pgsql_commit(&pgsql)) { /* errors have already been logged */ @@ -1012,12 +1005,10 @@ pg_restore_db(PostgresPaths *pgPaths, if (program.returnCode != 0) { log_error("Failed to run pg_restore: exit code %d", program.returnCode); - free_program(&program); return false; } - free_program(&program); return true; } @@ -1064,7 +1055,6 @@ pg_restore_list(PostgresPaths *pgPaths, if (program.returnCode != 0) { log_error("Failed to run pg_restore: exit code %d", program.returnCode); - free_program(&program); return false; } @@ -1072,11 +1062,9 @@ pg_restore_list(PostgresPaths *pgPaths, if (!parse_archive_list(listFilename, archive)) { /* errors have already been logged */ - free_program(&program); return false; } - free_program(&program); return true; } @@ -1192,7 +1180,7 @@ parse_archive_list(const char *filename, ArchiveContentArray *contents) LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, buffer, true)) + if (!splitLines(&lbuf, buffer)) { /* errors have already been logged */ return false; @@ -1229,7 +1217,6 @@ parse_archive_list(const char *filename, ArchiveContentArray *contents) "see above for details", (long long) lineNumber, filename); - FreeLinesBuffer(&lbuf); return false; } @@ -1250,8 +1237,6 @@ parse_archive_list(const char *filename, ArchiveContentArray *contents) ++contents->count; } - FreeLinesBuffer(&lbuf); - return true; } @@ -1649,27 +1634,3 @@ parse_archive_acl_or_comment(char *ptr, ArchiveContentItem *item) return true; } - - -/* - * FreeArchiveContentArray frees the memory allocated in the given - * ArchiveContentArray. 
- */ -bool -FreeArchiveContentArray(ArchiveContentArray *contents) -{ - for (int i = 0; i < contents->count; i++) - { - ArchiveContentItem *item = &(contents->array[i]); - - free(item->description); - free(item->restoreListName); - } - - free(contents->array); - - contents->count = 0; - contents->array = NULL; - - return true; -} diff --git a/src/bin/pgcopydb/pgcmd.h b/src/bin/pgcopydb/pgcmd.h index 5fe837ecf..b7fa3447e 100644 --- a/src/bin/pgcopydb/pgcmd.h +++ b/src/bin/pgcopydb/pgcmd.h @@ -239,6 +239,4 @@ bool parse_archive_acl_or_comment(char *ptr, ArchiveContentItem *item); bool parse_archive_list_entry(ArchiveContentItem *item, const char *line); bool tokenize_archive_list_entry(ArchiveToken *token); -bool FreeArchiveContentArray(ArchiveContentArray *contents); - #endif /* PGCMD_H */ diff --git a/src/bin/pgcopydb/pgsql.c b/src/bin/pgcopydb/pgsql.c index d98daa1a2..940f8d2f4 100644 --- a/src/bin/pgcopydb/pgsql.c +++ b/src/bin/pgcopydb/pgsql.c @@ -413,9 +413,6 @@ pgsql_finish(PGSQL *pgsql) pgsql->pgversion[0] = '\0'; pgsql->pgversion_num = 0; - /* we don't need the print-safe URL anymore */ - freeSafeURI(&(pgsql->safeURI)); - /* * When we fail to connect, on the way out we call pgsql_finish to * reset the connection to NULL. 
We still want the callers to be able @@ -443,7 +440,7 @@ log_connection_error(PGconn *connection, int logLevel) char *message = PQerrorMessage(connection); LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, message, false)) + if (!splitLines(&lbuf, message)) { /* errors have already been logged */ return; @@ -462,8 +459,6 @@ log_connection_error(PGconn *connection, int logLevel) log_level(logLevel, "%s", line); } } - - FreeLinesBuffer(&lbuf); } @@ -854,7 +849,7 @@ pgAutoCtlDefaultNoticeProcessor(void *arg, const char *message) { LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, (char *) message, false)) + if (!splitLines(&lbuf, (char *) message)) { /* errors have already been logged */ return; @@ -864,8 +859,6 @@ pgAutoCtlDefaultNoticeProcessor(void *arg, const char *message) { log_warn("%s", lbuf.lines[lineNumber]); } - - FreeLinesBuffer(&lbuf); } @@ -878,7 +871,7 @@ pgAutoCtlDebugNoticeProcessor(void *arg, const char *message) { LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, (char *) message, false)) + if (!splitLines(&lbuf, (char *) message)) { /* errors have already been logged */ return; @@ -888,8 +881,6 @@ pgAutoCtlDebugNoticeProcessor(void *arg, const char *message) { log_sql("%s", lbuf.lines[lineNumber]); } - - FreeLinesBuffer(&lbuf); } @@ -1679,7 +1670,7 @@ pgsql_send_with_params(PGSQL *pgsql, const char *sql, int paramCount, LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, message, false)) + if (!splitLines(&lbuf, message)) { /* errors have already been logged */ return false; @@ -1704,7 +1695,6 @@ pgsql_send_with_params(PGSQL *pgsql, const char *sql, int paramCount, } destroyPQExpBuffer(debugParameters); - FreeLinesBuffer(&lbuf); clear_results(pgsql); return false; @@ -2027,7 +2017,7 @@ pgsql_execute_log_error(PGSQL *pgsql, LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, message, false)) + if (!splitLines(&lbuf, message)) { /* errors have already been logged */ return; @@ -2041,8 +2031,6 @@ pgsql_execute_log_error(PGSQL *pgsql, 
lbuf.lines[lineNumber]); } - FreeLinesBuffer(&lbuf); - if (pgsql->logSQL) { /* when using send/fetch async queries, fetch doesn't have the sql */ @@ -2235,7 +2223,7 @@ clear_results(PGSQL *pgsql) LinesBuffer lbuf = { 0 }; char *pqmessage = PQerrorMessage(connection); - if (!splitLines(&lbuf, pqmessage, false)) + if (!splitLines(&lbuf, pqmessage)) { /* errors have already been logged */ return false; @@ -2248,7 +2236,6 @@ clear_results(PGSQL *pgsql) PQclear(result); pgsql_finish(pgsql); - FreeLinesBuffer(&lbuf); return false; } @@ -2819,7 +2806,7 @@ pgcopy_log_error(PGSQL *pgsql, PGresult *res, const char *context) LinesBuffer lbuf = { 0 }; char *message = PQerrorMessage(pgsql->connection); - if (!splitLines(&lbuf, message, false)) + if (!splitLines(&lbuf, message)) { /* errors have already been logged */ return; @@ -2862,8 +2849,6 @@ pgcopy_log_error(PGSQL *pgsql, PGresult *res, const char *context) PQbackendPID(pgsql->connection), context); - FreeLinesBuffer(&lbuf); - if (res != NULL) { PQclear(res); @@ -3230,7 +3215,7 @@ parseTimeLineHistory(const char *filename, const char *content, { LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, (char *) content, false)) + if (!splitLines(&lbuf, (char *) content)) { /* errors have already been logged */ return false; @@ -3283,7 +3268,6 @@ parseTimeLineHistory(const char *filename, const char *content, { log_error("Failed to parse history file line %lld: \"%s\"", (long long) lineNumber, ptr); - FreeLinesBuffer(&lbuf); return false; } @@ -3292,7 +3276,6 @@ parseTimeLineHistory(const char *filename, const char *content, if (!stringToUInt(lbuf.lines[lineNumber], &(entry->tli))) { log_error("Failed to parse history timeline \"%s\"", tabptr); - FreeLinesBuffer(&lbuf); return false; } @@ -3311,7 +3294,6 @@ parseTimeLineHistory(const char *filename, const char *content, { log_error("Failed to parse history timeline %d LSN \"%s\"", entry->tli, lsn); - FreeLinesBuffer(&lbuf); return false; } @@ -3327,8 +3309,6 @@ 
parseTimeLineHistory(const char *filename, const char *content, entry = &(system->timelines.history[++system->timelines.count]); } - FreeLinesBuffer(&lbuf); - /* * Create one more entry for the "tip" of the timeline, which has no entry * in the history file. @@ -3556,8 +3536,6 @@ pg_copy_large_object(PGSQL *src, return false; } - (void) free(buffer); - *bytesTransmitted += bytesRead; } while (bytesRead > 0); @@ -4423,7 +4401,7 @@ pgsql_stream_log_error(PGSQL *pgsql, PGresult *res, const char *message) { LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, pqmessage, false)) + if (!splitLines(&lbuf, pqmessage)) { /* errors have already been logged */ return; @@ -4446,8 +4424,6 @@ pgsql_stream_log_error(PGSQL *pgsql, PGresult *res, const char *message) log_error("%s", lbuf.lines[lineNumber]); } } - - FreeLinesBuffer(&lbuf); } if (res != NULL) @@ -4879,7 +4855,6 @@ pgsql_replication_origin_progress(PGSQL *pgsql, context.strVal, nodeName, flush ? "true" : "false"); - free(context.strVal); return false; } @@ -4968,7 +4943,6 @@ pgsql_replication_slot_exists(PGSQL *pgsql, const char *slotName, "confirmed_flush_lsn for slot \"%s\"", context.strVal, slotName); - free(context.strVal); return false; } @@ -5220,12 +5194,9 @@ pgsql_current_wal_flush_lsn(PGSQL *pgsql, uint64_t *lsn) log_error("Failed to parse LSN \"%s\" returned from " "pg_current_wal_flush_lsn()", context.strVal); - free(context.strVal); return false; } - - free(context.strVal); } return true; @@ -5275,12 +5246,9 @@ pgsql_current_wal_insert_lsn(PGSQL *pgsql, uint64_t *lsn) log_error("Failed to parse LSN \"%s\" returned from " "pg_current_wal_insert_lsn()", context.strVal); - free(context.strVal); return false; } - - free(context.strVal); } return true; diff --git a/src/bin/pgcopydb/pidfile.c b/src/bin/pgcopydb/pidfile.c index 7ee5bc8ad..40f4e3d03 100644 --- a/src/bin/pgcopydb/pidfile.c +++ b/src/bin/pgcopydb/pidfile.c @@ -118,7 +118,7 @@ read_pidfile(const char *pidfile, pid_t *pid) LinesBuffer lbuf = { 0 }; - if 
(!splitLines(&lbuf, fileContents, true) || lbuf.count != 1) + if (!splitLines(&lbuf, fileContents) || lbuf.count != 1) { /* errors have already been logged */ return false; @@ -128,8 +128,6 @@ read_pidfile(const char *pidfile, pid_t *pid) *pid = pidnum; - FreeLinesBuffer(&lbuf); - if (pid <= 0) { log_debug("Read negative pid %d in file \"%s\", removing it", diff --git a/src/bin/pgcopydb/progress.c b/src/bin/pgcopydb/progress.c index 1bc78ebb6..2415206d6 100644 --- a/src/bin/pgcopydb/progress.c +++ b/src/bin/pgcopydb/progress.c @@ -119,7 +119,6 @@ copydb_prepare_schema_json_file(CopyDataSpec *copySpecs) } json_free_serialized_string(serialized_string); - json_value_free(js); return true; } diff --git a/src/bin/pgcopydb/schema.c b/src/bin/pgcopydb/schema.c index 816d739cf..ae956762a 100644 --- a/src/bin/pgcopydb/schema.c +++ b/src/bin/pgcopydb/schema.c @@ -3932,12 +3932,9 @@ getSchemaList(void *ctx, PGresult *result) { /* errors have already been logged */ ++errors; - free(schema); break; } } - - free(schema); } context->parsedOk = errors == 0; @@ -4003,12 +4000,9 @@ getRoleList(void *ctx, PGresult *result) { /* errors have already been logged */ ++errors; - free(role); break; } } - - free(role); } context->parsedOk = errors == 0; @@ -4042,7 +4036,6 @@ getDatabaseList(void *ctx, PGresult *result) if (!parseCurrentDatabase(result, rowNumber, database)) { parsedOk = false; - free(database); break; } @@ -4052,12 +4045,9 @@ getDatabaseList(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(database); break; } } - - free(database); } context->parsedOk = parsedOk; @@ -4159,8 +4149,6 @@ getDatabaseProperties(void *ctx, PGresult *result) if (!parseDatabaseProperty(result, rowNumber, property)) { parsedOk = false; - free(property->setconfig); - free(property); break; } @@ -4170,14 +4158,9 @@ getDatabaseProperties(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(property->setconfig); - 
free(property); break; } } - - free(property->setconfig); - free(property); } context->parsedOk = parsedOk; @@ -4356,7 +4339,6 @@ getExtensionList(void *ctx, PGresult *result) if (!parseCurrentExtensionConfig(result, rowNumber, config)) { parsedOk = false; - free(config); break; } @@ -4368,16 +4350,12 @@ getExtensionList(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(config); break; } } - - free(config); } } - free(extension); context->parsedOk = parsedOk; } @@ -4567,7 +4545,6 @@ getExtensionsVersions(void *ctx, PGresult *result) /* issue a warning but let's try anyway */ log_warn("BUG? context's array is not null in getExtensionsVersions"); - free(context->evArray->array); context->evArray->array = NULL; } @@ -4729,12 +4706,9 @@ getCollationList(void *ctx, PGresult *result) { /* errors have already been logged */ ++errors; - free(collation); break; } } - - free(collation); } context->parsedOk = errors == 0; @@ -4767,7 +4741,6 @@ getTableArray(void *ctx, PGresult *result) if (!parseCurrentSourceTable(result, rowNumber, table)) { parsedOk = false; - free(table); break; } @@ -4777,12 +4750,9 @@ getTableArray(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(table); break; } } - - free(table); } context->parsedOk = parsedOk; @@ -5008,8 +4978,6 @@ parseCurrentSourceTable(PGresult *result, int rowNumber, SourceTable *table) value); ++errors; } - - json_value_free(json); } log_trace("parseCurrentSourceTable: %s.%s", table->nspname, table->relname); @@ -5098,7 +5066,6 @@ getSequenceArray(void *ctx, PGresult *result) if (!parseCurrentSourceSequence(result, rowNumber, seq)) { parsedOk = false; - free(seq); break; } @@ -5108,12 +5075,9 @@ getSequenceArray(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(seq); break; } } - - free(seq); } context->parsedOk = parsedOk; @@ -5268,7 +5232,6 @@ getIndexArray(void *ctx, PGresult *result) if 
(!parseCurrentSourceIndex(result, rowNumber, index)) { parsedOk = false; - free(index); break; } @@ -5278,7 +5241,6 @@ getIndexArray(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(index); break; } @@ -5289,13 +5251,10 @@ getIndexArray(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(index); break; } } } - - free(index); } context->parsedOk = parsedOk; @@ -5590,12 +5549,9 @@ getDependArray(void *ctx, PGresult *result) { /* errors have already been logged */ parsedOk = false; - free(depend); break; } } - - free(depend); } context->parsedOk = parsedOk; diff --git a/src/bin/pgcopydb/snapshot.c b/src/bin/pgcopydb/snapshot.c index 6dc5ad30f..fee84ba67 100644 --- a/src/bin/pgcopydb/snapshot.c +++ b/src/bin/pgcopydb/snapshot.c @@ -508,7 +508,7 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) /* make sure to use only the first line of the file, without \n */ LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, contents, true)) + if (!splitLines(&lbuf, contents)) { /* errors have already been logged */ return false; @@ -517,7 +517,6 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) if (lbuf.count != 5) { log_error("Failed to parse replication slot file \"%s\"", filename); - free(contents); return false; } @@ -532,7 +531,6 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) filename, (long long) strlen(lbuf.lines[0]), (long long) sizeof(slot->slotName)); - free(contents); return false; } @@ -542,7 +540,6 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) log_error("Failed to parse LSN \"%s\" from file \"%s\"", lbuf.lines[1], filename); - free(contents); return false; } @@ -557,7 +554,6 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) filename, (long long) strlen(lbuf.lines[2]), (long long) sizeof(slot->snapshot)); - free(contents); return false; } @@ -569,7 +565,6 @@ snapshot_read_slot(const char 
*filename, ReplicationSlot *slot) log_error("Failed to read plugin \"%s\" from file \"%s\"", lbuf.lines[3], filename); - free(contents); return false; } @@ -585,7 +580,6 @@ snapshot_read_slot(const char *filename, ReplicationSlot *slot) filename); } - free(contents); log_notice("Read replication slot file \"%s\" with snapshot \"%s\", " "slot \"%s\", lsn %X/%X, and plugin \"%s\"", diff --git a/src/bin/pgcopydb/string_utils.c b/src/bin/pgcopydb/string_utils.c index 1821b6bc1..ed661822f 100644 --- a/src/bin/pgcopydb/string_utils.c +++ b/src/bin/pgcopydb/string_utils.c @@ -589,10 +589,9 @@ countLines(char *buffer) * area. */ bool -splitLines(LinesBuffer *lbuf, char *buffer, bool ownsBuffer) +splitLines(LinesBuffer *lbuf, char *buffer) { lbuf->buffer = buffer; - lbuf->ownsBuffer = ownsBuffer; lbuf->count = countLines(lbuf->buffer); if (lbuf->buffer == NULL || lbuf->count == 0) @@ -642,21 +641,6 @@ splitLines(LinesBuffer *lbuf, char *buffer, bool ownsBuffer) } -/* - * FreeLinesBuffer frees the allocated memory for LinesBuffer instance. 
- */ -void -FreeLinesBuffer(LinesBuffer *lbuf) -{ - free(lbuf->lines); - - if (lbuf->ownsBuffer) - { - free(lbuf->buffer); - } -} - - /* * processBufferCallback is a function callback to use with the subcommands.c * library when we want to output a command's output as it's running, such as @@ -668,7 +652,7 @@ processBufferCallback(const char *buffer, bool error) const char *warningPattern = "^(pg_dump: warning:|pg_restore: warning:)"; LinesBuffer lbuf = { 0 }; - if (!splitLines(&lbuf, (char *) buffer, true)) + if (!splitLines(&lbuf, (char *) buffer)) { /* errors have already been logged */ return; @@ -685,8 +669,6 @@ processBufferCallback(const char *buffer, bool error) log_level(logLevel, "%s", line); } } - - FreeLinesBuffer(&lbuf); } diff --git a/src/bin/pgcopydb/string_utils.h b/src/bin/pgcopydb/string_utils.h index 4b01c3b90..1d6536b23 100644 --- a/src/bin/pgcopydb/string_utils.h +++ b/src/bin/pgcopydb/string_utils.h @@ -46,15 +46,13 @@ bool IntervalToString(uint64_t millisecs, char *buffer, size_t size); typedef struct LinesBuffer { - bool ownsBuffer; char *buffer; uint64_t count; char **lines; /* malloc'ed area */ } LinesBuffer; uint64_t countLines(char *buffer); -bool splitLines(LinesBuffer *lbuf, char *buffer, bool ownsBuffer); -void FreeLinesBuffer(LinesBuffer *lbuf); +bool splitLines(LinesBuffer *lbuf, char *buffer); void processBufferCallback(const char *buffer, bool error); diff --git a/src/bin/pgcopydb/summary.c b/src/bin/pgcopydb/summary.c index e066b9c62..f947e4a3e 100644 --- a/src/bin/pgcopydb/summary.c +++ b/src/bin/pgcopydb/summary.c @@ -2265,7 +2265,6 @@ summary_iter_timing(DatabaseCatalog *catalog, if (!summary_iter_timing_init(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2274,7 +2273,6 @@ summary_iter_timing(DatabaseCatalog *catalog, if (!summary_iter_timing_next(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2285,7 +2283,6 @@ summary_iter_timing(DatabaseCatalog *catalog, if 
(!summary_iter_timing_finish(iter)) { /* errors have already been logged */ - free(iter); return false; } @@ -2301,7 +2298,6 @@ summary_iter_timing(DatabaseCatalog *catalog, } } - free(iter); return true; } @@ -2363,8 +2359,6 @@ summary_iter_timing_next(TimingIterator *iter) if (rc == SQLITE_DONE) { - free(iter->timing->label); - free(iter->timing); iter->timing = NULL; return true; @@ -2442,8 +2436,6 @@ summary_iter_timing_finish(TimingIterator *iter) /* in case we finish before reaching the DONE step */ if (iter->timing != NULL) { - free(iter->timing->label); - free(iter->timing); iter->timing = NULL; } @@ -2963,7 +2955,6 @@ print_summary_as_json(Summary *summary, const char *filename) } json_free_serialized_string(serialized_string); - json_value_free(js); } diff --git a/src/bin/pgcopydb/table-data.c b/src/bin/pgcopydb/table-data.c index d0119de69..1b6835d31 100644 --- a/src/bin/pgcopydb/table-data.c +++ b/src/bin/pgcopydb/table-data.c @@ -27,7 +27,6 @@ static bool copydb_copy_supervisor_add_table_hook(void *ctx, SourceTable *table); -static void FreeCopyArgs(CopyArgs *args); /* * copydb_table_data fetches the list of tables from the source database and @@ -855,7 +854,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, "see above for details", oid); - free(table); return false; } @@ -871,7 +869,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, if (!copydb_init_table_specs(tableSpecs, specs, table, part)) { /* errors have already been logged */ - FreeCopyTableDataSpec(tableSpecs); return false; } @@ -904,7 +901,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, if (!copydb_table_create_lockfile(specs, tableSpecs, &isDone)) { /* errors have already been logged */ - FreeCopyTableDataSpec(tableSpecs); return false; } @@ -913,7 +909,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, log_info("Skipping table %s (%u), already done on a previous run", 
tableSpecs->sourceTable->qname, tableSpecs->sourceTable->oid); - FreeCopyTableDataSpec(tableSpecs); return true; } @@ -925,7 +920,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, if (!copydb_copy_table(specs, src, dst, tableSpecs)) { /* errors have already been logged */ - FreeCopyTableDataSpec(tableSpecs); return false; } } @@ -933,7 +927,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, if (!copydb_mark_table_as_done(specs, tableSpecs)) { /* errors have already been logged */ - FreeCopyTableDataSpec(tableSpecs); return false; } @@ -967,7 +960,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, &indexesAreBeingProcessed)) { /* errors have already been logged */ - FreeCopyTableDataSpec(tableSpecs); return false; } else if (allPartsDone && !indexesAreBeingProcessed) @@ -987,7 +979,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, { log_error("Failed to count indexes attached to table %s", tableSpecs->sourceTable->qname); - FreeCopyTableDataSpec(tableSpecs); return false; } @@ -1002,7 +993,6 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, log_error("Failed to queue VACUUM ANALYZE %s [%u]", sourceTable->qname, sourceTable->oid); - FreeCopyTableDataSpec(tableSpecs); return false; } } @@ -1012,14 +1002,11 @@ copydb_copy_data_by_oid(CopyDataSpec *specs, PGSQL *src, PGSQL *dst, log_error("Failed to add the indexes for %s, " "see above for details", tableSpecs->sourceTable->qname); - FreeCopyTableDataSpec(tableSpecs); return false; } } } - FreeCopyTableDataSpec(tableSpecs); - return true; } @@ -1494,28 +1481,3 @@ copydb_prepare_summary_command(CopyTableDataSpec *tableSpecs) return true; } - - -/* - * FreeCopyTableDataSpec takes care of free'ing the allocated memory for the - * CopyTableDataSpec. 
- */ -void -FreeCopyTableDataSpec(CopyTableDataSpec *tableSpecs) -{ - free(tableSpecs->sourceTable); - free(tableSpecs->summary.command); - FreeCopyArgs(&(tableSpecs->copyArgs)); -} - - -/* - * FreeCopyArgs takes care of free'ing the allocated memory for the CopyArgs. - */ -static void -FreeCopyArgs(CopyArgs *args) -{ - free(args->srcAttrList); - free(args->srcWhereClause); - free(args->dstAttrList); -} diff --git a/tests/Dockerfile.pagila b/tests/Dockerfile.pagila index bc5edea19..6ab78f334 100644 --- a/tests/Dockerfile.pagila +++ b/tests/Dockerfile.pagila @@ -18,6 +18,7 @@ RUN apt-get update \ strace \ gdb \ sqlite3 \ + libgc1 \ libpq5 \ postgresql-client-common \ curl \