From 217d27b1b9d6ba692625d44999fc22ff0f7bf5b3 Mon Sep 17 00:00:00 2001
From: dwelch-spike <53876192+dwelch-spike@users.noreply.github.com>
Date: Mon, 18 Sep 2023 12:57:14 -0700
Subject: [PATCH] build: tools-2643 add make shared, builds asbackup as a dynamic lib (#66)

* build: add make shared, builds asbackup as a dynamic lib

* feat: expose restore_run and backup_run for dynamic lib use

* tools-2667 set s3 configs in run functions

Co-authored-by: Jesse S

---------

Co-authored-by: Jesse S
---
 Makefile             |  37 ++++++-
 README.md            |   4 +-
 include/backup.h     |   4 +
 include/restore.h    |   2 +
 src/backup.c         |  66 +++++++++---
 src/backup_config.c  |  17 ----
 src/restore.c        | 236 ++++++++++++++++++++++++++++---------------
 src/restore_config.c |  16 ---
 8 files changed, 252 insertions(+), 130 deletions(-)

diff --git a/Makefile b/Makefile
index 4f438fe0..68263f20 100644
--- a/Makefile
+++ b/Makefile
@@ -48,6 +48,15 @@ ifdef M1_HOME_BREW
   OPENSSL_PREFIX = /opt/homebrew/opt/openssl
 endif
 
+ifeq ($(OS),Darwin)
+  DYNAMIC_SUFFIX = dylib
+  DYNAMIC_FLAG = -dynamiclib
+else
+  DYNAMIC_SUFFIX = so
+  DYNAMIC_FLAG = -shared
+endif
+DYNAMIC_OPTIONS =
+
 CC ?= cc
 
 DWARF := $(shell $(CC) -Wall -Wextra -O2 -o /tmp/asflags_$${$$} src/flags.c; \
@@ -279,6 +288,9 @@ BACKUP := $(DIR_BIN)/asbackup
 RESTORE := $(DIR_BIN)/asrestore
 TOML := $(DIR_TOML)/libtoml.a
 
+BACKUP_DYNAMIC := $(DIR_BIN)/asbackup.$(DYNAMIC_SUFFIX)
+RESTORE_DYNAMIC := $(DIR_BIN)/asrestore.$(DYNAMIC_SUFFIX)
+
 SRCS := $(BACKUP_SRC) $(RESTORE_SRC)
 OBJS := $(BACKUP_OBJ) $(RESTORE_OBJ)
 DEPS := $(BACKUP_DEP) $(RESTORE_DEP)
@@ -320,11 +332,24 @@ TEST_DEPS := $(sort $(TEST_DEPS))
 .PHONY: all
 all: $(BINS)
 
+# used as a pre-requisite for make shared
+# this rule is not meant for manual use by a user
+.PHONY: _set_dynamic_options
+_set_dynamic_options: $(TOML)
+	$(eval DYNAMIC_OPTIONS = -fPIC)
+
+# builds asbackup and asrestore as shared libraries
+# asbackup is designed as a standalone exe, use at your own risk
+# run this with the same options you would use in a normal build
+.PHONY: shared
+shared: _set_dynamic_options $(BACKUP_DYNAMIC) $(RESTORE_DYNAMIC)
+	$(eval DYNAMIC_OPTIONS =)
+
 .PHONY: clean
 clean:
 	$(MAKE) -C $(DIR_TOML) clean
 	$(MAKE) -C $(DIR_C_CLIENT) clean
-	rm -f $(DEPS) $(OBJS) $(BINS) $(TEST_OBJS) $(TEST_DEPS) $(TEST_BINS)
+	rm -f $(DEPS) $(OBJS) $(BINS) $(TEST_OBJS) $(TEST_DEPS) $(TEST_BINS) $(BACKUP_DYNAMIC) $(RESTORE_DYNAMIC)
 	if [ -d $(DIR_OBJ) ]; then rmdir $(DIR_OBJ); fi
 	if [ -d $(DIR_BIN) ]; then rmdir $(DIR_BIN); fi
 	if [ -d $(DIR_TEST_OBJ) ]; then rm -r $(DIR_TEST_OBJ); fi
@@ -367,10 +392,10 @@ $(DIR_BIN):
 	mkdir $(DIR_BIN)
 
 $(DIR_OBJ)/%_c.o: $(DIR_SRC)/%.c | $(DIR_OBJ)
-	$(CC) $(CFLAGS) -MMD -o $@ -c $(INCLUDES) $<
+	$(CC) $(DYNAMIC_OPTIONS) $(CFLAGS) -MMD -o $@ -c $(INCLUDES) $<
 
 $(DIR_OBJ)/%_cc.o: $(DIR_SRC)/%.cc | $(DIR_OBJ)
-	$(CXX) $(CXXFLAGS) -MMD -o $@ -c $(INCLUDES) $<
+	$(CXX) $(DYNAMIC_OPTIONS) $(CXXFLAGS) -MMD -o $@ -c $(INCLUDES) $<
 
 $(BACKUP): $(BACKUP_OBJ) $(TOML) $(C_CLIENT_LIB) | $(DIR_BIN)
 	$(CXX) $(LDFLAGS) -o $(BACKUP) $(BACKUP_OBJ) $(LIBRARIES)
@@ -378,6 +403,12 @@ $(BACKUP): $(BACKUP_OBJ) $(TOML) $(C_CLIENT_LIB) | $(DIR_BIN)
 $(RESTORE): $(RESTORE_OBJ) $(TOML) $(C_CLIENT_LIB) | $(DIR_BIN)
 	$(CXX) $(LDFLAGS) -o $(RESTORE) $(RESTORE_OBJ) $(LIBRARIES)
 
+$(BACKUP_DYNAMIC): $(BACKUP_OBJ) $(TOML) $(C_CLIENT_LIB) | $(DIR_BIN)
+	$(CXX) $(DYNAMIC_FLAG) $(LDFLAGS) -o $(BACKUP_DYNAMIC) $(BACKUP_OBJ) $(LIBRARIES)
+
+$(RESTORE_DYNAMIC): $(RESTORE_OBJ) $(TOML) $(C_CLIENT_LIB) | $(DIR_BIN)
+	$(CXX) $(DYNAMIC_FLAG) $(LDFLAGS) -o $(RESTORE_DYNAMIC) $(RESTORE_OBJ) $(LIBRARIES)
+
 $(TOML):
 	$(MAKE) -C $(DIR_TOML)
 
diff --git a/README.md b/README.md
index 991cdf3b..33a2fe74 100644
--- a/README.md
+++ b/README.md
@@ -41,7 +41,7 @@ apt-get update
 apt-get install build-essential libssl-dev libuv1-dev libcurl4-openssl-dev libzstd-dev
 
 # for aws-sdk-cpp build
-apt-get install cmake
+apt-get install cmake pkg-config zlib1g-dev
 
 # download aws sdk
 git clone https://github.com/aws/aws-sdk-cpp.git
@@ -96,7 +96,7 @@ mkdir build
 cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_ONLY="s3" -DBUILD_SHARED_LIBS=ON -DENABLE_TESTING=OFF -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_INSTALL_LIBDIR=lib
 make -C build
 
-# install aws static sdk
+# install aws dynamic sdk
 cd build
 make install
 cd ../..
diff --git a/include/backup.h b/include/backup.h
index 54bc3280..f1ad520b 100644
--- a/include/backup.h
+++ b/include/backup.h
@@ -71,6 +71,9 @@ extern "C" {
 // Estimate total backup file sizes with 99.9% confidence.
 #define BACKUP_FILE_ESTIMATE_CONFIDENCE_LEVEL 0.999
 
+#define RUN_BACKUP_SUCCESS ((void*) 0)
+#define RUN_BACKUP_FAILURE ((void*) -1lu)
+
 /*
  * The struct used to maintain state information about a backup file which was
  * not completely filled from a backup task
@@ -87,6 +90,7 @@ typedef struct queued_backup_fd {
 //
 
 int32_t backup_main(int32_t argc, char **argv);
+backup_status_t* backup_run(backup_config_t* conf);
 
 /*
  * Returns the backup config/status struct being used by the currently running
diff --git a/include/restore.h b/include/restore.h
index 8c1b7439..20c2b53a 100644
--- a/include/restore.h
+++ b/include/restore.h
@@ -66,6 +66,7 @@ extern "C" {
 // The interval for logging per-thread timing stats.
 #define STAT_INTERVAL 10
 
+#define RUN_RESTORE_FAILURE ((void*) -1lu)
 
 /*
  * The backup file information pushed to the job queue and picked up by the restore threads.
@@ -142,6 +143,7 @@ typedef enum {
 //
 
 int32_t restore_main(int32_t argc, char **argv);
+restore_status_t* restore_run(restore_config_t *conf);
 
 #ifdef __cplusplus
 }
diff --git a/src/backup.c b/src/backup.c
index 901d6af7..ed69ac5c 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -50,15 +50,11 @@ typedef struct backup_globals {
 // of the vector).
 static as_vector g_globals;
 
-#define RUN_BACKUP_SUCCESS ((void*) 0)
-#define RUN_BACKUP_FAILURE ((void*) -1lu)
-
-
 //==========================================================
 // Forward Declarations.
 //
 
-static backup_status_t* run_backup(backup_config_t* conf);
+static backup_status_t* start_backup(backup_config_t* conf);
 
 typedef struct distr_stats {
 	uint64_t total;
@@ -195,6 +191,7 @@ static void show_estimate(FILE* mach_fd, uint64_t* samples, uint32_t n_samples,
 		uint64_t rec_count_estimate, io_write_proxy_t* fd);
 static void sig_hand(int32_t sig);
 static void no_op(int32_t sig);
+static void set_s3_configs(const backup_config_t*);
 
 
 //==========================================================
@@ -220,7 +217,7 @@ backup_main(int32_t argc, char **argv)
 		goto cleanup;
 	}
 
-	backup_status_t* status = run_backup(&conf);
+	backup_status_t* status = start_backup(&conf);
 	if (status == RUN_BACKUP_SUCCESS) {
 		res = EXIT_SUCCESS;
 	}
@@ -234,14 +231,37 @@ cleanup:
 	file_proxy_cloud_shutdown();
 
-	as_vector_destroy(&g_globals);
-
 	ver("Exiting with status code %d", res);
-
 	return res;
 }
 
+/*
+ * FOR USE WITH ASBACKUP AS A LIBRARY (Use at your own risk)
+ *
+ * Runs a backup job with the given configuration. This method is not thread
+ * safe and should not be called multiple times in parallel, as it uses global
+ * variables to handle signal interruption.
+ *
+ * The passed-in backup config must be destroyed by the caller using backup_config_destroy().
+ * To enable C client logging, call enable_client_log() before calling this function.
+ *
+ * Returns the backup_status struct used during the run, which must be freed by the
+ * caller using backup_status_destroy(), then free().
+ * Only free the return value if it is neither RUN_BACKUP_FAILURE nor RUN_BACKUP_SUCCESS.
+ */
+backup_status_t*
+backup_run(backup_config_t* conf) {
+	as_vector_init(&g_globals, sizeof(backup_globals_t), 1);
+
+	backup_status_t* status = start_backup(conf);
+
+	file_proxy_cloud_shutdown();
+	as_vector_destroy(&g_globals);
+
+	return status;
+}
+
 backup_config_t*
 get_g_backup_conf(void)
 {
@@ -272,7 +292,7 @@ get_g_backup_status(void)
  * caller).
  */
 static backup_status_t*
-run_backup(backup_config_t* conf)
+start_backup(backup_config_t* conf)
 {
 	int32_t res = EXIT_FAILURE;
 	bool do_backup_save_state = false;
@@ -281,6 +301,8 @@ run_backup(backup_config_t* conf)
 
 	push_backup_globals(conf, NULL);
 
+	set_s3_configs(conf);
+
 	if (conf->remove_artifacts) {
 
 		if (conf->output_file != NULL) {
@@ -491,14 +513,14 @@ run_backup(backup_config_t* conf)
 		bool cur_silent_val = g_silent;
 		g_silent = true;
-		backup_status_t* estimate_status = run_backup(estimate_conf);
+		backup_status_t* estimate_status = start_backup(estimate_conf);
 		g_silent = cur_silent_val;
 
 		backup_config_destroy(estimate_conf);
 		cf_free(estimate_conf);
 
 		// re-enable signal handling, since it was disabled at the end of
-		// the estimate run in run_backup.
+		// the estimate run in start_backup.
 		set_sigaction(sig_hand);
 
 		if (estimate_status == RUN_BACKUP_FAILURE) {
@@ -2558,3 +2580,23 @@ no_op(int32_t sig)
 	(void) sig;
 }
 
+static void
+set_s3_configs(const backup_config_t* conf)
+{
+	if (conf->s3_region != NULL) {
+		s3_set_region(conf->s3_region);
+	}
+
+	if (conf->s3_profile != NULL) {
+		s3_set_profile(conf->s3_profile);
+	}
+
+	if (conf->s3_endpoint_override != NULL) {
+		s3_set_endpoint(conf->s3_endpoint_override);
+	}
+
+	s3_set_max_async_downloads(conf->s3_max_async_downloads);
+	s3_set_max_async_uploads(conf->s3_max_async_uploads);
+	s3_set_connect_timeout_ms(conf->s3_connect_timeout);
+	s3_set_log_level(conf->s3_log_level);
+}
\ No newline at end of file
diff --git a/src/backup_config.c b/src/backup_config.c
index 0c9fa74d..5632a296 100644
--- a/src/backup_config.c
+++ b/src/backup_config.c
@@ -744,23 +744,6 @@ backup_config_init(int argc, char* argv[], backup_config_t* conf)
 		return BACKUP_CONFIG_INIT_FAILURE;
 	}
 
-	if (conf->s3_region != NULL) {
-		s3_set_region(conf->s3_region);
-	}
-
-	if (conf->s3_profile != NULL) {
-		s3_set_profile(conf->s3_profile);
-	}
-
-	if (conf->s3_endpoint_override != NULL) {
-		s3_set_endpoint(conf->s3_endpoint_override);
-	}
-
-	s3_set_max_async_downloads(conf->s3_max_async_downloads);
-	s3_set_max_async_uploads(conf->s3_max_async_uploads);
-	s3_set_connect_timeout_ms(conf->s3_connect_timeout);
-	s3_set_log_level(conf->s3_log_level);
-
 	if (conf->estimate) {
 		if (conf->filter_exp != NULL || conf->node_list != NULL ||
 				conf->mod_after > 0 || conf->mod_before > 0 || conf->ttl_zero ||
diff --git a/src/restore.c b/src/restore.c
index facb1024..010def49 100644
--- a/src/restore.c
+++ b/src/restore.c
@@ -38,11 +38,11 @@
 static restore_config_t* g_conf;
 static restore_status_t* g_status;
 
-
 //==========================================================
 // Forward Declarations.
 //
 
+static restore_status_t* start_restore(restore_config_t *conf);
 static bool has_stopped(void);
 static void stop(void);
 static int update_file_pos(per_thread_context_t* ptc);
@@ -67,6 +67,7 @@ static bool wait_udf(aerospike *as, udf_param *udf, uint32_t timeout);
 static void sig_hand(int32_t sig);
 //static void print_stat(per_thread_context_t *ptc, cf_clock *prev_log,
 //		uint64_t *prev_records, cf_clock *now, cf_clock *store_time, cf_clock *read_time);
+static void set_s3_configs(const restore_config_t* conf);
 
 
 //==========================================================
@@ -81,19 +82,81 @@ restore_main(int32_t argc, char **argv)
 	enable_client_log();
 
 	restore_config_t conf;
-	g_conf = &conf;
 
 	int restore_config_res = restore_config_init(argc, argv, &conf);
 	if (restore_config_res != 0) {
 		if (restore_config_res == RESTORE_CONFIG_INIT_EXIT) {
 			res = EXIT_SUCCESS;
 		}
+		goto cleanup;
+	}
+
+	restore_status_t *status = start_restore(&conf);
+	if (status != RUN_RESTORE_FAILURE) {
+		restore_status_destroy(status);
+		cf_free(status);
+		res = EXIT_SUCCESS;
+	}
+
+	restore_config_destroy(&conf);
+
+cleanup:
+	file_proxy_cloud_shutdown();
+	ver("Exiting with status code %d", res);
+	return res;
+}
+
+/*
+ * FOR USE WITH ASRESTORE AS A LIBRARY (Use at your own risk)
+ *
+ * Runs a restore job with the given configuration. This method is not thread
+ * safe and should not be called multiple times in parallel, as it uses global
+ * variables to handle signal interruption.
+ *
+ * The passed-in restore_config must be freed by the caller using restore_config_destroy().
+ * To enable C client logging, call enable_client_log() before calling this function.
+ *
+ * Returns the restore_status struct used during the run, which must be freed by the
+ * caller using restore_status_destroy(), then cf_free().
+ * Only free the return value if it is != RUN_RESTORE_FAILURE.
+ */
+restore_status_t*
+restore_run(restore_config_t *conf) {
+	restore_status_t *status = start_restore(conf);
+	file_proxy_cloud_shutdown();
+
+	return status;
+}
+
+//==========================================================
+// Local helpers.
+//
+
+/*
+ * Runs a restore job with the given configuration. This method is not thread
+ * safe and should not be called multiple times in parallel, as it uses global
+ * variables to handle signal interruption.
+ *
+ * Returns the restore_status struct used during the run, which must be freed by the
+ * caller.
+ * Only free the return value if it is != RUN_RESTORE_FAILURE.
+ */
+static restore_status_t*
+start_restore(restore_config_t *conf)
+{
+	int32_t res = EXIT_FAILURE;
+	g_conf = conf;
+
+	restore_status_t *status = (restore_status_t*) malloc(sizeof(restore_status_t));
+	if (status == NULL) {
+		err("Failed to allocate %zu bytes for restore status struct",
+				sizeof(restore_status_t));
 		goto cleanup1;
 	}
 
-	restore_status_t status;
-	g_status = &status;
-	if (!restore_status_init(&status, &conf)) {
+	set_s3_configs(conf);
+
+	g_status = status;
+	if (!restore_status_init(status, conf)) {
 		err("Failed to initialize restore status");
 		goto cleanup1;
 	}
@@ -101,41 +164,41 @@ restore_main(int32_t argc, char **argv)
 	signal(SIGINT, sig_hand);
 	signal(SIGTERM, sig_hand);
 
-	if (conf.validate) {
+	if (conf->validate) {
 		inf("Starting validation of %s",
-				conf.input_file != NULL ?
-				file_proxy_is_std_path(conf.input_file) ? "[stdin]" : conf.input_file :
-				conf.directory);
+				conf->input_file != NULL ?
+				file_proxy_is_std_path(conf->input_file) ? "[stdin]" : conf->input_file :
"[stdin]" : conf->input_file : + conf->directory); } else { - inf("Starting restore to %s (bins: %s, sets: %s) from %s", conf.host, - conf.bin_list == NULL ? "[all]" : conf.bin_list, - conf.set_list == NULL ? "[all]" : conf.set_list, - conf.input_file != NULL ? - file_proxy_is_std_path(conf.input_file) ? "[stdin]" : conf.input_file : - conf.directory); + inf("Starting restore to %s (bins: %s, sets: %s) from %s", conf->host, + conf->bin_list == NULL ? "[all]" : conf->bin_list, + conf->set_list == NULL ? "[all]" : conf->set_list, + conf->input_file != NULL ? + file_proxy_is_std_path(conf->input_file) ? "[stdin]" : conf->input_file : + conf->directory); } FILE *mach_fd = NULL; - if (conf.machine != NULL && (mach_fd = fopen(conf.machine, "a")) == NULL) { - err_code("Error while opening machine-readable file %s", conf.machine); + if (conf->machine != NULL && (mach_fd = fopen(conf->machine, "a")) == NULL) { + err_code("Error while opening machine-readable file %s", conf->machine); goto cleanup2; } char (*node_names)[][AS_NODE_NAME_SIZE] = NULL; uint32_t n_node_names = 0; - if (!conf.validate) { - get_node_names(status.as->cluster, NULL, 0, &node_names, &n_node_names); + if (!conf->validate) { + get_node_names(status->as->cluster, NULL, 0, &node_names, &n_node_names); inf("Processing %u node(s)", n_node_names); } pthread_t counter_thread; counter_thread_args counter_args; - counter_args.conf = &conf; - counter_args.status = &status; + counter_args.conf = conf; + counter_args.status = status; counter_args.node_names = node_names; counter_args.n_node_names = n_node_names; counter_args.mach_fd = mach_fd; @@ -149,8 +212,8 @@ restore_main(int32_t argc, char **argv) pthread_t restore_threads[MAX_THREADS]; restore_thread_args_t restore_args; - restore_args.conf = &conf; - restore_args.status = &status; + restore_args.conf = conf; + restore_args.status = status; restore_args.path = NULL; restore_args.shared_fd = NULL; restore_args.line_no = NULL; @@ -169,44 +232,44 @@ restore_main(int32_t argc, char **argv) off_t total_file_size = 0; // restoring from multiple directories - if (conf.directory_list != NULL) { + if (conf->directory_list != NULL) { - char *dir_clone = safe_strdup(conf.directory_list); + char *dir_clone = safe_strdup(conf->directory_list); split_string(dir_clone, ',', false, &directories); for (uint32_t i = 0; i < directories.size; i++) { char *dir = as_vector_get_ptr(&directories, i); - if (conf.parent_directory) { + if (conf->parent_directory) { - size_t parent_dir_size = strlen(conf.parent_directory); + size_t parent_dir_size = strlen(conf->parent_directory); size_t path_size = parent_dir_size + strlen(dir) + 1; char *fmt = "%s%s"; - if (conf.parent_directory[parent_dir_size - 1] != '/') { + if (conf->parent_directory[parent_dir_size - 1] != '/') { ++path_size; fmt = "%s/%s"; } char *tmp_dir = dir; dir = cf_malloc(path_size); - snprintf(dir, path_size, fmt, conf.parent_directory, tmp_dir); + snprintf(dir, path_size, fmt, conf->parent_directory, tmp_dir); } - total_file_size = get_backup_files(dir, &status.file_vec); + total_file_size = get_backup_files(dir, &status->file_vec); if (total_file_size < 0) { err("Error while getting backup files from directory_list entry: %s", dir); cf_free(dir_clone); - if (conf.parent_directory) { + if (conf->parent_directory) { cf_free(dir); } goto cleanup5; } - if (conf.parent_directory) { + if (conf->parent_directory) { cf_free(dir); } } @@ -215,9 +278,9 @@ restore_main(int32_t argc, char **argv) } // restoring from a directory - if (conf.directory != 
NULL) { + if (conf->directory != NULL) { - total_file_size = get_backup_files(conf.directory, &status.file_vec); + total_file_size = get_backup_files(conf->directory, &status->file_vec); if (total_file_size < 0) { err("Error while getting backup files from directory"); goto cleanup5; @@ -225,28 +288,28 @@ restore_main(int32_t argc, char **argv) } // directory and directory_list are mutually exclusive but share this logic - if (conf.directory != NULL || conf.directory_list != NULL) { + if (conf->directory != NULL || conf->directory_list != NULL) { - if (status.file_vec.size == 0) { + if (status->file_vec.size == 0) { err("No backup files found"); goto cleanup5; } - if (!conf.no_records) { - ver("Triaging %u backup file(s)", status.file_vec.size); - status.estimated_bytes = total_file_size; - ver("Estimated total backup file size: %lli bytes", status.estimated_bytes); + if (!conf->no_records) { + ver("Triaging %u backup file(s)", status->file_vec.size); + status->estimated_bytes = total_file_size; + ver("Estimated total backup file size: %lli bytes", status->estimated_bytes); } - if (conf.validate) { + if (conf->validate) { inf("Validating backup files"); } - ver("Pushing %u exclusive job(s) to job queue", status.file_vec.size); + ver("Pushing %u exclusive job(s) to job queue", status->file_vec.size); // push a job for each backup file - for (uint32_t i = 0; i < status.file_vec.size; ++i) { - restore_args.path = as_vector_get_ptr(&status.file_vec, i); + for (uint32_t i = 0; i < status->file_vec.size; ++i) { + restore_args.path = as_vector_get_ptr(&status->file_vec, i); if (cf_queue_push(job_queue, &restore_args) != CF_QUEUE_OK) { err("Error while queueing restore job"); @@ -254,37 +317,37 @@ restore_main(int32_t argc, char **argv) } } - if (status.file_vec.size < conf.parallel) { - conf.parallel = status.file_vec.size; + if (status->file_vec.size < conf->parallel) { + conf->parallel = status->file_vec.size; } } // restoring from a single backup file else { inf( "%s %s", - conf.validate ? "Validating" : "Restoring", - conf.input_file + conf->validate ? "Validating" : "Restoring", + conf->input_file ); restore_args.shared_fd = (io_read_proxy_t*) cf_malloc(sizeof(io_read_proxy_t)); // open the file, file descriptor goes to restore_args.shared_fd - if (!open_file(conf.input_file, &status.ns_vec, restore_args.shared_fd, + if (!open_file(conf->input_file, &status->ns_vec, restore_args.shared_fd, &restore_args.legacy, &line_no, NULL, - conf.no_records ? NULL : &status.estimated_bytes, - conf.compress_mode, conf.encrypt_mode, conf.pkey)) { + conf->no_records ? 
+				conf->compress_mode, conf->encrypt_mode, conf->pkey)) {
 			err("Error while opening shared backup file");
 			cf_free(restore_args.shared_fd);
 			goto cleanup5;
 		}
 
-		ver("Pushing %u shared job(s) to job queue", conf.parallel);
+		ver("Pushing %u shared job(s) to job queue", conf->parallel);
 
 		restore_args.line_no = &line_no;
-		restore_args.path = conf.input_file;
+		restore_args.path = conf->input_file;
 
 		// push an identical job for each thread; all threads use restore_args.shared_fd for reading
-		for (uint32_t i = 0; i < conf.parallel; ++i) {
+		for (uint32_t i = 0; i < conf->parallel; ++i) {
 			if (cf_queue_push(job_queue, &restore_args) != CF_QUEUE_OK) {
 				err("Error while queueing restore job");
 				goto cleanup6;
@@ -292,15 +355,15 @@ restore_main(int32_t argc, char **argv)
 		}
 	}
 
-	if (!conf.no_records && !conf.validate) {
+	if (!conf->no_records && !conf->validate) {
 		inf("Restoring records");
 	}
 
 	uint32_t threads_ok = 0;
 
-	ver("Creating %u restore thread(s)", conf.parallel);
+	ver("Creating %u restore thread(s)", conf->parallel);
 
-	for (uint32_t i = 0; i < conf.parallel; ++i) {
+	for (uint32_t i = 0; i < conf->parallel; ++i) {
 		if (pthread_create(&restore_threads[i], NULL, restore_thread_func, job_queue) != 0) {
 			err_code("Error while creating restore thread");
 			goto cleanup7;
@@ -313,7 +376,7 @@ restore_main(int32_t argc, char **argv)
 
 	inf(
 		"Finished %s backup file(s)",
-		conf.validate ? "validating" : "restoring"
+		conf->validate ? "validating" : "restoring"
 	);
 
 cleanup7:
@@ -333,22 +396,22 @@ restore_main(int32_t argc, char **argv)
 		}
 	}
 
-	if (!conf.validate && !batch_uploader_await(&status.batch_uploader)) {
+	if (!conf->validate && !batch_uploader_await(&status->batch_uploader)) {
 		res = EXIT_FAILURE;
 	}
 
 	// NOTE this is here to support the --indexes-last option
-	if (res == EXIT_SUCCESS && !conf.no_indexes && !conf.validate &&
-			!restore_indexes(status.as, &status.index_vec, &status.set_vec,
-				&restore_args, conf.wait, conf.timeout)) {
+	if (res == EXIT_SUCCESS && !conf->no_indexes && !conf->validate &&
+			!restore_indexes(status->as, &status->index_vec, &status->set_vec,
+				&restore_args, conf->wait, conf->timeout)) {
 		err("Error while restoring secondary indexes to cluster");
 		res = EXIT_FAILURE;
 	}
 
-	if (res == EXIT_SUCCESS && conf.wait) {
-		for (uint32_t i = 0; i < status.udf_vec.size; i++) {
-			udf_param* udf = as_vector_get(&status.udf_vec, i);
-			if (!wait_udf(status.as, udf, conf.timeout)) {
+	if (res == EXIT_SUCCESS && conf->wait) {
+		for (uint32_t i = 0; i < status->udf_vec.size; i++) {
+			udf_param* udf = as_vector_get(&status->udf_vec, i);
+			if (!wait_udf(status->as, udf, conf->timeout)) {
 				err("Error while waiting for UDF upload");
 				res = EXIT_FAILURE;
 			}
@@ -356,7 +419,7 @@ restore_main(int32_t argc, char **argv)
 	}
 
 cleanup6:
-	if (conf.directory == NULL && conf.directory_list == NULL) {
+	if (conf->directory == NULL && conf->directory_list == NULL) {
 		if (!close_file(restore_args.shared_fd)) {
 			err("Error while closing shared backup file");
 			res = EXIT_FAILURE;
@@ -371,7 +434,7 @@ restore_main(int32_t argc, char **argv)
 cleanup4:
 	ver("Waiting for counter thread");
 
-	restore_status_finish(&status);
+	restore_status_finish(status);
 
 	if (pthread_join(counter_thread, NULL) != 0) {
 		err_code("Error while joining counter thread");
@@ -388,23 +451,17 @@ restore_main(int32_t argc, char **argv)
 		fclose(mach_fd);
 	}
 
-	restore_status_destroy(&status);
+	if (res == EXIT_FAILURE) {
+		restore_status_destroy(status);
+		cf_free(status);
+		status = RUN_RESTORE_FAILURE;
+	}
 
 cleanup1:
-	restore_config_destroy(&conf);
-	file_proxy_cloud_shutdown();
-
-	ver("Exiting with status code %d", res);
-
-	return res;
+	return res == EXIT_FAILURE ? RUN_RESTORE_FAILURE : status;
 }
 
-
-//==========================================================
-// Local helpers.
-//
-
 /*
  * Checks if the program has been stopped.
  */
@@ -1578,3 +1635,22 @@ sig_hand(int32_t sig)
 	stop();
 }
 
+static void
+set_s3_configs(const restore_config_t* conf)
+{
+	if (conf->s3_region != NULL) {
+		s3_set_region(conf->s3_region);
+	}
+
+	if (conf->s3_profile != NULL) {
+		s3_set_profile(conf->s3_profile);
+	}
+
+	if (conf->s3_endpoint_override != NULL) {
+		s3_set_endpoint(conf->s3_endpoint_override);
+	}
+
+	s3_set_max_async_downloads(conf->s3_max_async_downloads);
+	s3_set_connect_timeout_ms(conf->s3_connect_timeout);
+	s3_set_log_level(conf->s3_log_level);
+}
diff --git a/src/restore_config.c b/src/restore_config.c
index 4b1a6e76..132f0ea5 100644
--- a/src/restore_config.c
+++ b/src/restore_config.c
@@ -643,22 +643,6 @@ restore_config_init(int argc, char* argv[], restore_config_t* conf)
 		return RESTORE_CONFIG_INIT_FAILURE;
 	}
 
-	if (conf->s3_region != NULL) {
-		s3_set_region(conf->s3_region);
-	}
-
-	if (conf->s3_profile != NULL) {
-		s3_set_profile(conf->s3_profile);
-	}
-
-	if (conf->s3_endpoint_override != NULL) {
-		s3_set_endpoint(conf->s3_endpoint_override);
-	}
-
-	s3_set_max_async_downloads(conf->s3_max_async_downloads);
-	s3_set_connect_timeout_ms(conf->s3_connect_timeout);
-	s3_set_log_level(conf->s3_log_level);
-
 	if (conf->nice_list != NULL) {
 		as_vector nice_vec;
 		as_vector_inita(&nice_vec, sizeof(void*), 2);
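
Usage sketch, for context only (not part of this patch): a minimal caller that drives a backup through the new backup_run() entry point after building the shared libraries with `make shared`. The entry points, config/status lifecycle functions, and sentinel macros below come from include/backup.h and the diff above; the source file name, the compile line, and the host/namespace/output option values are illustrative assumptions, and enable_client_log() (shown in restore_main() above) is assumed to be reachable through the tool's headers.

/*
 * example_backup_caller.c -- illustrative sketch only; names and option
 * values are placeholders, not part of the patch.
 *
 * Assumed build line (Linux, from the repository root):
 *   cc example_backup_caller.c -Iinclude -o example_backup_caller ./bin/asbackup.so
 */
#include <stdlib.h>

#include "backup.h"

int
main(void)
{
	/* backup_config_init() parses argv-style options; these values are placeholders. */
	char *argv[] = {
		"asbackup",
		"--host", "127.0.0.1",
		"--namespace", "test",
		"--output-file", "test.asb",
	};
	int argc = (int) (sizeof(argv) / sizeof(argv[0]));

	/* Optional: turn on Aerospike C client logging before running the job. */
	enable_client_log();

	backup_config_t conf;
	if (backup_config_init(argc, argv, &conf) != 0) {
		/* non-zero covers both init failure and the help/version exit */
		return EXIT_FAILURE;
	}

	backup_status_t* status = backup_run(&conf);
	int res = (status == RUN_BACKUP_FAILURE) ? EXIT_FAILURE : EXIT_SUCCESS;

	/* Per backup_run()'s contract, only a real status object is destroyed and freed. */
	if (status != RUN_BACKUP_FAILURE && status != RUN_BACKUP_SUCCESS) {
		backup_status_destroy(status);
		free(status);
	}

	/* The caller always owns and destroys the config. */
	backup_config_destroy(&conf);

	return res;
}

restore_run() follows the same pattern with restore_config_init()/restore_config_destroy(); its only sentinel value is RUN_RESTORE_FAILURE, and a real restore_status is released with restore_status_destroy() followed by cf_free().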