From 45734dc65cf2c3393c237179f46d2f3f31bc9226 Mon Sep 17 00:00:00 2001
From: Kipp Cannon <kipp.cannon@ligo.org>
Date: Fri, 15 Feb 2019 02:16:26 +0900
Subject: [PATCH] gstlal-inspiral; remove --ilwdchar-compat from DAG jobs

---
 gstlal-inspiral/bin/gstlal_inspiral_pipe     | 29 +++++++++----------
 .../bin/gstlal_ll_inspiral_calculate_range   | 24 +++++++--------
 .../bin/gstlal_ll_inspiral_daily_page        | 16 +++++-----
 3 files changed, 34 insertions(+), 35 deletions(-)

diff --git a/gstlal-inspiral/bin/gstlal_inspiral_pipe b/gstlal-inspiral/bin/gstlal_inspiral_pipe
index a1ae4236a9..df08c3df24 100755
--- a/gstlal-inspiral/bin/gstlal_inspiral_pipe
+++ b/gstlal-inspiral/bin/gstlal_inspiral_pipe
@@ -611,7 +611,7 @@ def rank_and_merge(dag, createPriorDistStatsJob, calcRankPDFsJob, calcRankPDFsWi
 		db = dagparts.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in inputs], '.sqlite')
 		db = os.path.join(subdir_path([toSqliteJob.output_path, CacheEntry.from_T050017(db).description[:4]]), db)
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = merge_nodes,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":inputs},
 			output_files = {"database":db},
 			input_cache_file_name = os.path.basename(db).replace('.sqlite','.cache')
@@ -638,7 +638,7 @@ def rank_and_merge(dag, createPriorDistStatsJob, calcRankPDFsJob, calcRankPDFsWi
 		injdb = dagparts.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in inputs], '.sqlite')
 		injdb = os.path.join(subdir_path([toSqliteJob.output_path, CacheEntry.from_T050017(injdb).description[:4]]), injdb)
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = merge_nodes,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":inputs},
 			output_files = {"database":injdb},
 			input_cache_file_name = os.path.basename(injdb).replace('.sqlite','.cache')
@@ -676,7 +676,7 @@ def merge_in_bin(dag, toSqliteJob, lalappsRunSqliteJob, options):
 		noninjdb = dagparts.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in dbs], '.sqlite', path = toSqliteJob.output_path)
 		# merge all of the dbs from the same subbank
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = [],
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":dbs},
 			output_files = {"database":noninjdb},
 			input_cache_file_name = os.path.basename(noninjdb).replace('.sqlite','.cache')
@@ -697,7 +697,7 @@ def merge_in_bin(dag, toSqliteJob, lalappsRunSqliteJob, options):
 			injdb = dagparts.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in dbs], '.sqlite', path = toSqliteJob.output_path)
 			# merge all of the dbs from the same subbank
 			sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = [],
-				opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+				opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 				input_cache_files = {"input-cache":dbs},
 				output_files = {"database":injdb},
 				input_cache_file_name = os.path.basename(injdb).replace('.sqlite','.cache')
@@ -723,7 +723,7 @@ def merge_in_bin(dag, toSqliteJob, lalappsRunSqliteJob, options):
 
 		# merge all of the dbs from the same subbank
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = [],
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":[ce.path for ce in ce_list]},
 			output_files = {"database":noninjdb},
 			input_cache_file_name = os.path.basename(noninjdb).replace('.sqlite','.cache')
@@ -743,7 +743,7 @@ def merge_in_bin(dag, toSqliteJob, lalappsRunSqliteJob, options):
 
 		# merge all of the dbs from the same subbank
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = [],
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":[ce.path for ce in ce_list]},
 			output_files = {"database":injdb},
 			input_cache_file_name = os.path.basename(injdb).replace('.sqlite','.cache')
@@ -785,7 +785,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 	# Merge the final non injection database into chunks
 	noninjdb = dagparts.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in dbs], '.sqlite', path = toSqliteJob.output_path)
 	sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = parents,
-		opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+		opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 		input_cache_files = {"input-cache": dbs},
 		output_files = {"database":noninjdb},
 		input_cache_file_name = os.path.basename(noninjdb).replace('.sqlite','.cache')
@@ -811,7 +811,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 	for chunk, nodes in enumerate(chunks(innodes[None], 10)):
 		noninjdb = dagparts.T050017_filename(instruments, 'PART_LLOID_CHUNK_%04d' % chunk, boundary_seg, '.sqlite')
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = nodes,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache": [node.input_files[""] for node in nodes]},
 			output_files = {"database":noninjdb},
 			input_cache_file_name = os.path.basename(noninjdb).replace('.sqlite','.cache')
@@ -826,7 +826,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 
 	noninjdb = dagparts.T050017_filename(instruments, 'ALL_LLOID', boundary_seg, '.sqlite')
 	sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = final_nodes,
-		opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+		opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 		input_files = {"": (vetoes + [options.frame_segments_file])},
 		input_cache_files = {"input-cache": [node.input_files[""] for node in final_nodes]},
 		output_files = {"database":noninjdb},
@@ -870,7 +870,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 
 		# merge
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = parents,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache":dbs},
 			output_files = {"database":injdb},
 			input_cache_file_name = os.path.basename(injdb).replace('.sqlite','.cache')
@@ -893,7 +893,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 
 		# merge
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = injnodes,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_cache_files = {"input-cache": [node.input_files[""] for node in injnodes]},
 			output_files = {"database":injdb},
 			input_cache_file_name = injdb.replace('.sqlite','.cache')
@@ -923,7 +923,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 
 		# merge
 		sqlitenode = dagparts.DAGNode(toSqliteJob, dag, parent_nodes = final_nodes + ligolw_add_nodes,
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			input_files = {"": (vetoes + [options.frame_segments_file, injections])},
 			input_cache_files = {"input-cache": [node.input_files[""] for node in final_nodes]},
 			output_files = {"database":injdb},
@@ -938,7 +938,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 
 		clusternode = dagparts.DAGNode(toXMLJob, dag, parent_nodes = [clusternode],
-			opts = {"tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"tmp-space":dagparts.condor_scratch_space()},
 			output_files = {"extract":injxml},
 			input_files = {"database":injdb}
 		)
 
@@ -949,7 +949,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 		)
 
 		sqlitenode = dagparts.DAGNode(toSqliteNoCacheJob, dag, parent_nodes = [inspinjnode],
-			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space(), "ilwdchar-compat":""},
+			opts = {"replace":"", "tmp-space":dagparts.condor_scratch_space()},
 			output_files = {"database":injdb},
 			input_files = {"":xml_input}
 		)
@@ -1434,7 +1434,6 @@ if not options.lloid_cache and not options.disable_calc_inj_snr:
 
 		addnode = dagparts.DAGNode(ligolwAddJob, dag, parent_nodes=inj_snr_nodes,
 			input_files = {"": ' '.join(["%s/%s_INJ_SPLIT_%04d.xml" % (injSplitterJob.output_path, sim_tag_from_inj_file(inj.split(":")[-1]), i) for i in xrange(num_split_inj_snr_jobs)])},
-			opts = {"ilwdchar-compat":""},
 			output_files = {"output": inj.split(":")[-1]}
 		)
 
diff --git a/gstlal-inspiral/bin/gstlal_ll_inspiral_calculate_range b/gstlal-inspiral/bin/gstlal_ll_inspiral_calculate_range
index 6448b229db..837d45002b 100755
--- a/gstlal-inspiral/bin/gstlal_ll_inspiral_calculate_range
+++ b/gstlal-inspiral/bin/gstlal_ll_inspiral_calculate_range
@@ -92,8 +92,8 @@ else
 	START_FILE=${CUMULATIVE_SEG_FILE}
 fi
 
-SEGMENT_DEF_ID=$(ligolw_print --ilwdchar-compat --table segment_definer --column name --column segment_def_id ${START_FILE} | grep triggersegments | sed -e 's/,/\n/g' | grep segment_definer | head -n 1)
-START=$(ligolw_print --ilwdchar-compat --table segment --column start_time --column segment_def_id ${START_FILE} | grep ${SEGMENT_DEF_ID} | sed -e 's/,/\n/g' | sed -e '/segment_definer/d' | sort -g | head -n 1)
+SEGMENT_DEF_ID=$(ligolw_print --table segment_definer --column name --column segment_def_id ${START_FILE} | grep triggersegments | sed -e 's/,/\n/g' | grep segment_definer | head -n 1)
+START=$(ligolw_print --table segment --column start_time --column segment_def_id ${START_FILE} | grep ${SEGMENT_DEF_ID} | sed -e 's/,/\n/g' | sed -e '/segment_definer/d' | sort -g | head -n 1)
 
 echo analysis start time read as ${START}
 while true; do
@@ -154,11 +154,11 @@ while true; do
 		gstlal_segments_operations --union --segment-file1 ${TMP_CUMULATIVE_SEG_XML}.gz --segment-file2 ${TMP_TOTAL_SEG_XML_GZ} --output-file ${TRIGGER_SEG_XML} --segment-name1 triggersegments --segment-name2 triggersegments --output-segment-name triggersegments --verbose
 		SEG_FILES=""
 		for SEG_FILE in ${STATEVECTOR_SEG_XML} ${WHITEHT_SEG_XML} ${TRIGGER_SEG_XML}; do
-			if [ $(ligolw_print --ilwdchar-compat --table segment ${SEG_FILE} | wc -l) -gt 0 ]; then
+			if [ $(ligolw_print --table segment ${SEG_FILE} | wc -l) -gt 0 ]; then
 				SEG_FILES="${SEG_FILE} ${SEG_FILES}"
 			fi
 		done
-		ligolw_add --ilwdchar-compat ${SEG_FILES} --verbose --output ${TMP_TOTAL_SEG_XML_GZ}
+		ligolw_add ${SEG_FILES} --verbose --output ${TMP_TOTAL_SEG_XML_GZ}
 		rm ${STATEVECTOR_SEG_XML} ${WHITEHT_SEG_XML} ${TRIGGER_SEG_XML} ${TMP_CUMULATIVE_SEG_XML}.gz
 	fi
@@ -187,7 +187,7 @@ while true; do
 	else
 		cp ${LIGOLW_XML_GZ} ${TMP_LIGOLW_XML_GZ}
 
-		if [ $(ligolw_print --ilwdchar-compat --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | wc -l) -eq 0 ]; then
+		if [ $(ligolw_print --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | wc -l) -eq 0 ]; then
 			QUERYSTART=${START}
 		else
 			# QUERYSTART is chosen so that the simdb query will
@@ -195,7 +195,7 @@ while true; do
 			# return the same event that was most recently
 			# downloaded. This way we avoid any potential issues
 			# with the asynchronous nature of the simdb queries and
 			# the segment updates
-			QUERYSTART=$(ligolw_print --ilwdchar-compat --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | sort -g | tail -n 1)
+			QUERYSTART=$(ligolw_print --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | sort -g | tail -n 1)
 		fi
 	fi
@@ -208,8 +208,8 @@ while true; do
 	echo querying simdb from ${QUERYSTART} to ${QUERYSTOP}...
 	gracedb --ligolw --service-url=${SIMDB_SERVICE_URL} search ${QUERYSTART}..${QUERYSTOP} ${PIPELINE} ${GROUP} ${SEARCH} > ${TMP_QUERY_XML}
 
-	if [ $(ligolw_print --ilwdchar-compat --table coinc_inspiral --column end_time ${TMP_QUERY_XML} | wc -l) -gt 0 ]; then
-		ligolw_add --ilwdchar-compat ${TMP_LIGOLW_XML_GZ} ${TMP_QUERY_XML} --verbose --output ${TMP_LIGOLW_XML_GZ}
+	if [ $(ligolw_print --table coinc_inspiral --column end_time ${TMP_QUERY_XML} | wc -l) -gt 0 ]; then
+		ligolw_add ${TMP_LIGOLW_XML_GZ} ${TMP_QUERY_XML} --verbose --output ${TMP_LIGOLW_XML_GZ}
 	fi
 	rm ${TMP_QUERY_XML}
@@ -219,16 +219,16 @@ while true; do
 	#
 	# the events+injections file and then generate the range plot
 	#
 
-	if [ $(ligolw_print --ilwdchar-compat --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | wc -l) -gt 0 ]; then
+	if [ $(ligolw_print --table coinc_inspiral --column end_time ${TMP_LIGOLW_XML_GZ} | wc -l) -gt 0 ]; then
 		#
 		# Delete old segments from events+injections file, then add new
 		# segments, again using atomic operations
 		#
 
-		if [ $(ligolw_print --ilwdchar-compat --table segment ${TMP_LIGOLW_XML_GZ} | wc -l) -gt 0 ]; then
+		if [ $(ligolw_print --table segment ${TMP_LIGOLW_XML_GZ} | wc -l) -gt 0 ]; then
 			lalapps_run_sqlite --verbose --sql='DELETE FROM segment; DELETE FROM segment_definer' ${TMP_LIGOLW_XML_GZ}
 		fi
-		ligolw_add --ilwdchar-compat --verbose ${TMP_LIGOLW_XML_GZ} ${TMP_TOTAL_SEG_XML_GZ} --output ${TMP_LIGOLW_XML_GZ}
+		ligolw_add --verbose ${TMP_LIGOLW_XML_GZ} ${TMP_TOTAL_SEG_XML_GZ} --output ${TMP_LIGOLW_XML_GZ}
 
 		# FIXME Find a better way to get the location of simplify_and_cluster.sql
 		lalapps_run_sqlite --verbose --sql-file=$(dirname $(dirname $(which gstlal_inspiral)))/share/gstlal/simplify_and_cluster.sql ${TMP_LIGOLW_XML_GZ}
@@ -257,7 +257,7 @@ while true; do
 	# Try to generate the sqlite database, but if something goes
 	# wrong then delete the database and try again. This should fix
 	# issues with the database locking
-	ligolw_sqlite --ilwdchar-compat --verbose --database ${LIGOLW_SQLITE} --replace ${TMP_LIGOLW_XML_GZ} || { rm ${LIGOLW_SQLITE} && ligolw_sqlite --ilwdchar-compat --verbose --database ${LIGOLW_SQLITE} ${TMP_LIGOLW_XML_GZ}; }
+	ligolw_sqlite --verbose --database ${LIGOLW_SQLITE} --replace ${TMP_LIGOLW_XML_GZ} || { rm ${LIGOLW_SQLITE} && ligolw_sqlite --verbose --database ${LIGOLW_SQLITE} ${TMP_LIGOLW_XML_GZ}; }
 	gstlal_inspiral_plot_sensitivity --data-segments-name=statevectorsegments --bin-by-chirp-mass --verbose ${LIGOLW_SQLITE}
 
 	echo plotting...
diff --git a/gstlal-inspiral/bin/gstlal_ll_inspiral_daily_page b/gstlal-inspiral/bin/gstlal_ll_inspiral_daily_page
index e2ac4a63cf..14c33ae635 100755
--- a/gstlal-inspiral/bin/gstlal_ll_inspiral_daily_page
+++ b/gstlal-inspiral/bin/gstlal_ll_inspiral_daily_page
@@ -53,7 +53,7 @@ def process_mass_bin(args):
 	files = sorted([os.path.join(os.path.join(options.directory, d), xml) for xml in os.listdir(os.path.join(options.directory, d)) if pattern.match(xml) and "~" not in xml])
 	for f in files:
 		try:
-			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--ilwdchar-compat", "--tmp-space", "/dev/shm", "--database", "%s" % db, "%s" % f])
+			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--database", "%s" % db, "%s" % f])
 		except:
 			print >> sys.stderr, "couldn't process %s" % f
 			continue
@@ -130,12 +130,12 @@ if __name__ == '__main__':
 			# Parallel process the data reduction
 			args = ([massbin, result_dirs, n, d, options, "ALL_LLOID", 0, cluster_file] for massbin in massbins)
 			# Merge the files of this directory
-			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--ilwdchar-compat", "--tmp-space", "/dev/shm", "--replace", "--verbose", "--database", "%s" % noninjdball] + list(pool.map(process_mass_bin, args)))
+			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--replace", "--verbose", "--database", "%s" % noninjdball] + list(pool.map(process_mass_bin, args)))
 			noninj_files_to_merge.append(noninjdball)
 
 			for injection_file in inj_file_bins:
 				args = ([massbin, result_dirs, n, d, options, injtag(injection_file), 1, cluster_file] for massbin in inj_file_bins[injection_file])
-				subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--ilwdchar-compat", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--replace", "--verbose", "--database", "%s" % injdball[injection_file]] + list(pool.map(process_mass_bin, args)))
+				subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--replace", "--verbose", "--database", "%s" % injdball[injection_file]] + list(pool.map(process_mass_bin, args)))
 				inj_files_to_merge.setdefault(injection_file,[]).append(injdball[injection_file])
 
 	#
@@ -159,7 +159,7 @@ if __name__ == '__main__':
 
 	progressbar = ProgressBar("Merge noninjection files", len(noninj_files_to_merge))
 	for f in noninj_files_to_merge:
 		# NOTE the online analysis doesn't do a global clustering stage!! That means that you will under count the events in the final db
-		subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--ilwdchar-compat", "--tmp-space", "/dev/shm", "--verbose", "--database", "%s" % noninjdb, "%s" % f])
+		subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--verbose", "--database", "%s" % noninjdb, "%s" % f])
 		progressbar.increment()
 	del progressbar
@@ -167,19 +167,19 @@ if __name__ == '__main__':
 		progressbar = ProgressBar("Merge injection files", len(inj_files_to_merge[injection_file]))
 		for f in inj_files_to_merge[injection_file]:
 			# NOTE the online analysis doesn't do a global clustering stage!! That means that you will under count the events in the final db
-			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--ilwdchar-compat", "--tmp-space", "/dev/shm", "--verbose", "--database", "%s" % injdb[injection_file], "%s" % f])
+			subprocess.check_call(["gstlal_inspiral_merge_and_reduce", "--sql-file", cluster_file, "--tmp-space", "/dev/shm", "--verbose", "--database", "%s" % injdb[injection_file], "%s" % f])
 			progressbar.increment()
 		del progressbar
 
 		# Find injections
 		progressbar = ProgressBar("Find injections", 4)
-		subprocess.check_call(["ligolw_sqlite", "--ilwdchar-compat", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "%s" % injection_file])
+		subprocess.check_call(["ligolw_sqlite", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "%s" % injection_file])
 		progressbar.increment()
-		subprocess.check_call(["ligolw_sqlite", "--ilwdchar-compat", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "--extract", "%s.xml" % injdb[injection_file]])
+		subprocess.check_call(["ligolw_sqlite", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "--extract", "%s.xml" % injdb[injection_file]])
 		progressbar.increment()
 		subprocess.check_call(["lalapps_inspinjfind", "--verbose", "%s.xml" % injdb[injection_file]])
 		progressbar.increment()
-		subprocess.check_call(["ligolw_sqlite", "--ilwdchar-compat", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "--replace", "%s.xml" % injdb[injection_file]])
+		subprocess.check_call(["ligolw_sqlite", "--tmp-space", os.environ["TMPDIR"], "--verbose", "--database", "%s" % injdb[injection_file], "--replace", "%s.xml" % injdb[injection_file]])
 		progressbar.increment()
 
 	#
-- 
GitLab