  1. """Script to combine compton measurements with runs and process these data
  2. """
  3. import argparse
  4. from configparser import ConfigParser
  5. from datetime import datetime, timedelta, timezone
  6. import sqlite3
  7. from typing import Union, Tuple, Optional
  8. from compton_filter import CalibrdbHandler
  9. from iminuit import Minuit
  10. import matplotlib.dates as mdates
  11. import matplotlib.pyplot as plt
  12. from mysql.connector import connect, Error
  13. import numpy as np
  14. import pandas as pd
  15. from tqdm import tqdm
  16. SEASONS = {
  17. 'name': ['HIGH2017', 'RHO2018', 'HIGH2019', 'LOW2020', 'HIGH2020', 'HIGH2021', 'NNBAR2021'],
  18. 'start_run': [36872, 48938, 70014, 85224, 89973, 98116, 107342, None],
  19. }

class RunsDBHandler():
    def __init__(self, host: str = 'cmddb', database: str = 'online', user: str = None, password: str = None):
        self.conn = connect(host=host, database=database, user=user, password=password)
        self.cur = self.conn.cursor()
        self.cur.execute("SET time_zone = '+07:00';")

    @property
    def fields(self) -> list:
        """Returns a list of available columns in the RunsDB"""
        self.cur.execute("""DESCRIBE Runlog""")
        return self.cur.fetchall()
    def load_tables(self, range: Union[Tuple[int, Optional[int]], Tuple[datetime, datetime]],
                    energy_point: Optional[float] = None, select_bad_runs: bool = False):
        """
        Parameters
        ----------
        range : Union[Tuple[int, Optional[int]], Tuple[datetime, datetime]]
            selection range:
            an int range defines an interval in runs,
            a datetime range defines a time interval (NSK: +7:00 time)
        energy_point : Optional[float]
            energy point name, MeV (default is None)
        select_bad_runs : bool
            if True, select only runs with a quality label other than "Y" (default is False)
        """
        cond = ""
        if isinstance(range[0], int):
            cond = f" AND run >= {range[0]} "
            if range[1] is not None:
                cond += f" AND run <= {range[1]} "
        elif isinstance(range[0], datetime):
            cond = " AND starttime >= %s "
            if range[1] is not None:
                cond += " AND stoptime <= %s"
            else:
                range = (range[0], )
        energy_cond = ""
        if energy_point is not None:
            energy_cond = f" AND energy = {energy_point}"
        quality_cond = ' quality = "Y" '
        if select_bad_runs:
            quality_cond = ' quality <> "Y" '
        sql_query = f"""
        SELECT
            run,
            starttime,
            stoptime,
            energy,
            luminosity
        FROM Runlog
        WHERE
            {quality_cond}
            {cond}
            {energy_cond}
            AND luminosity > 0
            AND stoptime > starttime
            AND nevent > 0
        ORDER BY run DESC"""
        if isinstance(range[0], datetime):
            self.cur.execute(sql_query, range)
        else:
            self.cur.execute(sql_query)
        field_names = [i[0] for i in self.cur.description]
        res = self.cur.fetchall()
        return res, field_names
    def __del__(self):
        self.conn.close()
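
# A minimal usage sketch for RunsDBHandler (the run numbers below are the
# HIGH2020 season boundaries from SEASONS; the credentials are hypothetical):
#
#     rdb = RunsDBHandler(host='cmddb', database='online', user='reader', password='...')
#     rows, fields = rdb.load_tables((89973, 98116))  # select by run number
#     runs_df = pd.DataFrame(rows, columns=fields)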

class Combiner():
    """Combines a dataframe with runs and a dataframe with compton measurements together"""
    def __init__(self, runsdb: Tuple[list, list], clbrdb: Tuple[list, list]):
        """
        Parameters
        ----------
        runsdb : Tuple[list, list]
            table of runs (rows and field names)
        clbrdb : Tuple[list, list]
            table of compton measurements (rows and field names)
        """
        rdb_rows, r_fld = runsdb
        cdb_rows, c_fld = clbrdb
        self.conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        self.cur = self.conn.cursor()
        self.cur.execute("CREATE table runs (run, elabel, starttime timestamp, stoptime timestamp, luminosity)")
        self.cur.execute("CREATE table compton (begintime timestamp, endtime timestamp, e_mean, e_std, spread_mean, spread_std)")
        run_row_generator = map(lambda x: (x[r_fld.index("run")], x[r_fld.index("energy")],
                                           x[r_fld.index("starttime")], x[r_fld.index("stoptime")],
                                           x[r_fld.index("luminosity")]), rdb_rows)
        c_data_idx = c_fld.index("data")
        compton_row_generator = map(lambda x: (x[c_fld.index("begintime")], x[c_fld.index("endtime")],
                                               float(x[c_data_idx][0]), float(x[c_data_idx][1]),
                                               float(x[c_data_idx][2]), float(x[c_data_idx][3])), cdb_rows)
        self.cur.executemany(f"""INSERT into runs VALUES ({','.join(['?'] * 5)})""", run_row_generator)
        self.cur.executemany(f"""INSERT into compton VALUES ({','.join(['?'] * 6)})""", compton_row_generator)
        self.__create_combined_table()
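
    # Note on the join below: the four OR-ed BETWEEN clauses implement
    # interval overlap. A run [starttime, stoptime] is matched with a
    # measurement [begintime, endtime] whenever either interval contains an
    # endpoint of the other, which is equivalent to
    # starttime <= endtime AND begintime <= stoptime.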
    def __create_combined_table(self):
        create_combined_query = """
        CREATE TABLE combined_table AS
        SELECT
            runs.run AS run,
            runs.elabel AS elabel,
            runs.starttime AS "run_start [timestamp]",
            runs.stoptime AS "run_stop [timestamp]",
            compton.begintime AS "compton_start [timestamp]",
            compton.endtime AS "compton_stop [timestamp]",
            runs.luminosity, compton.e_mean, compton.e_std, compton.spread_mean, compton.spread_std
        FROM runs, compton
        WHERE
            (runs.starttime BETWEEN compton.begintime AND compton.endtime)
            OR (runs.stoptime BETWEEN compton.begintime AND compton.endtime)
            OR (compton.begintime BETWEEN runs.starttime AND runs.stoptime)
            OR (compton.endtime BETWEEN runs.starttime AND runs.stoptime);
        """
        self.cur.execute(create_combined_query)
        return
    def combined_table(self) -> pd.DataFrame:
        """Returns the combined dataframe"""
        sql_query = """
        SELECT * FROM combined_table;
        """
        df = pd.read_sql(sql_query, self.conn)
        df['common_duration'] = df[['run_stop', 'compton_stop']].min(axis=1) - df[['run_start', 'compton_start']].max(axis=1)
        df['run_duration'] = df['run_stop'] - df['run_start']
        df['run_in_measurement'] = df['common_duration'] / df['run_duration']
        # keep, for every run, the measurement covering the largest fraction of that run
        df = df.sort_values(by='run_in_measurement', ascending=False).drop_duplicates(subset='run').sort_values(by='run')
        df = df.drop(['run_duration', 'common_duration', 'run_start', 'run_stop'], axis=1)
        return df

    def __del__(self):
        self.conn.close()

class Likelihood():
    """
    Likelihood function
    """
    def __init__(self, means: np.array, sigmas: np.array, weights: np.array):
        """
        Parameters
        ----------
        means : np.array
            array of means, [MeV]
        sigmas : np.array
            array of standard deviations, [MeV]
        weights : np.array
            array of luminosities
        """
        self.means = means
        self.sigmas = sigmas
        self.weights = weights / weights.mean()

    def __call__(self, mean: float, sigma: float):
        """
        Calculates the negative log-likelihood (the quantity to minimize)

        Parameters
        ----------
        mean : float
            expected mean
        sigma : float
            expected standard deviation
        """
        sigma_total = np.sqrt(sigma**2 + self.sigmas**2)
        ln_L = -np.sum(self.weights * (((mean - self.means)**2) / (2 * (sigma_total**2)) + np.log(sigma_total)))
        return -ln_L
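
# The class above implements the luminosity-weighted Gaussian negative
# log-likelihood (up to an additive constant) that Minuit minimizes below:
#
#     -ln L(mu, sigma) = sum_i w_i * [ (mu - m_i)^2 / (2 * s_i^2) + ln(s_i) ],
#
# where s_i^2 = sigma^2 + sigma_i^2, (m_i, sigma_i) are the per-measurement
# means and errors, and w_i = L_i / <L> are the luminosity weights.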

def __estimate_point_with_closest(comb_df: pd.DataFrame, runs_df: pd.DataFrame, compton_df: pd.DataFrame):
    # estimate the energy from the measurements closest in time to the run range
    min_run_time = runs_df[runs_df.run == comb_df.iloc[0].at['run_first']].iloc[0].at['starttime']
    max_run_time = runs_df[runs_df.run == comb_df.iloc[0].at['run_last']].iloc[0].at['stoptime']
    nearest_row_before = compton_df.iloc[pd.Index(compton_df.endtime).get_indexer([min_run_time], method='nearest')[0]]
    nearest_row_after = compton_df.iloc[pd.Index(compton_df.begintime).get_indexer([max_run_time], method='nearest')[0]]
    # regularization: floor the statistical errors (data[1] = e_std, data[3] = spread_std) at 1e-3 MeV
    nearest_row_before['data'][1] = max(nearest_row_before['data'][1], 1e-3)
    nearest_row_after['data'][1] = max(nearest_row_after['data'][1], 1e-3)
    nearest_row_before['data'][3] = max(nearest_row_before['data'][3], 1e-3)
    nearest_row_after['data'][3] = max(nearest_row_after['data'][3], 1e-3)
    mean_energy = (nearest_row_before['data'][0] + nearest_row_after['data'][0]) / 2
    mean_spread = (nearest_row_before['data'][2] + nearest_row_after['data'][2]) / 2
    # inverse-variance combination of the two statistical errors
    std_energy = np.sqrt(1 / (1 / (nearest_row_before['data'][1])**2 + 1 / (nearest_row_after['data'][1])**2))
    std_spread = np.sqrt(1 / (1 / (nearest_row_before['data'][3])**2 + 1 / (nearest_row_after['data'][3])**2))
    # the scatter between the two measurements serves as the systematic error
    sys_energy = np.std([nearest_row_before['data'][0], nearest_row_after['data'][0]])
    return {
        'energy_point': comb_df.elabel.min(),
        'first_run': comb_df.run_first.min(),
        'last_run': comb_df.run_last.max(),
        'mean_energy': mean_energy,
        'mean_energy_stat_err': std_energy,
        'mean_energy_sys_err': sys_energy,
        'mean_spread': mean_spread,
        'mean_spread_stat_err': std_spread,
        'used_lum': 0,
        'comment': 'indirect measurement #2',
    }, pd.DataFrame([])

def calculate_point(comb_df: pd.DataFrame, runs_df: pd.DataFrame, compton_df: pd.DataFrame, rdb) -> Tuple[dict, pd.DataFrame]:
    """Calculates the energy parameters (mean, std, spread) for one energy point

    Parameters
    ----------
    comb_df : pd.DataFrame
        table of the measurements linked with runs
    runs_df : pd.DataFrame
        table of the runs
    compton_df : pd.DataFrame
        table of the compton measurements

    Returns
    -------
    Tuple[dict, pd.DataFrame]
        averaged parameters for this energy point and the table of measurements used
    """
    if (len(comb_df) == 1) and pd.isnull(comb_df.iloc[0].at['compton_start']):
        # no direct compton measurements during the data runs
        min_Yruntime = runs_df[runs_df.run == comb_df.iloc[0].at['run_first']].iloc[0].at['starttime']
        max_Yruntime = runs_df[runs_df.run == comb_df.iloc[0].at['run_last']].iloc[0].at['stoptime']
        dlt0 = timedelta(days=1)
        # asymmetric time window because the energy can be stable only after the runs
        runs_df_with_bads = rdb.load_tables((min_Yruntime, max_Yruntime + dlt0),
                                            energy_point=comb_df.iloc[0].at['elabel'], select_bad_runs=True)
        if len(runs_df_with_bads[0]) == 0:
            return __estimate_point_with_closest(comb_df, runs_df, compton_df)
        runs_df_with_bads_df = pd.DataFrame(runs_df_with_bads[0], columns=runs_df_with_bads[1])
        min_run_time = min(min_Yruntime, runs_df_with_bads_df.starttime.min())
        max_run_time = max(max_Yruntime, runs_df_with_bads_df.stoptime.max())
        compton_meas = compton_df.query('((begintime>=@min_run_time)&(begintime<=@max_run_time))|((endtime>=@min_run_time)&(endtime<=@max_run_time))').copy()
        res_df = pd.DataFrame(list(map(lambda x: {
            'compton_start': x[1]['begintime'],
            'compton_stop': x[1]['endtime'],
            'e_mean': float(x[1]['data'][0]),
            'e_std': float(x[1]['data'][1]),
            'spread_mean': float(x[1]['data'][2]),
            'spread_std': float(x[1]['data'][3]),
        }, compton_meas.iterrows())))
        # keep only measurements within 5 MeV of the nominal energy point
        res_df = res_df.query(f'abs(e_mean - {comb_df.iloc[0].at["elabel"]}) < 5')
        if len(res_df) == 0:
            return __estimate_point_with_closest(comb_df, runs_df, compton_df)
        return {
            'energy_point': comb_df.elabel.min(),
            'first_run': comb_df.run_first.min(),
            'last_run': comb_df.run_last.max(),
            'mean_energy': res_df.e_mean.mean(),
            'mean_energy_stat_err': np.sqrt(1 / np.sum(1 / (res_df.e_std)**2)),
            'mean_energy_sys_err': np.abs(comb_df.iloc[0].at['elabel'] - res_df.e_mean.mean()),
            'mean_spread': res_df.spread_mean.mean(),
            'mean_spread_stat_err': np.sqrt(1 / np.sum(1 / (res_df.spread_std)**2)),
            'used_lum': 0,
            'comment': 'indirect measurement #1',
        }, res_df
    df = comb_df.copy()
    df.spread_std = np.where(df.spread_std < 1e-4, 1e-4, df.spread_std)
    df = df[df.e_std > 0]
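    # Luminosity-weighted inverse-variance average of the energies:
    #     E = sum_i(L_i * E_i / s_i^2) / sum_i(L_i / s_i^2)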
    mean_energy = np.sum(df.e_mean * df.luminosity / (df.e_std**2)) / np.sum(df.luminosity / (df.e_std**2))
    # std_energy = np.sqrt(1/np.sum((df.luminosity/df.luminosity.mean())/df.e_std**2))
    # drop measurements more than 5 sigma away from the weighted mean
    good_criterion = np.abs((df.e_mean - mean_energy) / np.sqrt(df.e_mean.std()**2 + df.e_std**2)) < 5
    df = df[good_criterion]
    # fit the mean energy and an additional spread with the weighted likelihood
    m = Minuit(Likelihood(df.e_mean, df.e_std, df.luminosity), mean=df.e_mean.mean(), sigma=df.e_mean.std())
    m.errordef = 0.5
    m.limits['sigma'] = (0, None)
    m.migrad()
    sys_err = m.values['sigma']
    mean_en = m.values['mean']
    mean_spread = np.sum(df.spread_mean * df.luminosity / (df.spread_std**2)) / np.sum(df.luminosity / (df.spread_std**2))
    std_spread = np.sqrt(1 / np.sum((df.luminosity / df.luminosity.mean()) / df.spread_std**2))
    res_dict = {
        'energy_point': comb_df.elabel.min(),
        'first_run': comb_df.run_first.min(),
        'last_run': comb_df.run_last.max(),
        'mean_energy': mean_en,
        'mean_energy_stat_err': m.errors['mean'],
        'mean_energy_sys_err': sys_err,
        'mean_spread': mean_spread,
        'mean_spread_stat_err': std_spread,
        'used_lum': df.luminosity.sum() / comb_df.luminosity_total.sum(),
        'comment': '',
    }
    return res_dict, df

def process_combined(combined_df: pd.DataFrame, runs_df: pd.DataFrame, compton_df: pd.DataFrame,
                     pics_folder: Optional[str] = None, rdb: Optional[RunsDBHandler] = None) -> pd.DataFrame:
    if pics_folder is not None:
        plt.ioff()
        plt.style.use('ggplot')
        locator = mdates.AutoDateLocator(minticks=5)
        formatter = mdates.ConciseDateFormatter(locator)
        formatter.formats = ['%y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f', ]
        formatter.zero_formats = [''] + formatter.formats[:-1]
        formatter.zero_formats[3] = '%d-%b'
        formatter.offset_formats = ['', '%Y', '%b %Y', '%d %b %Y', '%d %b %Y', '%d %b %Y %H:%M', ]
    runs_df = runs_df.rename({'luminosity': 'luminosity_full', 'energy': 'elabel'}, axis=1)
    combined_df = pd.merge(combined_df.drop(['elabel'], axis=1), runs_df[['run', 'elabel', 'luminosity_full']], how='outer')
    combined_df = combined_df.sort_values(by='run')
    combined_df['luminosity'] = combined_df['luminosity'].fillna(0)
    # a new energy point starts wherever the energy label changes
    combined_df['point_idx'] = np.cumsum(~np.isclose(combined_df.elabel, combined_df.elabel.shift(1), atol=1e-4))
    # the *_test columns duplicate each aggregate with the opposite extremum,
    # apparently as a consistency check (min and max must agree within a group)
    combined_df = combined_df.groupby(['point_idx', 'compton_start'], dropna=False).agg(
        elabel=('elabel', 'min'), elabel_test=('elabel', 'max'),
        run_first=('run', 'min'), run_last=('run', 'max'),
        luminosity=('luminosity', 'sum'), luminosity_total=('luminosity_full', 'sum'),
        compton_stop=('compton_stop', 'min'), compton_stop_test=('compton_stop', 'max'),
        e_mean=('e_mean', 'min'), e_mean_test=('e_mean', 'max'),
        e_std=('e_std', 'min'), e_std_test=('e_std', 'max'),
        spread_mean=('spread_mean', 'min'), spread_mean_test=('spread_mean', 'max'),
        spread_std=('spread_std', 'min'), spread_std_test=('spread_std', 'max'),
    ).reset_index().set_index('point_idx')
    # collect rows in a list (DataFrame.append is removed in recent pandas)
    result_rows = []
    for i, table in tqdm(combined_df.groupby('point_idx', dropna=False)):
        res_dict, good_df = calculate_point(table, runs_df, compton_df, rdb)
        result_rows.append(res_dict)
        if pics_folder is not None:
            plt_table = good_df.dropna()
            if len(plt_table) == 0:
                continue
            total_error = np.sqrt(res_dict["mean_energy_stat_err"]**2 + res_dict["mean_energy_sys_err"]**2)
            half_timedelta = (plt_table.compton_stop - plt_table.compton_start) / 2
            time = plt_table.compton_start + half_timedelta
            fig, ax = plt.subplots(1, 1, dpi=120, tight_layout=True)
            ax.errorbar(time, plt_table.e_mean, xerr=half_timedelta, yerr=plt_table.e_std, fmt='.')
            ax.axhline(res_dict['mean_energy'], color='black', zorder=3, label='Mean')
            ax.fill_between([plt_table.compton_stop.min(), plt_table.compton_stop.max()],
                            [res_dict['mean_energy'] - total_error] * 2,
                            [res_dict['mean_energy'] + total_error] * 2, color='green', zorder=1, alpha=0.4)
            ax.tick_params(axis='x', labelrotation=45)
            ax.xaxis.set_major_locator(locator)
            ax.xaxis.set_major_formatter(formatter)
            ax.set(title=f'{res_dict["energy_point"]}, E = {res_dict["mean_energy"]:.3f} ± {res_dict["mean_energy_stat_err"]:.3f} ± {res_dict["mean_energy_sys_err"]:.3f} MeV',
                   xlabel='Time, NSK', ylabel='Energy, [MeV]',
                   xlim=[plt_table.compton_stop.min(), plt_table.compton_stop.max()])
            plt.savefig(f'{pics_folder}/{res_dict["first_run"]}_{res_dict["energy_point"]}.png')
            plt.close()
    result_df = pd.DataFrame(result_rows, columns=['energy_point', 'first_run', 'last_run',
                                                   'mean_energy', 'mean_energy_stat_err', 'mean_energy_sys_err',
                                                   'mean_spread', 'mean_spread_stat_err', 'used_lum', 'comment'])
    return result_df

def final_table_to_clbrdb(df: pd.DataFrame, clbrdb: CalibrdbHandler, runs_df: pd.DataFrame, season: str):
    """Writes good values from the averaged table into clbrdb"""
    good_values = (df.comment == '') | ((df.comment != '') & ((df.mean_energy.astype(float) - df.energy_point).abs() < 5))
    df_clbrdb = df.loc[good_values].drop(['comment', 'used_lum'], axis=1)
    df_clbrdb = pd.merge(df_clbrdb, runs_df[['run', 'starttime']], how='left', left_on='first_run', right_on='run').drop(['run'], axis=1)
    df_clbrdb = pd.merge(df_clbrdb, runs_df[['run', 'stoptime']], how='left', left_on='last_run', right_on='run').drop(['run'], axis=1)
    df_clbrdb = df_clbrdb.assign(writetime=lambda df: df['stoptime'])
    df_clbrdb = df_clbrdb[['writetime', 'starttime', 'stoptime',
                           'energy_point', 'first_run', 'last_run', 'mean_energy',
                           'mean_energy_stat_err', 'mean_energy_sys_err', 'mean_spread', 'mean_spread_stat_err']].values.tolist()
    clbrdb.insert(df_clbrdb, 'Misc', 'RunHeader', 'Compton_run_avg', 'Default', comment=season)
    clbrdb.commit()

# python scripts/compton_combiner.py -s NNBAR2021 -c database.ini --csv --clbrdb
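# A sketch of the expected config file. The [cmdruns] options mirror the
# RunsDBHandler constructor; the [clbrDB] option names are an assumption,
# since CalibrdbHandler is defined in compton_filter:
#
#     [cmdruns]
#     host = cmddb
#     database = online
#     user = ...
#     password = ...
#
#     [clbrDB]
#     host = ...
#     database = ...
#     user = ...
#     password = ...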
def main():
    parser = argparse.ArgumentParser(description='Mean compton energy measurements from clbrdb')
    parser.add_argument('-s', '--season', help='Name of the season')
    parser.add_argument('-c', '--config', help='Config file containing information for access to databases')
    parser.add_argument('--csv', action='store_true', help='Save a csv file with the data or not')
    parser.add_argument('--clbrdb', action='store_true', help='Update the Compton_run_avg clbrdb or not')
    parser.add_argument('--pics_folder', help='Path to the directory for saving the pictures')
    parser.add_argument('--only_last', action='store_true', help='Compute values of the last (in Compton_run_avg clbrdb) and new points only')
    args = parser.parse_args()
    # logging.info(f"Arguments: season: {args.season}, config {args.config}")

    config = ConfigParser()
    config.read(args.config)
    rdb = RunsDBHandler(**config['cmdruns'])
    clbrdb = CalibrdbHandler(**config['clbrDB'])
    idx = SEASONS['name'].index(args.season)
    runs_range = (SEASONS['start_run'][idx], SEASONS['start_run'][idx + 1])
    if args.only_last:
        res_avg = clbrdb.load_table('Misc', 'RunHeader', 'Compton_run_avg', num_last_rows=1)
        if len(res_avg[0]) != 0:
            begintime = res_avg[0][0][res_avg[1].index("begintime")]
            runs_range = (begintime, None)
    res_rdb = rdb.load_tables(runs_range)
    runs_df = pd.DataFrame(res_rdb[0], columns=res_rdb[1])
    tdlt0 = timedelta(days=2)
    time_range = (runs_df.starttime.min() - tdlt0, runs_df.stoptime.max() + tdlt0)
    res_clbrdb = clbrdb.load_table('Misc', 'RunHeader', 'Compton_run', num_last_rows=None, timerange=time_range)
    cb = Combiner(res_rdb, res_clbrdb)
    comb_df = cb.combined_table()
    compton_df = pd.DataFrame(res_clbrdb[0], columns=res_clbrdb[1])
    cdf = process_combined(comb_df, runs_df, compton_df, args.pics_folder, rdb)
    if args.csv:
        cdf.to_csv(f'{args.season}.csv', index=False, float_format='%g')
    if args.clbrdb:
        final_table_to_clbrdb(cdf, clbrdb, runs_df, args.season)
    return

if __name__ == "__main__":
    main()