@@ -4,6 +4,7 @@
 import argparse
 from configparser import ConfigParser
 from datetime import datetime, timedelta, timezone
+import logging
 import os
 import sqlite3
 from typing import Union, Tuple, Optional
@@ -18,8 +19,8 @@ import pandas as pd
 from tqdm import tqdm
 
 SEASONS = {
-    'name': ['HIGH2017', 'RHO2018', 'HIGH2019', 'LOW2020', 'HIGH2020', 'HIGH2021', 'NNBAR2021'],
-    'start_run': [36872, 48938, 70014, 85224, 89973, 98116, 107342, None],
+    'name': ['RHO2013', 'BRK2013/16', 'HIGH2017', 'RHO2018', 'HIGH2019', 'LOW2020', 'HIGH2020', 'HIGH2021', 'NNBAR2021'],
+    'start_run': [18809, 32076, 36872, 48938, 70014, 85224, 89973, 98116, 107342, None],
 }
 
 class RunsDBHandler():
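The SEASONS table pairs season names with the first run number of each season; the trailing None in start_run only marks the open end of the last interval. Under that reading, a run is assigned to a season by finding the interval its run number falls into. A minimal lookup sketch, assuming SEASONS is in scope (the helper name is illustrative, not part of this patch):

import bisect

def season_of_run(run: int) -> str:
    # start_run[:-1] are the lower edges of the season intervals (one per name)
    edges = SEASONS['start_run'][:-1]
    # count how many season starts are <= run; the latest such season applies
    idx = bisect.bisect_right(edges, run) - 1
    if idx < 0:
        raise ValueError(f'run {run} precedes the first known season')
    return SEASONS['name'][idx]

# e.g. season_of_run(90000) -> 'HIGH2020' (89973 <= 90000 < 98116)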
@@ -278,6 +279,12 @@ def calculate_point(comb_df: pd.DataFrame, runs_df: pd.DataFrame, compton_df: pd
     min_run_time, max_run_time = min(min_Yruntime, runs_df_with_bads_df.starttime.min()), max(max_Yruntime, runs_df_with_bads_df.stoptime.max())
 
     compton_meas = compton_df.query('((begintime>=@min_run_time)&(begintime<=@max_run_time))|((endtime>=@min_run_time)&(endtime<=@max_run_time))').copy()
+
+    if len(compton_meas) == 0:
+        # no compton measurements
+        raise Exception("No measurement in this point. Pass it.")
+
+
     res_df = pd.DataFrame(list(map(lambda x: {
         'compton_start': x[1]['begintime'],
         'compton_stop': x[1]['endtime'],
@@ -286,6 +293,7 @@ def calculate_point(comb_df: pd.DataFrame, runs_df: pd.DataFrame, compton_df: pd
         'spread_mean': float(x[1]['data'][2]),
         'spread_std': float(x[1]['data'][3]),
     }, compton_meas.iterrows())))
+
     res_df = res_df.query(f'abs(e_mean -{comb_df.iloc[0].at["elabel"]})<5')
 
     if len(res_df) == 0:
@@ -306,20 +314,23 @@
 
 
     df = comb_df.loc[~comb_df.compton_start.isna()].copy()
+    # df.spread_mean = np.where(df.spread_mean < 1e-3, 1e-3, df.spread_mean)
     df.spread_std = np.where(df.spread_std < 1e-4, 1e-4, df.spread_std)
 
     df = df[df.e_std > 0]
     mean_energy = np.sum(df.e_mean*df.luminosity/(df.e_std**2))/np.sum(df.luminosity/(df.e_std**2))
     # std_energy = np.sqrt(1/np.sum((df.luminosity/df.luminosity.mean())/df.e_std**2))
 
-    good_criterion = np.abs((df.e_mean - mean_energy)/np.sqrt(df.e_mean.std()**2 + df.e_std**2)) < 5
-    #print(df[~good_criterion].elabel.value_counts())
+    good_criterion = np.abs((df.e_mean - mean_energy)/np.sqrt(df.e_mean.std(ddof=0)**2 + df.e_std**2)) < 5
+    # print('vals', np.abs((df.e_mean - mean_energy)/np.sqrt(df.e_mean.std()**2 + df.e_std**2)) )
+    # print(df[~good_criterion].elabel.value_counts())
     df = df[good_criterion]
 
-    m = Minuit(Likelihood(df.e_mean, df.e_std, df.luminosity), mean=df.e_mean.mean(), sigma=df.e_mean.std())
+    m = Minuit(Likelihood(df.e_mean, df.e_std, df.luminosity), mean=df.e_mean.mean(), sigma=df.e_mean.std(ddof=0))
     m.errordef = 0.5
     m.limits['sigma'] = (0, None)
     m.migrad();
+    # print(m.migrad())
     sys_err = m.values['sigma']
     mean_en = m.values['mean']
 
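Two notes on this hunk: pandas' std(ddof=0) is the population standard deviation (normalised by N rather than the default N-1), and errordef = 0.5 tells Minuit the objective is a negative log-likelihood, so the fitted sigma plays the role of an extra common energy spread on top of the per-point errors e_std. The Likelihood class itself is defined elsewhere in the file; a compatible, luminosity-weighted Gaussian negative log-likelihood might look like the following sketch (an assumed form, not the repository's actual implementation):

import numpy as np

class Likelihood:
    """Gaussian negative log-likelihood in (mean, sigma), weighted by luminosity."""
    def __init__(self, means, stds, weights):
        self.means = np.asarray(means, dtype=float)      # per-point energies
        self.stds = np.asarray(stds, dtype=float)        # per-point statistical errors
        self.weights = np.asarray(weights, dtype=float)  # luminosities used as weights

    def __call__(self, mean, sigma):
        # total variance per point: statistical error plus a common spread sigma
        var = self.stds**2 + sigma**2
        # weighted sum of -log L terms, up to an additive constant
        return 0.5 * np.sum(self.weights * ((self.means - mean)**2 / var + np.log(var)))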
@@ -394,7 +405,10 @@ def process_combined(combined_df: pd.DataFrame, runs_df: pd.DataFrame, compton_d
     result_df = pd.DataFrame(columns=['energy_point', 'first_run', 'last_run', 'mean_energy', 'mean_energy_stat_err', 'mean_energy_sys_err', 'mean_spread', 'mean_spread_stat_err', 'used_lum', 'comment'])
 
     for i, table in tqdm(combined_df.groupby('point_idx', dropna=False)):
-        res_dict, good_df = calculate_point(table, runs_df, compton_df, rdb)
+        try:
+            res_dict, good_df = calculate_point(table, runs_df, compton_df, rdb)
+        except Exception:
+            continue
         result_df = result_df.append(res_dict, ignore_index=True)
 
     if pics_folder is not None:
@@ -441,10 +455,10 @@ def final_table_to_clbrdb(df: pd.DataFrame, clbrdb: CalibrdbHandler, runs_df: pd
     clbrdb.insert(df_clbrdb, 'Misc', 'RunHeader', 'Compton_run_avg', 'Default', comment = season)
     clbrdb.commit()
 
-def save_csv(df: pd.DataFrame, filepath: str):
+def save_csv(df: pd.DataFrame, filepath: str, update_current: bool = True):
     """Saves csv file. Updates current file in filepath if exists"""
 
-    if os.path.isfile(filepath):
+    if (os.path.isfile(filepath) and update_current):
         df_current = pd.read_csv(filepath)
         df_current = df_current.append(df, ignore_index=True)
         df_current = df_current.drop_duplicates(subset=['energy_point', 'first_run'], keep='last')
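With the new update_current flag the merge step can be skipped; left at its default, rows sharing the same (energy_point, first_run) pair are replaced by the newer entry via drop_duplicates(keep='last'). A short usage sketch (variable and file names are illustrative only):

# append to an existing table, keeping the newest row per (energy_point, first_run)
save_csv(result_df, 'tables/HIGH2021.csv')

# skip the merge with the existing file (presumably writing only the new rows)
save_csv(result_df, 'tables/HIGH2021.csv', update_current=False)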