Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a script to generate material scan plots #757

Merged
merged 4 commits into from
Aug 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 102 additions & 0 deletions bin/g4MaterialScan_raw_plot
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
#!/usr/bin/env python3

# SPDX-License-Identifier: LGPL-3.0-or-later
# Copyright (C) 2024 Chao Peng
'''
A script to plot raw data output from the script g4MaterialScan_to_csv
'''

import os
import re
import argparse
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='g4MaterialScan_raw_plot',
        description='A python script to draw material thicknesses from raw output of g4MaterialScan_to_csv.'
    )
    parser.add_argument(
        'data_path',
        help='path to the raw data from scan (a csv file).'
    )
    parser.add_argument(
        '--path-lengths', default="0, 180, 600",
        help='path length points, separated by \",\".'
    )
    parser.add_argument(
        '--sep', default='\t',
        help='Separator for the CSV file.'
    )
    parser.add_argument(
        '--font-size', type=float, default=18.,
        help='Font size of the plots.'
    )
    args = parser.parse_args()

    # get the path length points (interval boundaries for the sub-plots)
    pls = np.array([float(x.strip()) for x in args.path_lengths.split(',')])
    if len(pls) < 2:
        print('Need at least two points in --path-lengths')
        exit(-1)

    # determine eta and phi from the data path (e.g. "scan_raw_eta=1.5_phi=20.csv");
    # fall back to 0, 0 with a warning if the file name does not encode them
    eta, phi = 0., 0.
    match = re.search(r'eta=([\d\-\.]*\d).*phi=([\d\-\.]*\d)', args.data_path)
    if match:
        eta = float(match[1])
        phi = float(match[2])
    else:
        print('WARNING: Fail to determine eta, phi from data path, information may be incorrect.')

    # read and process data: cumulative radiation/interaction lengths along the path
    df = pd.read_csv(args.data_path, sep=args.sep, index_col=0)
    df.loc[:, 'X0_cum'] = df['X0'].cumsum()
    df.loc[:, 'lambda_cum'] = df['lambda'].cumsum()

    # font size and colors
    fs = args.font_size
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']

    # plot X0 and lambda in the path length intervals [pl(i), pl(i+1)]
    # squeeze=False keeps axs as an array even for a single interval,
    # so axs.flat below works when only two path-length points are given
    fig, axs = plt.subplots(1, len(pls) - 1, figsize=(8 * (len(pls) - 1), 6), dpi=160,
                            squeeze=False,
                            gridspec_kw=dict(left=0.1, right=0.92, top=0.95, wspace=0.4))
    for i, ax in enumerate(axs.flat):
        min_pl, max_pl = pls[i], pls[i + 1]

        # rows inside this interval, used only to scale the y-axes
        dfr = df[(df['path_length'] <= max_pl) & (df['path_length'] >= min_pl)]
        # cumulative X0 (left axis)
        ax.step(df['path_length'], df['X0_cum'], color=colors[0])

        # cumulative lambda (right axis)
        ax2 = ax.twinx()
        ax2.step(df['path_length'], df['lambda_cum'], color=colors[1], ls='--')

        ax.text(0.05, 0.95, r'$\eta={:g}, \phi={:g}^{{\circ}}$'.format(eta, phi),
                fontsize=fs, color=colors[2],
                horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
        # axis format
        ax.set_xlabel('Path Length [cm]', fontsize=fs)
        ax.set_ylabel('$X/X_0$ (Cumulative)', fontsize=fs, color=colors[0])
        # raw string: "\L" is an invalid escape sequence in a plain string literal
        ax2.set_ylabel(r'$\Lambda$ (Cumulative)', fontsize=fs, color=colors[1])
        ax.xaxis.set_minor_locator(AutoMinorLocator(5))
        ax.yaxis.set_minor_locator(AutoMinorLocator(5))
        ax2.yaxis.set_minor_locator(AutoMinorLocator(5))
        ax.set_xlim(min_pl, max_pl)
        ax.set_ylim(0, dfr['X0_cum'].max() * 1.1)
        ax2.set_ylim(0, dfr['lambda_cum'].max() * 1.1)
        ax.tick_params(direction='in', which='both', labelsize=fs)
        ax2.tick_params(direction='in', which='both', labelsize=fs)
        ax.grid(which='major', ls=':')
        ax.set_axisbelow(True)
    # save the plot next to the input file, swapping its extension for .png
    # (splitext also handles paths without a dot, unlike split('.') slicing)
    save_path = os.path.splitext(args.data_path)[0] + '.png'
    fig.savefig(save_path)
    print('Plot saved as \"{}\"'.format(save_path))
28 changes: 25 additions & 3 deletions bin/g4MaterialScan_to_csv
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ class g4MaterialScanner:
]

dft = pd.read_csv(StringIO('\n'.join(scans)), sep='\s+', header=None, index_col=0, names=cols)
print(dft)
return dft.astype({key: np.float64 for key in cols[1:]})


Expand Down Expand Up @@ -151,13 +152,25 @@ if __name__ == '__main__':
'--eta', default='-4.0:4.0:0.1',
help='Eta range, in the format of \"<min>[:<max>[:<step>]]\".'
)
parser.add_argument(
'--eta-values', default=None,
help='a list of eta values, separated by \",\", this option overwrites --eta.'
)
parser.add_argument(
'--phi', default='0:30:1',
help='Phi angle range, in the format of \"<min>[:<max>[:<step>]]\" (degree).'
)
parser.add_argument(
'--phi-values', default=None,
help='a list of phi values, separated by \",\", this option overwrites --phi.'
)
parser.add_argument(
'--mat-buffer-size', type=int, default=50,
help='Material buffer size.'
help='Maximum number of materials included in the aggregated output.'
)
parser.add_argument(
'--raw-output', action='store_true',
help='Turn on to save the raw outputs from scan.'
)
parser.add_argument(
'--sep', default='\t',
Expand All @@ -170,8 +183,15 @@ if __name__ == '__main__':
exit(-1)

start_point = np.array([float(v.strip()) for v in args.start_point.split(',')])
etas = args_array(args.eta)
phis = args_array(args.phi)
if args.eta_values is not None:
etas = np.array([float(xval.strip()) for xval in args.eta_values.split(',')])
else:
etas = args_array(args.eta)

if args.phi_values is not None:
phis = np.array([float(xval.strip()) for xval in args.phi_values.split(',')])
else:
phis = args_array(args.phi)
# sanity check
if not len(phis):
print('No phi values from the input {}, aborted!'.format(args.phi))
Expand Down Expand Up @@ -201,6 +221,8 @@ if __name__ == '__main__':
for vt in value_types:
dfa.loc[:, vt] = dfa['int_{}'.format(vt)].diff(1).fillna(dfa['int_{}'.format(vt)])

if args.raw_output:
dfa.to_csv('scan_raw_eta={:g}_phi={:g}.csv'.format(eta, phi), sep=args.sep, float_format='%g')
# group by materials
single_scan = dfa.groupby('material')[value_types].sum()
# print(single_scan)
Expand Down