Skip to content

Commit 9ca9d6e

Browse files
committed
update experiment script
Signed-off-by: Le Xu <[email protected]>
1 parent 53f3d2c commit 9ca9d6e

File tree

4 files changed

+56
-22
lines changed

4 files changed

+56
-22
lines changed

benchmarks/autoscaling/plot-everything.py

+4-7
Original file line numberDiff line numberDiff line change
@@ -56,14 +56,12 @@ def parse_experiment_output(lines):
5656

5757
return df, base_time
5858

59-
def get_autoscaler_name(output_dir):
59+
def get_autoscaler_name(output_dir, workload_type):
6060
autoscaling = None
6161
print(f"output_dir: {output_dir}")
6262
# Extract the last part of the path after the last slash
6363
filename = output_dir.split("/")[-1]
64-
65-
# Regular expression to match the autoscaler name
66-
match = re.search(r"^[^-]+-[^-]+-([^-]+(?:-[^-]+)*)-\d{8}-\d{6}$", filename)
64+
match = re.search(rf"^[^-]+-{workload_type}-([^-]+(?:-[^-]+)*)-\d{{8}}-\d{{6}}$", filename)
6765

6866
if match:
6967
print(match)
@@ -246,11 +244,10 @@ def plot_combined_visualization(experiment_home_dir, workload_type):
246244
output_dir = os.path.join(experiment_home_dir, subdir)
247245
if "pod_logs" in output_dir:
248246
continue
249-
autoscaler = get_autoscaler_name(output_dir)
247+
autoscaler = get_autoscaler_name(output_dir, workload_type)
250248
color = colors[autoscaler]
251249
marker = '.'
252250
label_name = f'{autoscaler}'
253-
254251
# Read and parse data
255252
experiment_output_file = os.path.join(output_dir, "output.jsonl")
256253
parsed_lines = read_experiment_file(experiment_output_file)
@@ -380,7 +377,7 @@ def plot_combined_visualization(experiment_home_dir, workload_type):
380377
content = read_stats_file(stat_fn)
381378
if content:
382379
stats = parse_performance_stats(content)
383-
autoscaler = get_autoscaler_name(output_dir)
380+
autoscaler = get_autoscaler_name(output_dir, workload_type)
384381
title = f"{autoscaler}"
385382
if autoscaler is None or autoscaler == "none" or autoscaler == "NONE":
386383
color_list.append(colors[autoscaler])

benchmarks/autoscaling/requirements_bench_pa.txt

-6
This file was deleted.

benchmarks/autoscaling/run-test.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ python3 ${aibrix_repo}/benchmarks/client/client.py \
114114
--model ${target_ai_model} \
115115
--api-key ${api_key} \
116116
--output-file-path ${output_jsonl_path} \
117-
#--output-dir ${experiment_result_dir} \
117+
--streaming
118118

119119
echo "Experiment is done. date: $(date)"
120120

benchmarks/autoscaling/run.sh

+51-8
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,10 @@
11
#!/bin/bash
22
set -x
3-
#./run.sh workload/workload/25min_up_and_down/25min_up_and_down.jsonl
43

5-
export KUBECONFIG=~/.kube/config-vke
6-
export aibrix_repo="/root/aibrix-local"
7-
export api_key="sk-kFJ12nKsFVfVmGpj3QzX65s4RbN2xJqWzPYCjYu7wT3BlbLi"
8-
export kube_context="ccr3aths9g2gqedu8asdg@35122069-kcu0n2lfb7pjdd83330h0"
4+
export KUBECONFIG=${KUBECONFIG}
5+
export aibrix_repo=${aibrix_repo}
6+
export api_key=${api_key}
7+
export kube_context=${kube_context}
98

109
for WORKLOAD_TYPE in "T_HighSlow_I_HighSlow_O_HighFast" "T_HighSlow_I_HighSlow_O_HighSlow" "T_HighSlow_I_LowFast_O_HighSlow" "T_HighSlow_I_LowSlow_O_HighSlow"
1110
do
@@ -16,6 +15,35 @@ do
1615
exit 1
1716
fi
1817

18+
autoscalers="hpa kpa apa optimizer-kpa"
19+
for autoscaler in ${autoscalers}; do
20+
start_time=$(date +%s)
21+
echo "--------------------------------"
22+
echo "started experiment at $(date)"
23+
echo autoscaler: ${autoscaler}
24+
echo workload: ${workload_path}
25+
echo "The stdout/stderr is being logged in output-${autoscaler}-${WORKLOAD_TYPE}.txt"
26+
./run-test.sh ${workload_path} ${autoscaler} ${aibrix_repo} ${api_key} ${kube_context} ${WORKLOAD_TYPE} > output-${autoscaler}-${WORKLOAD_TYPE}.txt 2>&1
27+
end_time=$(date +%s)
28+
echo "Done: Time taken: $((end_time-start_time)) seconds"
29+
echo "--------------------------------"
30+
sleep 10
31+
done
32+
python plot-everything.py experiment_results/${WORKLOAD_TYPE} ${WORKLOAD_TYPE}
33+
done
34+
35+
36+
37+
38+
for WORKLOAD_TYPE in "workload-2024-10-10-19-50-00" "workload-2024-10-15-18-50-00"
39+
do
40+
workload_path="workload/maas/${WORKLOAD_TYPE}/internal.jsonl"
41+
if [ -z "${workload_path}" ]; then
42+
echo "workload path is not given"
43+
echo "Usage: $0 <workload_path>"
44+
exit 1
45+
fi
46+
1947
autoscalers="hpa kpa apa optimizer-kpa"
2048
for autoscaler in ${autoscalers}; do
2149
start_time=$(date +%s)
@@ -30,9 +58,24 @@ do
3058
echo "--------------------------------"
3159
sleep 10
3260
done
61+
python plot-everything.py experiment_results/${WORKLOAD_TYPE} ${WORKLOAD_TYPE}
3362
done
3463

35-
# for WORKLOAD_TYPE in "T_HighSlow_I_HighSlow_O_HighFast" "T_HighSlow_I_HighSlow_O_HighSlow" "T_HighSlow_I_LowFast_O_HighSlow" "T_HighSlow_I_LowSlow_O_HighSlow"
64+
65+
66+
# target_deployment="deepseek-llm-7b-chat"
67+
# kubectl delete podautoscaler --all --all-namespaces
68+
# python3 ${aibrix_repo}/benchmarks/utils/set_num_replicas.py --deployment ${target_deployment} --replicas 1 --context ${kube_context}
69+
# target_ai_model=deepseek-llm-7b-chat
70+
71+
72+
# mkdir -p output-profile
73+
# for qps in {1..10}
3674
# do
37-
# python plot-everything.py experiment_results/${WORKLOAD_TYPE}
38-
# done
75+
# kubectl -n envoy-gateway-system port-forward service/envoy-aibrix-system-aibrix-eg-903790dc 8888:80 &
76+
# STRATEGY="random"
77+
# WORKLOAD_PATH=workload/constant/qps-${qps}/constant.jsonl
78+
# python3 ${aibrix_repo}/benchmarks/client/client.py --workload-path ${WORKLOAD_PATH} --endpoint "http://localhost:8888" --model ${target_ai_model} --api-key ${api_key} --output-file-path output-profile/output-qps${qps}.jsonl
79+
# # python analyze.py output-profile/output-qps${qps}.jsonl
80+
# sleep 30
81+
# done

0 commit comments

Comments (0)