@@ -99,28 +99,86 @@ def generate_cpp_merge_test(n: int) -> str:
99
99
return cpp_code
100
100
101
101
102
def generate_cpp_nested_loop_test(n: int) -> str:
    """
    Generates C++ code with N levels of nested loops.

    This pattern tests how analysis performance scales with loop nesting depth,
    which is a key factor in the complexity of dataflow analyses on structured
    control flow.

    Example (n=3):
        struct MyObj { int id; ~MyObj() {} };
        void nested_loops_3() {
          MyObj* p = nullptr;
          for(int i0=0; i0<2; ++i0) {
            MyObj s0;
            p = &s0;
            for(int i1=0; i1<2; ++i1) {
              MyObj s1;
              p = &s1;
              for(int i2=0; i2<2; ++i2) {
                MyObj s2;
                p = &s2;
              }
            }
          }
        }

    Args:
        n: The loop nesting depth; must be positive.

    Returns:
        A self-contained C++ translation unit (including main()) as a string,
        or a comment string when n is not positive.
    """
    if n <= 0:
        return "// Nesting depth must be positive."

    cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
    cpp_code += f"void nested_loops_{n}() {{\n"
    cpp_code += "  MyObj* p = nullptr;\n"

    # Open n nested loops; each level declares a local and escapes its address
    # into the outer pointer, exercising loan tracking at every depth.
    for i in range(n):
        indent = "  " * (i + 1)
        cpp_code += f"{indent}for(int i{i}=0; i{i}<2; ++i{i}) {{\n"
        cpp_code += f"{indent}  MyObj s{i}; p = &s{i};\n"

    # Close the loops in reverse order so braces stay balanced.
    for i in range(n - 1, -1, -1):
        indent = "  " * (i + 1)
        cpp_code += f"{indent}}}\n"

    cpp_code += "}\n"
    cpp_code += f"\nint main() {{ nested_loops_{n}(); return 0; }}\n"
    return cpp_code
148
def analyze_trace_file(trace_path: str) -> dict:
    """
    Parses the -ftime-trace JSON output to find durations for the lifetime
    analysis and its sub-phases.

    Durations are summed across all matching trace events (a phase may fire
    once per function analyzed).

    Args:
        trace_path: Path to the Chrome-trace-format JSON file emitted by
            clang's -ftime-trace.

    Returns:
        A dictionary of durations in microseconds, with all keys present and
        zeroed when the file is missing or unparsable.
    """
    durations = {
        "lifetime_us": 0.0,
        "total_us": 0.0,
        "fact_gen_us": 0.0,
        "loan_prop_us": 0.0,
        "expired_loans_us": 0.0,
        "live_origins_us": 0.0,
    }
    # Maps -ftime-trace event names to our result keys.
    event_name_map = {
        "LifetimeSafetyAnalysis": "lifetime_us",
        "ExecuteCompiler": "total_us",
        "FactGenerator": "fact_gen_us",
        "LoanPropagation": "loan_prop_us",
        "ExpiredLoans": "expired_loans_us",
        "LiveOrigins": "live_origins_us",
    }
    try:
        with open(trace_path, "r") as f:
            trace_data = json.load(f)
        for event in trace_data.get("traceEvents", []):
            event_name = event.get("name")
            if event_name in event_name_map:
                key = event_name_map[event_name]
                # "dur" is in microseconds per the trace event format.
                durations[key] += float(event.get("dur", 0))
    except (IOError, json.JSONDecodeError) as e:
        print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
        # Return a zeroed dict (not the partially-filled one) so callers can
        # treat any error uniformly.
        return {key: 0.0 for key in durations}
    return durations
125
183
126
184
def power_law (n , c , k ):
@@ -135,8 +193,29 @@ def human_readable_time(ms: float) -> str:
135
193
return f"{ ms :.2f} ms"
136
194
137
195
196
def calculate_complexity(n_data, y_data) -> tuple[float | None, float | None]:
    """
    Calculates the exponent 'k' for the power law fit y = c * n^k.

    Args:
        n_data: 1-D array of input sizes.
        y_data: 1-D array of measured times corresponding to n_data.

    Returns:
        A tuple of (k, k_standard_error), or (None, None) when the data is
        too sparse, effectively zero/constant, or the fit fails to converge.
    """
    try:
        # Need at least 3 points, and non-negligible, non-constant timings,
        # for the fit to be meaningful.
        if len(n_data) < 3 or np.all(y_data < 1e-6) or np.var(y_data) < 1e-6:
            return None, None

        # Zero timings would make the power-law model degenerate; drop them.
        non_zero_indices = y_data > 0
        if np.sum(non_zero_indices) < 3:
            return None, None

        n_fit, y_fit = n_data[non_zero_indices], y_data[non_zero_indices]
        # NOTE(review): p0=[0, 1] starts the scale coefficient c at exactly 0,
        # which is a degenerate point for c * n^k (the gradient w.r.t. k
        # vanishes there) — consider a small positive c; verify convergence.
        popt, pcov = curve_fit(power_law, n_fit, y_fit, p0=[0, 1], maxfev=5000)
        # Standard error of k is the sqrt of its diagonal entry in the
        # parameter covariance matrix.
        k_stderr = np.sqrt(np.diag(pcov))[1]
        return popt[1], k_stderr
    except (RuntimeError, ValueError):
        # curve_fit raises RuntimeError on non-convergence and ValueError on
        # bad inputs; both simply mean "no reliable fit".
        return None, None
138
217
def generate_markdown_report (results : dict ) -> str :
139
- """Generates a Markdown-formatted report from the benchmark results."""
218
+ """Generates a concise, Markdown-formatted report from the benchmark results."""
140
219
report = []
141
220
timestamp = datetime .now ().strftime ("%Y-%m-%d %H:%M:%S %Z" )
142
221
report .append (f"# Lifetime Analysis Performance Report" )
@@ -146,54 +225,54 @@ def generate_markdown_report(results: dict) -> str:
146
225
for test_name , data in results .items ():
147
226
title = data ["title" ]
148
227
report .append (f"## Test Case: { title } " )
149
- report .append ("" )
228
+ report .append ("\n **Timing Results:** \n " )
150
229
151
230
# Table header
152
- report .append ("| N | Analysis Time | Total Clang Time |" )
153
- report .append ("|:----|--------------:|-----------------:|" )
231
+ report .append (
232
+ "| N (Input Size) | Total Time | Analysis Time (%) | Fact Generator (%) | Loan Propagation (%) | Expired Loans (%) | Live Origins (%) |"
233
+ )
234
+ report .append (
235
+ "|:---------------|-----------:|------------------:|-------------------:|---------------------:|------------------:|------------------:|"
236
+ )
154
237
155
238
# Table rows
156
239
n_data = np .array (data ["n" ])
157
- analysis_data = np .array (data ["lifetime_ms" ])
158
- total_data = np .array (data ["total_ms" ])
240
+ total_ms_data = np .array (data ["total_ms" ])
159
241
for i in range (len (n_data )):
160
- analysis_str = human_readable_time (analysis_data [i ])
161
- total_str = human_readable_time (total_data [i ])
162
- report .append (f"| { n_data [i ]:<3} | { analysis_str :>13} | { total_str :>16} |" )
163
-
164
- report .append ("" )
165
-
166
- # Complexity analysis
167
- report .append (f"**Complexity Analysis:**" )
168
- try :
169
- # Curve fitting requires at least 3 points
170
- if len (n_data ) < 3 :
171
- raise ValueError ("Not enough data points to perform curve fitting." )
172
-
173
- popt , pcov = curve_fit (
174
- power_law , n_data , analysis_data , p0 = [0 , 2 ], maxfev = 5000
175
- )
176
- _ , k = popt
177
-
178
- # Confidence Interval for k
179
- alpha = 0.05 # 95% confidence
180
- dof = max (0 , len (n_data ) - len (popt )) # degrees of freedom
181
- t_val = t .ppf (1.0 - alpha / 2.0 , dof )
182
- # Standard error of the parameters
183
- perr = np .sqrt (np .diag (pcov ))
184
- k_stderr = perr [1 ]
185
- k_ci_lower = k - t_val * k_stderr
186
- k_ci_upper = k + t_val * k_stderr
187
-
188
- report .append (
189
- f"- The performance for this case scales approx. as **O(n<sup>{ k :.2f} </sup>)**."
190
- )
191
- report .append (
192
- f"- **95% Confidence interval for exponent:** `[{ k_ci_lower :.2f} , { k_ci_upper :.2f} ]`."
193
- )
242
+ total_t = total_ms_data [i ]
243
+ if total_t < 1e-6 :
244
+ total_t = 1.0 # Avoid division by zero
245
+
246
+ row = [
247
+ f"| { n_data [i ]:<14} |" ,
248
+ f"{ human_readable_time (total_t ):>10} |" ,
249
+ f"{ data ['lifetime_ms' ][i ] / total_t * 100 :>17.2f} % |" ,
250
+ f"{ data ['fact_gen_ms' ][i ] / total_t * 100 :>18.2f} % |" ,
251
+ f"{ data ['loan_prop_ms' ][i ] / total_t * 100 :>20.2f} % |" ,
252
+ f"{ data ['expired_loans_ms' ][i ] / total_t * 100 :>17.2f} % |" ,
253
+ f"{ data ['live_origins_ms' ][i ] / total_t * 100 :>17.2f} % |" ,
254
+ ]
255
+ report .append (" " .join (row ))
256
+
257
+ report .append ("\n **Complexity Analysis:**\n " )
258
+ report .append ("| Analysis Phase | Complexity O(n<sup>k</sup>) |" )
259
+ report .append ("|:------------------|:--------------------------|" )
260
+
261
+ analysis_phases = {
262
+ "Total Analysis" : data ["lifetime_ms" ],
263
+ "FactGenerator" : data ["fact_gen_ms" ],
264
+ "LoanPropagation" : data ["loan_prop_ms" ],
265
+ "ExpiredLoans" : data ["expired_loans_ms" ],
266
+ "LiveOrigins" : data ["live_origins_ms" ],
267
+ }
194
268
195
- except (RuntimeError , ValueError ) as e :
196
- report .append (f"- Could not determine a best-fit curve for the data: { e } " )
269
+ for phase_name , y_data in analysis_phases .items ():
270
+ k , delta = calculate_complexity (n_data , np .array (y_data ))
271
+ if k is not None and delta is not None :
272
+ complexity_str = f"O(n<sup>{ k :.2f} </sup> ± { delta :.2f} )"
273
+ else :
274
+ complexity_str = "(Negligible)"
275
+ report .append (f"| { phase_name :<17} | { complexity_str :<25} |" )
197
276
198
277
report .append ("\n ---\n " )
199
278
@@ -202,7 +281,7 @@ def generate_markdown_report(results: dict) -> str:
202
281
203
282
def run_single_test (
204
283
clang_binary : str , output_dir : str , test_name : str , generator_func , n : int
205
- ) -> tuple [ float , float ] :
284
+ ) -> dict :
206
285
"""Generates, compiles, and benchmarks a single test case."""
207
286
print (f"--- Running Test: { test_name .capitalize ()} with N={ n } ---" )
208
287
@@ -231,11 +310,12 @@ def run_single_test(
231
310
if result .returncode != 0 :
232
311
print (f"Compilation failed for N={ n } !" , file = sys .stderr )
233
312
print (result .stderr , file = sys .stderr )
234
- return 0.0 , 0.0
313
+ return {}
235
314
236
- lifetime_us , total_us = analyze_trace_file (trace_file )
237
-
238
- return lifetime_us / 1000.0 , total_us / 1000.0
315
+ durations_us = analyze_trace_file (trace_file )
316
+ return {
317
+ key .replace ("_us" , "_ms" ): value / 1000.0 for key , value in durations_us .items ()
318
+ }
239
319
240
320
241
321
if __name__ == "__main__" :
@@ -270,6 +350,12 @@ def run_single_test(
270
350
"generator_func" : generate_cpp_merge_test ,
271
351
"n_values" : [10 , 50 , 100 , 200 , 400 , 800 ],
272
352
},
353
+ {
354
+ "name" : "nested_loops" ,
355
+ "title" : "Deeply Nested Loops" ,
356
+ "generator_func" : generate_cpp_nested_loop_test ,
357
+ "n_values" : [10 , 50 , 100 , 200 , 400 , 800 ],
358
+ },
273
359
]
274
360
275
361
results = {}
@@ -282,21 +368,30 @@ def run_single_test(
282
368
"n" : [],
283
369
"lifetime_ms" : [],
284
370
"total_ms" : [],
371
+ "fact_gen_ms" : [],
372
+ "loan_prop_ms" : [],
373
+ "expired_loans_ms" : [],
374
+ "live_origins_ms" : [],
285
375
}
286
376
for n in config ["n_values" ]:
287
- lifetime_ms , total_ms = run_single_test (
377
+ durations_ms = run_single_test (
288
378
args .clang_binary ,
289
379
args .output_dir ,
290
380
test_name ,
291
381
config ["generator_func" ],
292
382
n ,
293
383
)
294
- if total_ms > 0 :
384
+ if durations_ms :
295
385
results [test_name ]["n" ].append (n )
296
- results [test_name ]["lifetime_ms" ].append (lifetime_ms )
297
- results [test_name ]["total_ms" ].append (total_ms )
386
+ for key , value in durations_ms .items ():
387
+ results [test_name ][key ].append (value )
388
+
298
389
print (
299
- f" Total: { human_readable_time (total_ms )} | Analysis: { human_readable_time (lifetime_ms )} "
390
+ f" Total Analysis: { human_readable_time (durations_ms ['lifetime_ms' ])} | "
391
+ f"FactGen: { human_readable_time (durations_ms ['fact_gen_ms' ])} | "
392
+ f"LoanProp: { human_readable_time (durations_ms ['loan_prop_ms' ])} | "
393
+ f"ExpiredLoans: { human_readable_time (durations_ms ['expired_loans_ms' ])} | "
394
+ f"LiveOrigins: { human_readable_time (durations_ms ['live_origins_ms' ])} "
300
395
)
301
396
302
397
print ("\n \n " + "=" * 80 )
@@ -305,3 +400,8 @@ def run_single_test(
305
400
306
401
markdown_report = generate_markdown_report (results )
307
402
print (markdown_report )
403
+
404
+ report_filename = os .path .join (args .output_dir , "performance_report.md" )
405
+ with open (report_filename , "w" ) as f :
406
+ f .write (markdown_report )
407
+ print (f"Report saved to: { report_filename } " )
0 commit comments