Compare commits

No commits in common. "dee3c87c6698ca1abfd1f333e6f91fde1f5bc60b" and "dac08095c43064276c816f3c61f5ba9c25d35543" have entirely different histories.

dee3c87c66...dac08095c4

@@ -1,4 +0,0 @@
-label,BC:cells,BC:dynamicecm,Sweep:Cells,Sweep:DynamicECM,Sweep:DynamicECMDampers,Filling,Other
-CPU,0.04629667717328299,3.88243023434036,0.002164069189058585,82.18255346583861,13.842310640806883,0.005797982839735358,0.03844692981207934
-GPU,0.034897486509106485,41.227398706349675,13.180664898871365,22.432483220415783,19.749714111977486,1.9939606413394697,1.3808809345371023
-Booster,0.09128030041871667,42.736382370072874,12.260298369798207,22.36492489089858,16.070196185259338,4.364304866307413,2.1126130172448816

@@ -1,4 +0,0 @@
-label,BC:cells,BC:dynamicecm,Sweep:Cells,Sweep:DynamicECM,Sweep:DynamicECMDampers,Other
-CPU,0.9047934333333334,75.87579921250001,0.04229322083333333,1606.12465625,270.5255006333333,0.8646950250000001
-GPU,0.11372260000000001,134.35027680000002,42.952648800000006,73.102122,64.3596162,10.9978052
-Booster,0.155221,72.67267939999999,20.848482800000003,38.0312728,27.327165999999995,11.013917200000002

@@ -1,9 +0,0 @@
-label,BC:cells,BC:dynamicecm,Sweep:Cells,Sweep:DynamicECM,Sweep:DynamicECMDampers,Filling,Other
-1,0.034897486509106485,41.227398706349675,13.180664898871365,22.432483220415783,19.749714111977486,1.9939606413394697,1.3808809345371023
-2,0.7137918070198439,50.7137377526309,8.923981074147767,19.35772833011394,17.571497161475467,1.320924803923136,1.3983390706889622
-4,0.6647403410134337,58.291944402330074,7.087118055335309,16.67352093599457,15.236952119238136,1.092645293437499,0.953078852650984
-8,0.6788278363464717,65.05900206274784,5.2089254465999275,14.249993130938188,13.176763096049008,0.7851584656500297,0.8413299616685318
-16,1.441574901760373,61.1245906198054,4.832237599492141,16.029377636605524,14.996293924006913,0.716990077435801,0.858935240893841
-32,3.3736940900007966,47.94830950907802,5.622259685491874,21.238716402604393,19.97281149065964,0.8196124452570666,1.0245963769081996
-64,6.284545018581091,45.28808866195176,4.502683326147901,21.66574860147326,20.522789117514915,0.6460791296539419,1.0900661446771265
-128,11.788574344478201,40.04977497065291,3.2677482351392375,22.1995263401806,21.209134936528987,0.4598839494487746,1.0253572235712836

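The three hunks above remove what look like generated timing-breakdown tables: a per-system breakdown in percent of runtime (CPU, GPU, Booster; each row sums to roughly 100), the same breakdown in absolute units (likely seconds, and without the Filling column), and a scaling series over labels 1 to 128, presumably node or GPU counts. Below is a minimal plotting sketch, not part of the commit: it assumes pandas and matplotlib are installed and that one of the removed files is available locally under the hypothetical name breakdown.csv.

# Hypothetical helper, not from the repository: stacked bars from one breakdown CSV.
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("breakdown.csv", index_col="label")
ax = df.plot.bar(stacked=True)           # one stacked bar per row (CPU/GPU/Booster or node count)
ax.set_ylabel("share of runtime [%]")    # use seconds instead for the non-normalized file
plt.tight_layout()
plt.savefig("breakdown.png")
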
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-
-
-import argparse
-import sys
-
-
-import timing
-
-
-ignore = ["TimeStep"]
-
-
-if __name__ == "__main__":
-    p = argparse.ArgumentParser()
-    p.add_argument("jobs", nargs="+")
-    p.add_argument("--normalize", action="store_true")
-    p.add_argument("--extra-columns", nargs="*")
-    args = p.parse_args()
-
-    columns = [
-        "BC:cells",
-        "BC:dynamicecm",
-        "Sweep:Cells",
-        "Sweep:DynamicECM",
-        "Sweep:DynamicECMDampers",
-    ] + (args.extra_columns or [])
-
-    dfs = dict()
-    labels = []
-    for label, jobid in [jobarg.split(":") for jobarg in args.jobs]:
-        jobs, excluded_array_indices = timing.get_jobs(jobid)
-        df = timing.load_array_mean_timings(jobid, excluded_array_indices).mean()
-        dfs[label] = df
-        labels.extend(df.index)
-
-    labels = set(labels)
-    print(",".join(["label"] + columns + ["Other"]))
-
-    values_by_label = dict()
-    for label, df in dfs.items():
-        values = {"Other": 0}
-        for c in df.index:
-            if c in ignore:
-                continue
-            elif c not in columns:
-                values["Other"] += df[c]
-                print(f"Others+= {c}={df[c]}", file=sys.stderr)
-            else:
-                values[c] = df[c]
-        values_by_label[label] = values
-
-    if args.normalize:
-        print("Normalizing data to 100%...", file=sys.stderr)
-        for values in values_by_label.values():
-            row_length = sum(values.values())
-            for c in values.keys():
-                values[c] *= 100 / row_length
-
-    for label, values in values_by_label.items():
-        print(label + "," + ",".join(f"{values[c]}" for c in columns + ["Other"]))

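The removed script above is the aggregation step that presumably produced the CSV files deleted in the same compare: each positional argument has the form label:jobid, timers not in the fixed column list (plus any --extra-columns) are summed into Other, the TimeStep timer is ignored, and --normalize rescales every row to 100%. A hypothetical invocation (the script's file name and real job IDs are not visible in this view) would look like `python compare_timings.py CPU:4711 GPU:4712 Booster:4713 --extra-columns Filling --normalize > breakdown.csv`; the removed files that carry a Filling column were presumably generated with that extra column, the one without it was not.
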
@@ -72,26 +72,6 @@ def get_accounting_data(jobid: str):
     return json.loads(sacct_results.stdout.decode("utf8"))
 
 
-def get_jobs(jobid: str):
-    accounting_data = get_accounting_data(jobid)
-    jobs = []
-    excluded_array_indices = []
-    for array_job in accounting_data["jobs"]:
-        # Get metadata related to array
-        array_main_job = array_job["array"]["job_id"]
-        array_index = array_job["array"]["task_id"]
-        # The last step is the actual job we want the data for
-        # The steps before set up cluster etc.
-        last_step = array_job["steps"][-1]
-        if last_step["state"] != "COMPLETED":
-            print(f"WARNING: {array_main_job}.{array_index} has state {last_step['state']}, excluding it from measurements", file=sys.stderr)
-            excluded_array_indices.append(array_index)
-            continue
-        jobs.append(last_step)
-
-    return jobs, excluded_array_indices
-
-
 if __name__ == "__main__":
     p = argparse.ArgumentParser(description="Load and analzye data from nastja timing files")
     p.add_argument("jobid", nargs="+")

@@ -102,7 +82,23 @@ if __name__ == "__main__":
     results = []
     for i, jobid in enumerate(args.jobid, 1):
         print(f"({i:2}/{len(args.jobid):2}) Loading accounting data for {jobid}", file=sys.stderr)
-        jobs, excluded_array_indices = get_jobs(jobid)
+        accounting_data = get_accounting_data(jobid)
+
+        jobs = []
+        excluded_array_indices = []
+        for array_job in accounting_data["jobs"]:
+            # Get metadata related to array
+            array_main_job = array_job["array"]["job_id"]
+            array_index = array_job["array"]["task_id"]
+            # The last step is the actual job we want the data for
+            # The steps before set up cluster etc.
+            last_step = array_job["steps"][-1]
+            if last_step["state"] != "COMPLETED":
+                print(f"WARNING: {array_main_job}.{array_index} has state {last_step['state']}, excluding it from measurements", file=sys.stderr)
+                excluded_array_indices.append(array_index)
+                continue
+            jobs.append(last_step)
+
         array_mean_timings = load_array_mean_timings(jobid, excluded_array_indices)
         if args.dump_timings:
             print(array_mean_timings, file=sys.stderr)
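
In timing.py the compare goes from a version with a standalone get_jobs helper to one where the same step-filtering logic lives inline in the __main__ block; note that the aggregation script removed above calls timing.get_jobs, which only exists on the left-hand side of this compare. Both variants walk the accounting JSON that get_accounting_data parses from sacct output and keep only array tasks whose last step reached COMPLETED. Below is a minimal sketch of the fields that loop actually touches, with invented values (not real accounting data):

# Shape of the accounting JSON as implied by the loop above; all values are made up.
accounting_data = {
    "jobs": [
        {
            "array": {"job_id": 4711, "task_id": 0},
            "steps": [{"state": "COMPLETED"}, {"state": "COMPLETED"}],  # last step = measurement run
        },
        {
            "array": {"job_id": 4711, "task_id": 1},
            "steps": [{"state": "COMPLETED"}, {"state": "FAILED"}],     # would be excluded
        },
    ]
}

jobs, excluded_array_indices = [], []
for array_job in accounting_data["jobs"]:
    last_step = array_job["steps"][-1]
    if last_step["state"] != "COMPLETED":
        excluded_array_indices.append(array_job["array"]["task_id"])
        continue
    jobs.append(last_step)

assert excluded_array_indices == [1] and len(jobs) == 1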