Commit 34efb21

linting fixes
1 parent f8fb6c7 commit 34efb21

File tree

1 file changed: +16 −16 lines

1 file changed

+16
-16
lines changed

python/get_export_source_metrics.py

Lines changed: 16 additions & 16 deletions
@@ -49,16 +49,16 @@ def collect_file_size_metrics(file_dicts, size_key):
     mean_file_size = statistics.mean(list_of_file_sizes)
     total_export_size = sum(list_of_file_sizes)
     return human_readable_size(largest_file), \
-        human_readable_size(mean_file_size), \
-        human_readable_size(total_export_size), \
-        len(list_of_file_sizes)
+           human_readable_size(mean_file_size), \
+           human_readable_size(total_export_size), \
+           len(list_of_file_sizes)
 
 
 def validate_export_buckets(csv_dicts, request_util):
     for row in csv_dicts:
         workspace_client = TerraWorkspace(request_util=request_util,
-                                 billing_project=row['destination_billing_project'],
-                                 workspace_name=row['destination_workspace_name'])
+                                          billing_project=row['destination_billing_project'],
+                                          workspace_name=row['destination_workspace_name'])
         workspace_bucket = workspace_client.get_workspace_bucket()
         if workspace_bucket != row['export_bucket']:
             logging.error(f"Export bucket {row['export_bucket']} does not match workspace bucket {workspace_bucket}")
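
For context, only the tail of collect_file_size_metrics appears in the hunk above. A minimal sketch of how the visible lines fit together; the list comprehension, the max() call, and the body of the human_readable_size helper are assumptions, since none of them appear in this diff:

import statistics


def human_readable_size(size_in_bytes):
    # Assumed helper (not shown in the diff): format a byte count for display.
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if size_in_bytes < 1024:
            return f'{size_in_bytes:.2f} {unit}'
        size_in_bytes /= 1024
    return f'{size_in_bytes:.2f} PB'


def collect_file_size_metrics(file_dicts, size_key):
    # The last six lines match the hunk above; the first three are assumed.
    list_of_file_sizes = [file_dict[size_key] for file_dict in file_dicts]
    largest_file = max(list_of_file_sizes)
    mean_file_size = statistics.mean(list_of_file_sizes)
    total_export_size = sum(list_of_file_sizes)
    return human_readable_size(largest_file), \
           human_readable_size(mean_file_size), \
           human_readable_size(total_export_size), \
           len(list_of_file_sizes)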
@@ -76,12 +76,12 @@ def validate_export_buckets(csv_dicts, request_util):
     case "dataset":
         for row in csv_dicts:
             file_list = tdr_client.get_data_set_files(dataset_id=row['source_dataset_id'])
-            largest_file, mean_file_size, total_export_size, number_of_files = collect_file_size_metrics(file_list, 'size')
-            collected_size_metrics.append({'DATASET_ID': row['DATASET_ID'],
+            largest_file, mean_file_size, total_export_size, number_of_files = collect_file_size_metrics(file_list, 'size')  # noqa: E501
+            collected_size_metrics.append({'DATASET_ID': row['DATASET_ID'],
                                            'LARGEST_FILE_SIZE': largest_file,
-                                          'MEAN_FILE_SIZE': mean_file_size,
-                                          'TOTAL_EXPORT_SIZE': total_export_size,
-                                          'FILE_COUNT': number_of_files})
+                                           'MEAN_FILE_SIZE': mean_file_size,
+                                           'TOTAL_EXPORT_SIZE': total_export_size,
+                                           'FILE_COUNT': number_of_files})
     case "workspace":
         print('looping through input tsv')
         for row in csv_dicts:
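
The # noqa: E501 markers added in this hunk and the next are flake8's per-line suppression comments: E501 is the line-too-long check, and a trailing noqa scoped to that one rule silences it on that line only, which is why the long tuple-unpacking assignments could stay unwrapped. A minimal illustration of the convention, with a hypothetical function name:

# Hypothetical example: flake8 reports E501 whenever a physical line exceeds
# the configured maximum length (79 characters by default).
summary = build_export_summary_with_a_long_name(first_argument, second_argument, third_argument)  # noqa: E501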
@@ -95,12 +95,12 @@ def validate_export_buckets(csv_dicts, request_util):
                                               sas_token=sas_token,
                                               container_name=workspace_client.storage_container)
             az_blobs = az_blob_client.get_blob_details(max_per_page=1000)
-            largest_file, mean_file_size, total_export_size, number_of_files = collect_file_size_metrics(az_blobs, 'size_in_bytes')
-            collected_size_metrics.append({'DATASET_ID': row['DATASET_ID'],
+            largest_file, mean_file_size, total_export_size, number_of_files = collect_file_size_metrics(az_blobs, 'size_in_bytes')  # noqa: E501
+            collected_size_metrics.append({'DATASET_ID': row['DATASET_ID'],
                                            'LARGEST_FILE_SIZE': largest_file,
-                                          'MEAN_FILE_SIZE': mean_file_size,
-                                          'TOTAL_EXPORT_SIZE': total_export_size,
-                                          'FILE_COUNT': number_of_files})
+                                           'MEAN_FILE_SIZE': mean_file_size,
+                                           'TOTAL_EXPORT_SIZE': total_export_size,
+                                           'FILE_COUNT': number_of_files})
 report_path = f'{args.target}_metrics.csv'
 Csv(file_path=report_path, delimiter=',').create_tsv_from_list_of_dicts(collected_size_metrics)
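
For readers without access to the repo's Csv helper, the final report write appears equivalent in spirit to a standard-library csv.DictWriter dump. A rough sketch under that assumption; the helper's real behavior (quoting, column ordering) may differ:

import csv


def write_metrics_report(report_path, collected_size_metrics):
    # Approximates Csv(file_path=report_path, delimiter=',')
    #   .create_tsv_from_list_of_dicts(collected_size_metrics)
    # using only the standard library.
    fieldnames = ['DATASET_ID', 'LARGEST_FILE_SIZE', 'MEAN_FILE_SIZE',
                  'TOTAL_EXPORT_SIZE', 'FILE_COUNT']
    with open(report_path, 'w', newline='') as report_file:
        writer = csv.DictWriter(report_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(collected_size_metrics)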
