Commit

Fix typo in fid_score.py, fail_safe for SDXL short runs, fix argument error in DLRMv2 implementation #1909, fixes preprocess_submission (#1910)

* Update generate_final_report.py

* Fix sdxl (#1911)

* Fix typo in fid_score.py, fail_safe for SDXL short runs

* [Automated Commit] Format Codebase

* Fix typo in fid_score.py, fail_safe for SDXL short runs

* Fix dlrmv2 reference implementation | Update run_local.sh

* Fixes for filtering invalid results

* [Automated Commit] Format Codebase

* Update preprocess_submission.py

* Added an option to pass in sample_ids.txt for SDXL accuracy check

* [Automated Commit] Format Codebase

* Update accuracy_coco.py

* [Automated Commit] Format Codebase

* Fix typo

* Do not use default for sample_ids.txt

---------

Co-authored-by: arjunsuresh <[email protected]>
arjunsuresh authored Nov 14, 2024
1 parent dffd292 commit 97122a2
Showing 6 changed files with 57 additions and 22 deletions.
recommendation/dlrm_v2/pytorch/run_local.sh (3 additions, 1 deletion)
@@ -2,7 +2,9 @@
 
 source ./run_common.sh
 
-common_opt="--mlperf_conf ../../../mlperf.conf"
+#mlperf.conf is now automatically loaded by loadgen
+#common_opt="--mlperf_conf ../../../mlperf.conf"
+
 OUTPUT_DIR=`pwd`/output/$name
 if [ ! -d $OUTPUT_DIR ]; then
     mkdir -p $OUTPUT_DIR
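For context on the change above: newer loadgen builds bundle the mlperf.conf defaults, so a harness only passes its user.conf overrides. A minimal sketch of that flow, assuming the mlperf_loadgen Python bindings are installed; the file name, model key, and scenario below are illustrative, not taken from this commit:

import mlperf_loadgen as lg

# mlperf.conf defaults ship inside loadgen, so only user overrides are read
# from disk; "user.conf", "dlrm-v2" and "Server" are illustrative values.
settings = lg.TestSettings()
settings.FromConfig("user.conf", "dlrm-v2", "Server")
settings.scenario = lg.TestScenario.Server
settings.mode = lg.TestMode.PerformanceOnly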
text_to_image/coco.py (14 additions, 10 deletions)
@@ -176,20 +176,24 @@ def __call__(self, results, ids, expected=None, result_dict=None):
     def save_images(self, ids, ds):
         info = []
         idx = {}
-        for i, id in enumerate(self.content_ids):
-            if id in ids:
-                idx[id] = i
+        for i, image_id in enumerate(self.content_ids):
+            if image_id in ids:
+                idx[image_id] = i
         if not os.path.exists("images/"):
             os.makedirs("images/", exist_ok=True)
-        for id in ids:
-            caption = ds.get_caption(id)
-            generated = Image.fromarray(self.results[idx[id]])
-            image_path_tmp = f"images/{self.content_ids[idx[id]]}.png"
+        for image_id in ids:
+            if not idx.get(image_id):
+                print(
+                    f"image id {image_id} is missing in the results. Hence not saved.")
+                continue
+            caption = ds.get_caption(image_id)
+            generated = Image.fromarray(self.results[idx[image_id]])
+            image_path_tmp = f"images/{self.content_ids[idx[image_id]]}.png"
             generated.save(image_path_tmp)
-            info.append((self.content_ids[idx[id]], caption))
+            info.append((self.content_ids[idx[image_id]], caption))
         with open("images/captions.txt", "w+") as f:
-            for id, caption in info:
-                f.write(f"{id} {caption}\n")
+            for image_id, caption in info:
+                f.write(f"{image_id} {caption}\n")
 
     def start(self):
         self.results = []
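The idx guard above is what makes short SDXL runs fail-safe: ids that never produced an image are skipped with a message instead of raising a KeyError. A standalone sketch of the same idea, with made-up ids in place of the real COCO captions:

# Standalone sketch of the fail-safe id lookup (ids are made up).
content_ids = [101, 102, 103]   # ids for which images were actually generated
requested_ids = [101, 104]      # 104 was requested but never generated (short run)

idx = {image_id: i for i, image_id in enumerate(content_ids) if image_id in requested_ids}

for image_id in requested_ids:
    if image_id not in idx:
        print(f"image id {image_id} is missing in the results, skipping")
        continue
    print(f"saving generated image #{idx[image_id]} for id {image_id}")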
text_to_image/tools/accuracy_coco.py (8 additions, 1 deletion)
@@ -51,6 +51,11 @@ def get_args():
         required=False,
         help="path to dump 10 stable diffusion xl compliance images",
     )
+    # Do not use for official MLPerf inference submissions as only the default
+    # one is valid
+    parser.add_argument(
+        "--ids-path", help="Path to 10 caption ids to dump as compliance images"
+    )
     parser.add_argument("--device", default="cpu", choices=["gpu", "cpu"])
     parser.add_argument(
         "--low_memory",
@@ -97,8 +102,10 @@ def main():
             os.makedirs(args.compliance_images_path)
         dump_compliance_images = True
         compliance_images_idx_list = []
+        sample_ids_file_path = args.ids_path if args.ids_path else os.path.join(
+            os.path.dirname(__file__), "sample_ids.txt")
         with open(
-            os.path.join(os.path.dirname(__file__), "sample_ids.txt"), "r"
+            sample_ids_file_path, "r"
         ) as compliance_id_file:
             for line in compliance_id_file:
                 idx = int(line.strip())
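With the new flag, accuracy_coco.py reads caption ids from --ids-path when it is given and otherwise falls back to the sample_ids.txt shipped next to the script. A minimal sketch of that fallback logic; the alternate file name in the comment is hypothetical:

# Minimal sketch of the --ids-path fallback.
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument(
    "--ids-path", help="Path to 10 caption ids to dump as compliance images")

# Parse as if run without the flag; try ["--ids-path", "my_sample_ids.txt"] instead.
args = parser.parse_args([])

sample_ids_file_path = args.ids_path if args.ids_path else os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "sample_ids.txt")
print(sample_ids_file_path)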
text_to_image/tools/fid/fid_score.py (1 addition, 1 deletion)
@@ -44,7 +44,7 @@
 import pathlib
 import os
 import sys
-sys.path.insert("..", 0)
+sys.path.insert(0, "..")
 from inception import InceptionV3  # noqa: E402
 
 
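The fid_score.py change is purely an argument-order fix: list.insert takes the index first, so sys.path.insert(0, "..") prepends the parent directory, while the swapped form raises a TypeError before InceptionV3 can ever be imported. A quick illustration:

import sys

sys.path.insert(0, "..")        # correct: index first, then the path to prepend
print(sys.path[0])              # ".."

try:
    sys.path.insert("..", 0)    # swapped arguments, as in the old code
except TypeError as err:
    print(f"TypeError: {err}")  # the index must be an integer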
tools/submission/generate_final_report.py (1 addition, 1 deletion)
@@ -79,7 +79,7 @@ def main():
     df["p#"] = df.apply(lambda x: int(x["host_processors_per_node"]), axis=1)
 
     # details url
-    base_url = f"https://github.com/mlcommons/{args.repository}/tree/main"
+    base_url = f"https://github.com/{args.repository_owner}/{args.repository}/tree/{args.repository_branch}"
     df["Details"] = df.apply(
         lambda x: '=HYPERLINK("{}","details")'.format(
             "/".join(
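The report script now builds the details link from three arguments instead of hard-coding the mlcommons owner and the main branch. A quick sketch of the resulting URL, with made-up values standing in for args.repository_owner, args.repository and args.repository_branch:

# Sketch of the parameterised details URL (the namespace values are made up).
from types import SimpleNamespace

args = SimpleNamespace(
    repository_owner="mlcommons",
    repository="inference_results_v4.1",
    repository_branch="main",
)
base_url = f"https://github.com/{args.repository_owner}/{args.repository}/tree/{args.repository_branch}"
print(base_url)  # https://github.com/mlcommons/inference_results_v4.1/tree/main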
tools/submission/preprocess_submission.py (30 additions, 8 deletions)
@@ -2,10 +2,6 @@
 Tool to infer scenario results and cleanup submission tree
 """
 
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
 import argparse
 import logging
 import os
@@ -142,6 +138,27 @@ def change_folder_name_in_path(path, old_folder_name, new_folder_name):
     return new_path
 
 
+def clean_model_dir(model_results_dir):
+    model_measurements_dir = change_folder_name_in_path(
+        model_results_dir, "results", "measurements")
+    model_compliance_dir = change_folder_name_in_path(
+        model_results_dir, "results", "compliance")
+
+    print(f"rmtree {model_results_dir}")
+    shutil.rmtree(model_results_dir)
+    shutil.rmtree(model_measurements_dir)
+    shutil.rmtree(model_compliance_dir)
+    sut_results_dir = os.path.dirname(model_results_dir)
+    if not os.listdir(sut_results_dir):
+        # clean sut dir
+        sut = os.path.basename(sut_results_dir)
+        log.info(
+            f"No benchmark results remaining for {sut}. rmtree {sut_results_dir}")
+        shutil.rmtree(sut_results_dir)
+        shutil.rmtree(os.path.dirname(model_measurements_dir))
+        shutil.rmtree(os.path.dirname(model_compliance_dir))
+
+
 def clean_invalid_results(args, log_path, config, system_desc, system_json,
                           model, mlperf_model, division, system_id_json, is_closed_or_network):
     # cleanup invalid results
Expand Down Expand Up @@ -176,6 +193,7 @@ def clean_invalid_results(args, log_path, config, system_desc, system_json,
except Exception as e:
log.warning(e)
perf_is_valid = False
compliance_is_valid = False
if perf_is_valid:
power_path = os.path.join(scenario_path, "performance", "power")
has_power = os.path.exists(power_path)
@@ -260,9 +278,12 @@ def clean_invalid_results(args, log_path, config, system_desc, system_json,
                 # if only accuracy or compliance failed, result is valid
                 # for open
                 if not perf_is_valid:
-                    shutil.rmtree(scenario_path)
                     log.warning(
                         f"{scenario} scenario result is invalid for {system_desc}: {model} in {division} and open divisions. Accuracy: {accuracy_is_valid}, Performance: {perf_is_valid}. Removing it...")
+                    shutil.rmtree(scenario_path)
+                    scenario_measurements_path = change_folder_name_in_path(
+                        scenario_path, "results", "measurements")
+                    shutil.rmtree(scenario_measurements_path)
                 if not os.path.exists(target_results_path):
                     shutil.copytree(
                         model_results_path, target_results_path)
@@ -288,9 +309,7 @@ def clean_invalid_results(args, log_path, config, system_desc, system_json,
                     log.warning(f"{scenario} scenario result is invalid for {system_desc}: {model} in {division} division. Accuracy: {accuracy_is_valid}, Performance: {perf_is_valid}. Compliance: {compliance_is_valid}. Moving other scenario results of {model} to open...")
                 else:
                     log.warning(f"{scenario} scenario result is invalid for {system_desc}: {model} in {division} division. Accuracy: {accuracy_is_valid}, Performance: {perf_is_valid}. Removing all dependent scenario results...")
-                    shutil.rmtree(model_results_path)
-                    shutil.rmtree(model_measurements_path)
-                    shutil.rmtree(model_compliance_path)
+                    clean_model_dir(model_results_path)
             else:  # delete this result
                 # delete other scenario results too
                 shutil.rmtree(scenario_path)
@@ -517,6 +536,9 @@ def main():
 
     infer_scenario_results(args, config)
 
+    if not args.nodelete_empty_dirs:
+        delete_empty_dirs(os.path.join(src_dir))
+
     return 0
 
 
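The new clean_model_dir helper relies on the submission layout keeping results/, measurements/ and compliance/ as parallel trees, so one model's artifacts are removed from all three, and the per-SUT directory goes too once it is empty. A self-contained sketch of that idea against a throwaway temp tree; swap_folder below is only an approximation of the repo's change_folder_name_in_path helper, and the paths are made up:

# Self-contained sketch of the parallel-tree cleanup (paths are made up and
# swap_folder only approximates change_folder_name_in_path).
import os
import shutil
import tempfile


def swap_folder(path, old_folder, new_folder):
    # Replace one path component, e.g. .../results/... -> .../measurements/...
    parts = path.split(os.sep)
    return os.sep.join(new_folder if part == old_folder else part for part in parts)


root = tempfile.mkdtemp()
model_results_dir = os.path.join(root, "results", "sut1", "resnet50")

# Create the same model subtree under all three roots.
for tree in ("results", "measurements", "compliance"):
    os.makedirs(swap_folder(model_results_dir, "results", tree))

# Remove the model from all three roots, mirroring clean_model_dir.
for tree in ("results", "measurements", "compliance"):
    shutil.rmtree(swap_folder(model_results_dir, "results", tree))

# Once no model results remain for the SUT, its per-SUT directories go too.
sut_results_dir = os.path.dirname(model_results_dir)
if not os.listdir(sut_results_dir):
    for tree in ("results", "measurements", "compliance"):
        shutil.rmtree(swap_folder(sut_results_dir, "results", tree))

print(os.listdir(root))  # only the three empty top-level trees remain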
