diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d6380790d3..bd3af0dc73 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,5 +1,5 @@ # Automatic code formatting -name: "format" +name: "Code formatting" on: push: branches: [ "main", "mlperf-inference" ] @@ -10,6 +10,7 @@ env: jobs: format-code: runs-on: ubuntu-latest + if: github.repository_owner == 'mlcommons' steps: - uses: actions/checkout@v4 with: @@ -37,12 +38,22 @@ jobs: git add $FILE done - - name: Commit + - name: Commit and create PR run: | HAS_CHANGES=$(git diff --staged --name-only) if [ ${#HAS_CHANGES} -gt 0 ]; then git config --global user.name mlcommons-bot git config --global user.email "mlcommons-bot@users.noreply.github.com" + # Commit changes git commit -m '[Automated Commit] Format Codebase' - git push + + # Push changes to a new branch + BRANCH_NAME="auto/code-format" + git branch $BRANCH_NAME + git push origin $BRANCH_NAME --force + + # Create a pull request to the "code-format" branch + gh pr create --base code-format --head $BRANCH_NAME --title "[Automated PR] Format Codebase" --body "This pull request contains automated code formatting changes." fi + env: + GH_TOKEN: ${{ secrets.ACCESS_TOKEN }} diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml index 465d8edae4..be894cfa94 100644 --- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml +++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml @@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations on: schedule: - - cron: "54 22 * * *" #to be adjusted + - cron: "55 01 * * *" #to be adjusted jobs: run_nvidia: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1ad44e56ff..35ccfbc5ba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,2 +1,9 @@ -You can find the guidelines to contribute to the MLCommons CM project -at the [CM GitHub page](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md). +## Contributing + +The best way to contribute to MLCommons is to get involved with one of our many project communities. You can find more information about getting involved with MLCommons [here](https://mlcommons.org/en/get-involved/#getting-started). + +Generally we encourage people to become an MLCommons member if they wish to contribute to MLCommons projects, but outside pull requests are very welcome too. + +To get started contributing code, you or your organization needs to sign the MLCommons CLA found at the [MLC policies page](https://mlcommons.org/en/policies/). Once you or your organization has signed the corporate CLA, please fill out this [CLA sign up form](https://forms.gle/Ew1KkBVpyeJDuRw67) to get your specific GitHub handle authorized so that you can start contributing code under the proper license. + +MLCommons project work is tracked with issue trackers and pull requests. Modify the project in your own fork and issue a pull request once you want other developers to take a look at what you have done and discuss the proposed changes. Ensure that cla-bot and other checks pass for your pull requests.
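For readers skimming the `format.yml` change above: the formatting job no longer pushes straight to the triggering branch; it commits the autopep8 output to a side branch and opens a pull request, and the new `if: github.repository_owner == 'mlcommons'` guard keeps forks from running the job. The Python sketch below mirrors what the shell step does. The branch name, PR title, and base branch come from the diff itself; the use of `subprocess` and the `gh` CLI here is purely illustrative, and it assumes `git`, `gh`, and a token with PR permissions (the workflow's `GH_TOKEN`) are available.

```python
import subprocess


def run(*args):
    """Run a command, fail loudly on error, and return its stdout."""
    return subprocess.run(args, capture_output=True, text=True,
                          check=True).stdout.strip()


def commit_and_open_pr():
    # Only act if the earlier autopep8 step staged any changes
    if not run("git", "diff", "--staged", "--name-only"):
        return

    run("git", "commit", "-m", "[Automated Commit] Format Codebase")

    # Publish the commit on a side branch instead of the protected branch
    branch = "auto/code-format"
    run("git", "branch", branch)
    run("git", "push", "origin", branch, "--force")

    # Open a PR against the "code-format" branch (needs gh + GH_TOKEN)
    run("gh", "pr", "create", "--base", "code-format", "--head", branch,
        "--title", "[Automated PR] Format Codebase",
        "--body", "This pull request contains automated code formatting changes.")
```

One quirk worth knowing when reading the shell version: `git branch $BRANCH_NAME` creates the branch at HEAD without checking it out, which works here only because the commit has already been made on the checked-out branch.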
diff --git a/VERSION b/VERSION index 79a2734bbf..844f6a91ac 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.5.0 \ No newline at end of file +0.6.3 diff --git a/automation/cache/module.py b/automation/cache/module.py index ac2d141131..6720fe5779 100644 --- a/automation/cache/module.py +++ b/automation/cache/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,9 +48,9 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} ############################################################ def show(self, i): @@ -77,7 +78,7 @@ def show(self, i): # Check parsed automation if 'parsed_automation' not in i: - return {'return':1, 'error':'automation is not specified'} + return {'return': 1, 'error': 'automation is not specified'} console = i.get('out') == 'con' @@ -101,54 +102,65 @@ def show(self, i): i['out'] = None r = self.search(i) - if r['return']>0: return r + if r['return'] > 0: + return r lst = r['list'] - for artifact in sorted(lst, key = lambda x: sorted(x.meta['tags'])): -# for artifact in lst: + for artifact in sorted(lst, key=lambda x: sorted(x.meta['tags'])): + # for artifact in lst: path = artifact.path meta = artifact.meta original_meta = artifact.original_meta - alias = meta.get('alias','') - uid = meta.get('uid','') + alias = meta.get('alias', '') + uid = meta.get('uid', '') - tags = meta.get('tags',[]) + tags = meta.get('tags', []) tags1 = sorted([x for x in tags if not x.startswith('_')]) tags2 = sorted([x for x in tags if x.startswith('_')]) tags = tags1 + tags2 - version = meta.get('version','') + version = meta.get('version', '') if console: - print ('') + print('') # print ('* UID: {}'.format(uid)) - print ('* Tags: {}'.format(','.join(tags))) - print (' Path: {}'.format(path)) - if version!='': - print (' Version: {}'.format(version)) + print('* Tags: {}'.format(','.join(tags))) + print(' Path: {}'.format(path)) + if version != '': + print(' Version: {}'.format(version)) if show_env and console: - path_to_cached_state_file = os.path.join(path, 'cm-cached-state.json') + path_to_cached_state_file = os.path.join( + path, 'cm-cached-state.json') if os.path.isfile(path_to_cached_state_file): - r = utils.load_json(file_name = path_to_cached_state_file) - if r['return']>0: return r + r = utils.load_json(file_name=path_to_cached_state_file) + if r['return'] > 0: + return r # Update env and state from cache! cached_state = r['meta'] new_env = cached_state.get('new_env', {}) - if len(new_env)>0: - print (' New env:') - print (json.dumps(new_env, indent=6, sort_keys=True).replace('{','').replace('}','')) + if len(new_env) > 0: + print(' New env:') + print( + json.dumps( + new_env, + indent=6, + sort_keys=True).replace( + '{', + '').replace( + '}', + '')) new_state = cached_state.get('new_state', {}) - if len(new_state)>0: - print (' New state:') - print (json.dumps(new_env, indent=6, sort_keys=True)) + if len(new_state) > 0: + print(' New state:') + print(json.dumps(new_env, indent=6, sort_keys=True)) - return {'return':0, 'list': lst} + return {'return': 0, 'list': lst} ############################################################ def search(self, i): @@ -159,30 +171,36 @@ def search(self, i): """ # Check simplified CMD: cm show cache "get python" # If artifact has spaces, treat them as tags! 
- artifact = i.get('artifact','') - tags = i.get('tags','') - # Tags may be a list (if comes internally from CM scripts) or string if comes from CMD - if type(tags)!=list: + artifact = i.get('artifact', '') + tags = i.get('tags', '') + + # Tags may be a list (if comes internally from CM scripts) or string if + # comes from CMD + if not isinstance(tags, list): tags = tags.strip() - if ' ' in artifact:# or ',' in artifact: - del(i['artifact']) - if 'parsed_artifact' in i: del(i['parsed_artifact']) - new_tags = artifact.replace(' ',',') - tags = new_tags if tags=='' else new_tags+','+tags + if ' ' in artifact: # or ',' in artifact: + del (i['artifact']) + if 'parsed_artifact' in i: + del (i['parsed_artifact']) + + new_tags = artifact.replace(' ', ',') + tags = new_tags if tags == '' else new_tags + ',' + tags i['tags'] = tags # Force automation when reruning access with processed input - i['automation']='cache,541d6f712a6b464e' - i['action']='search' - i['common'] = True # Avoid recursion - use internal CM add function to add the script artifact + i['automation'] = 'cache,541d6f712a6b464e' + i['action'] = 'search' + # Avoid recursion - use internal CM add function to add the script + # artifact + i['common'] = True # Find CM artifact(s) return self.cmind.access(i) - ############################################################ + def copy_to_remote(self, i): """ Add CM automation. @@ -208,4 +226,5 @@ def copy_to_remote(self, i): """ - return utils.call_internal_module(self, __file__, 'module_misc', 'copy_to_remote', i) + return utils.call_internal_module( + self, __file__, 'module_misc', 'copy_to_remote', i) diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py index cfb0e8e84d..91ecbd5a1c 100644 --- a/automation/cache/module_misc.py +++ b/automation/cache/module_misc.py @@ -30,13 +30,17 @@ def copy_to_remote(i): self_module = i['self_module'] - remote_host = i.get('remote_host') + remote_host = i.get('remote_host') if not remote_host: - return {'return':1, 'error': 'Please input remote host_name/IP via --remote_host'} - remote_cm_repos_location = i.get('remote_cm_repos_location', os.path.join("/home", os.getlogin(), "CM", "repos")) - remote_cm_cache_location = os.path.join(remote_cm_repos_location, "local", "cache") - - remote_port = i.get('remote_port', '22') + return {'return': 1, + 'error': 'Please input remote host_name/IP via --remote_host'} + remote_cm_repos_location = i.get( + 'remote_cm_repos_location', os.path.join( + "/home", os.getlogin(), "CM", "repos")) + remote_cm_cache_location = os.path.join( + remote_cm_repos_location, "local", "cache") + + remote_port = i.get('remote_port', '22') remote_user = i.get('remote_user', os.getlogin()) tag_string = i['tags'] @@ -52,10 +56,10 @@ def copy_to_remote(i): return r if len(r['list']) == 0: - pass #fixme + pass # fixme elif len(r['list']) > 1: print("Multiple cache entries found: ") - for k in sorted(r['list'], key = lambda x: x.meta.get('alias','')): + for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')): print(k.path) x = input("Would you like to copy them all? 
Y/n: ") if x.lower() == 'n': @@ -63,7 +67,7 @@ def copy_to_remote(i): import json - for k in sorted(r['list'], key = lambda x: x.meta.get('alias','')): + for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')): path = k.path cacheid = os.path.basename(path) @@ -73,26 +77,33 @@ def copy_to_remote(i): cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json") if not os.path.exists(cm_cached_state_json_file): - return {'return':1, 'error': f'cm-cached-state.json file missing in {path}'} + return {'return': 1, + 'error': f'cm-cached-state.json file missing in {path}'} with open(cm_cached_state_json_file, "r") as f: cm_cached_state = json.load(f) new_env = cm_cached_state['new_env'] - new_state = cm_cached_state['new_state'] # Todo fix new state - cm_repos_path = os.environ.get('CM_REPOS', os.path.join(os.path.expanduser("~"), "CM", "repos")) - cm_cache_path = os.path.realpath(os.path.join(cm_repos_path, "local", "cache")) + new_state = cm_cached_state['new_state'] # Todo fix new state + cm_repos_path = os.environ.get( + 'CM_REPOS', os.path.join( + os.path.expanduser("~"), "CM", "repos")) + cm_cache_path = os.path.realpath( + os.path.join(cm_repos_path, "local", "cache")) + + for key, val in new_env.items(): + - for key,val in new_env.items(): - if type(val) == str and cm_cache_path in val: - new_env[key] = val.replace(cm_cache_path, remote_cm_cache_location) +if isinstance(val, if ) new_env[key] = val.replace( + cm_cache_path, remote_cm_cache_location) with open("tmp_remote_cached_state.json", "w") as f: json.dump(cm_cached_state, f, indent=2) - remote_cached_state_file_location = os.path.join(remote_cm_cache_location, cacheid, "cm-cached-state.json") + remote_cached_state_file_location = os.path.join( + remote_cm_cache_location, cacheid, "cm-cached-state.json") copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}" print(copy_cmd) os.system(copy_cmd) - return {'return':0} + return {'return': 0} diff --git a/automation/cfg/module.py b/automation/cfg/module.py index 9dea0683ab..6fff7d8029 100644 --- a/automation/cfg/module.py +++ b/automation/cfg/module.py @@ -7,6 +7,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -51,9 +52,9 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} ############################################################ def xset(self, i): @@ -84,7 +85,8 @@ def xset(self, i): import json r = self._find_cfg_artifact(i) - if r['return']>0: return r + if r['return'] > 0: + return r # Path to cfg path = r['path'] @@ -92,10 +94,10 @@ def xset(self, i): config = r['config'] # Clean input to leave only keys for the configuration - new_config = i.get('key',{}) + new_config = i.get('key', {}) # If new config is empty, just print existing config - if len(new_config) >0 : + if len(new_config) > 0: # Check if need to delete some def check_to_delete(d): @@ -106,29 +108,36 @@ def check_to_delete(d): else: if k.endswith('-'): if k[:-1] in d: - del(d[k[:-1]]) - del(d[k]) + del (d[k[:-1]]) + del (d[k]) else: vsl = str(v).lower() - if vsl == 'none': v = None - elif vsl == 'false': v = False - elif vsl == 'true': v = True + if vsl == 'none': + v = None + elif vsl == 'false': + v = False + elif vsl == 'true': + v = True - d[k]=v + d[k] = v - utils.merge_dicts({'dict1':config, 'dict2':new_config, 
'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1': config, + 'dict2': new_config, + 'append_lists': True, + 'append_unique': True}) check_to_delete(config) r = utils.save_json(path_to_config, config) - if r['return']>0: return r + if r['return'] > 0: + return r # Print config - print ('Config:') - print ('') - print (json.dumps(config, indent=2)) + print('Config:') + print('') + print(json.dumps(config, indent=2)) - return {'return':0} + return {'return': 0} ############################################################ def load(self, i): @@ -179,12 +188,13 @@ def _find_cfg_artifact(self, i): """ # Clean input to find artifact - ii = utils.sub_input(i, self.cmind.cfg['artifact_keys']+['tags']) + ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) - parsed_artifact = i.get('parsed_artifact',[]) + parsed_artifact = i.get('parsed_artifact', []) - artifact_obj = parsed_artifact[0] if len(parsed_artifact)>0 else None - artifact_repo = parsed_artifact[1] if len(parsed_artifact)>1 else None + artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None + artifact_repo = parsed_artifact[1] if len( + parsed_artifact) > 1 else None artifact = i.get('artifact', '') @@ -194,23 +204,27 @@ def _find_cfg_artifact(self, i): tags = ii.get('tags', '') if 'cm-universal-cfg' not in tags: - if tags!='': tags+=',' - tags+='cm-universal-cfg' + if tags != '': + tags += ',' + tags += 'cm-universal-cfg' ii['tags'] = tags automation = ii['automation'] - if automation!='.' and ',' not in automation: + if automation != '.' and ',' not in automation: ii['automation'] = automation + ',' + self.meta['uid'] # Add placeholder (use common action) - ii['action']='find' - ii['out']='' - ii['common']=True # Avoid recursion - use internal CM add function to add the script artifact + ii['action'] = 'find' + ii['out'] = '' + # Avoid recursion - use internal CM add function to add the script + # artifact + ii['common'] = True - r=self.cmind.access(ii) - if r['return']>0: return r + r = self.cmind.access(ii) + if r['return'] > 0: + return r lst = r['list'] @@ -219,12 +233,14 @@ def _find_cfg_artifact(self, i): ii['meta'] = {} # Tags must be unique for default - r=self.cmind.access(ii) - if r['return']>0: return r + r = self.cmind.access(ii) + if r['return'] > 0: + return r path = r['path'] - elif len(lst)>1: - return {'return':1, 'error':'ambiguity in cfg name - more than 1 CM artifact found'} + elif len(lst) > 1: + return { + 'return': 1, 'error': 'ambiguity in cfg name - more than 1 CM artifact found'} else: path = lst[0].path @@ -234,8 +250,10 @@ def _find_cfg_artifact(self, i): config = {} if os.path.isfile(path_to_cfg): r = utils.load_json(path_to_cfg) - if r['return']>0: return r + if r['return'] > 0: + return r config = r['meta'] - return {'return':0, 'path':path, 'path_to_config':path_to_cfg, 'config':config} + return {'return': 0, 'path': path, + 'path_to_config': path_to_cfg, 'config': config} diff --git a/automation/challenge/module.py b/automation/challenge/module.py index c6e81eed4c..be8bf9c089 100644 --- a/automation/challenge/module.py +++ b/automation/challenge/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,6 +48,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/contributor/module.py b/automation/contributor/module.py index 
89fee477f0..e077cb3cfc 100644 --- a/automation/contributor/module.py +++ b/automation/contributor/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,9 +48,9 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} ############################################################ def add(self, i): @@ -71,59 +72,64 @@ def add(self, i): """ - self_automation = self.meta['alias']+','+self.meta['uid'] + self_automation = self.meta['alias'] + ',' + self.meta['uid'] console = i.get('out') == 'con' - artifact = i.get('artifact','') + artifact = i.get('artifact', '') if ':' not in artifact: - artifact = 'mlcommons@ck:'+artifact + artifact = 'mlcommons@ck:' + artifact j = artifact.find(':') - name = artifact[j+1:] + name = artifact[j + 1:] # Check info if name == '': name = input('Enter your name: ').strip() if name == '': - return {'return':1, 'error':'name can\'t be empty'} + return {'return': 1, 'error': 'name can\'t be empty'} artifact += name # Check if doesn't exist - r = self.cmind.access({'action':'find', - 'automation':self_automation, - 'artifact':artifact}) - if r['return']>0: return r - elif r['return']==0 and len(r['list'])>0: - return {'return':1, 'error':'CM artifact with name {} already exists in {}'.format(name, r['list'][0].path)} + r = self.cmind.access({'action': 'find', + 'automation': self_automation, + 'artifact': artifact}) + if r['return'] > 0: + return r + elif r['return'] == 0 and len(r['list']) > 0: + return {'return': 1, 'error': 'CM artifact with name {} already exists in {}'.format( + name, r['list'][0].path)} - meta = i.get('meta',{}) + meta = i.get('meta', {}) # Prepare meta - org = meta.get('organization','') - if org=='': + org = meta.get('organization', '') + if org == '': org = input('Enter your organization (optional): ').strip() url = input('Enter your webpage (optional): ').strip() - tags = input('Enter tags of your challenges separate by comma (you can add them later): ').strip() + tags = input( + 'Enter tags of your challenges separate by comma (you can add them later): ').strip() - if meta.get('name','')=='': - meta = {'name':name} + if meta.get('name', '') == '': + meta = {'name': name} - if org!='': + if org != '': meta['organization'] = org - if url!='': + if url != '': meta['urls'] = [url] - if tags!='': + if tags != '': meta['ongoing'] = tags.split(',') # Add placeholder (use common action) i['out'] = 'con' - i['common'] = True # Avoid recursion - use internal CM add function to add the script artifact + # Avoid recursion - use internal CM add function to add the script + # artifact + i['common'] = True i['action'] = 'add' i['automation'] = self_automation @@ -131,23 +137,25 @@ def add(self, i): i['meta'] = meta - print ('') + print('') r = self.cmind.access(i) - if r['return']>0: return r + if r['return'] > 0: + return r path = r['path'] path2 = os.path.dirname(path) - print ('') - print ('Please go to {}, add your directory to Git, commit and create PR:'.format(path2)) - print ('') - print ('cd {}'.format(path2)) - print ('git add "{}"'.format(name)) - print ('git commit "{}"'.format(name)) - print ('') - print ('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!') - print ('Looking forward to your contributions!') + print('') + print( + 'Please go to {}, add your directory to Git, commit and create PR:'.format(path2)) + print('') + print('cd 
{}'.format(path2)) + print('git add "{}"'.format(name)) + print('git commit "{}"'.format(name)) + print('') + print('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!') + print('Looking forward to your contributions!') return r diff --git a/automation/data/module.py b/automation/data/module.py index c6e81eed4c..be8bf9c089 100644 --- a/automation/data/module.py +++ b/automation/data/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,6 +48,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/docker/module.py b/automation/docker/module.py index 9e5339bd0a..7e612677b1 100644 --- a/automation/docker/module.py +++ b/automation/docker/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ CM "docker" automation actions @@ -46,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/docs/module.py b/automation/docs/module.py index c6e81eed4c..be8bf9c089 100644 --- a/automation/docs/module.py +++ b/automation/docs/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,6 +48,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/experiment/module.py b/automation/experiment/module.py index 1ea8d43022..6e98029d54 100644 --- a/automation/experiment/module.py +++ b/automation/experiment/module.py @@ -11,6 +11,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ CM "experiment" automation actions @@ -58,15 +59,12 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) - - return {'return':0} - - - + print(json.dumps(i, indent=2)) + return {'return': 0} ############################################################ + def run(self, i): """ Run experiment @@ -109,52 +107,55 @@ def run(self, i): # Find or add artifact based on repo/alias/tags r = self._find_or_add_artifact(i) - if r['return']>0: return r + if r['return'] > 0: + return r experiment = r['experiment'] - console = i.get('out','')=='con' + console = i.get('out', '') == 'con' # Print experiment folder experiment_path = experiment.path if console: - print ('') - print ('Path to CM experiment artifact: {}'.format(experiment_path)) - + print('') + print('Path to CM experiment artifact: {}'.format(experiment_path)) # Get directory with datetime - datetime = i.get('dir','') + datetime = i.get('dir', '') if datetime == '' and i.get('rerun', False): # Check if already some dir exist directories = os.listdir(experiment_path) - datetimes = sorted([f for f in directories if os.path.isfile(os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + datetimes = sorted([f for f in directories if os.path.isfile( + os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) - if len(datetimes)==1: + if len(datetimes) == 1: datetime = datetimes[0] - elif len(datetimes)>1: - print ('') - print ('Select experiment:') + elif len(datetimes) > 1: + print('') + print('Select experiment:') datetimes = sorted(datetimes) num = 0 - print 
('') + print('') for d in datetimes: - print ('{}) {}'.format(num, d.replace('.',' '))) + print('{}) {}'.format(num, d.replace('.', ' '))) num += 1 if not console: - return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} + return { + 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} - print ('') - x=input('Make your selection or press Enter for 0: ') + print('') + x = input('Make your selection or press Enter for 0: ') - x=x.strip() - if x=='': x='0' + x = x.strip() + if x == '': + x = '0' selection = int(x) @@ -163,8 +164,7 @@ def run(self, i): datetime = datetimes[selection] - - if datetime!='': + if datetime != '': experiment_path2 = os.path.join(experiment_path, datetime) else: num = 0 @@ -172,12 +172,14 @@ def run(self, i): while not found: r = utils.get_current_date_time({}) - if r['return']>0: return r + if r['return'] > 0: + return r - datetime = r['iso_datetime'].replace(':','-').replace('T','.') + datetime = r['iso_datetime'].replace( + ':', '-').replace('T', '.') - if num>0: - datetime+='.'+str(num) + if num > 0: + datetime += '.' + str(num) experiment_path2 = os.path.join(experiment_path, datetime) @@ -185,46 +187,50 @@ def run(self, i): found = True break - num+=1 + num += 1 # Check/create directory with date_time if not os.path.isdir(experiment_path2): os.makedirs(experiment_path2) # Change current path - print ('Path to experiment: {}'.format(experiment_path2)) + print('Path to experiment: {}'.format(experiment_path2)) os.chdir(experiment_path2) # Record experiment input with possible exploration - experiment_input_file = os.path.join(experiment_path2, self.CM_INPUT_FILE) - experiment_result_file = os.path.join(experiment_path2, self.CM_RESULT_FILE) + experiment_input_file = os.path.join( + experiment_path2, self.CM_INPUT_FILE) + experiment_result_file = os.path.join( + experiment_path2, self.CM_RESULT_FILE) # Clean original input for k in ['parsed_artifact', 'parsed_automation', 'cmd']: if k in ii_copy: - del(ii_copy[k]) + del (ii_copy[k]) r = utils.save_json(file_name=experiment_input_file, meta=ii_copy) - if r['return']>0: return r + if r['return'] > 0: + return r # Prepare run command cmd = '' unparsed = i.get('unparsed_cmd', []) - if len(unparsed)>0: + if len(unparsed) > 0: for u in unparsed: - if ' ' in u: u='"'+u+'"' - cmd+=' '+u + if ' ' in u: + u = '"' + u + '"' + cmd += ' ' + u - cmd=cmd.strip() + cmd = cmd.strip() # Prepare script run env = i.get('env', {}) - ii = {'action':'native-run', - 'automation':'script,5b4e0237da074764', - 'env':env} + ii = {'action': 'native-run', + 'automation': 'script,5b4e0237da074764', + 'env': env} # Prepare exploration # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml @@ -235,41 +241,41 @@ def run(self, i): j = 1 k = 0 - while j>=0: + while j >= 0: j = cmd.find('}}}', k) - if j>=0: - k = j+1 + if j >= 0: + k = j + 1 - l = cmd.rfind('{{',0, j) + l = cmd.rfind('{{', 0, j) - if l>=0: - l2 = cmd.find('{', l+2, j) - if l2>=0: - k = l2+1 + if l >= 0: + l2 = cmd.find('{', l + 2, j) + if l2 >= 0: + k = l2 + 1 - var = cmd[l+2:l2] - expr = cmd[l2+1:j] + var = cmd[l + 2:l2] + expr = cmd[l2 + 1:j] explore[var] = expr - cmd = cmd[:l2]+ cmd[j+1:] - + cmd = cmd[:l2] + cmd[j + 1:] # Separate Design Space Exploration into var and range - explore_keys=[] - explore_dimensions=[] + explore_keys = [] + explore_dimensions = [] for k in explore: - v=explore[k] + v = explore[k] 
+            v = explore[k] explore_keys.append(k) - if type(v)!=list: - v=eval(v) + if not isinstance(v, list): + v = eval(v) explore_dimensions.append(v) - # Next command will run all iterations so we need to redo above command once again + # Next command will run all iterations so we need to redo above command + # once again step = 0 steps = itertools.product(*explore_dimensions) @@ -284,32 +290,33 @@ def run(self, i): step += 1 - print ('================================================================') - print ('Experiment step: {} out of {}'.format(step, num_steps)) + print('================================================================') + print('Experiment step: {} out of {}'.format(step, num_steps)) - print ('') + print('') ii = copy.deepcopy(ii_copy) env = ii.get('env', {}) - l_dimensions=len(dimensions) - if l_dimensions>0: - print (' Updating ENV variables during exploration:') + l_dimensions = len(dimensions) + if l_dimensions > 0: + print(' Updating ENV variables during exploration:') - print ('') + print('') for j in range(l_dimensions): v = dimensions[j] k = explore_keys[j] - print (' - Dimension {}: "{}" = {}'.format(j, k, v)) + print(' - Dimension {}: "{}" = {}'.format(j, k, v)) env[k] = str(v) - print ('') + print('') # Generate UID and prepare extra directory: r = utils.gen_uid() - if r['return']>0: return r + if r['return'] > 0: + return r uid = r['uid'] @@ -319,13 +326,14 @@ def run(self, i): # Get date time of experiment r = utils.get_current_date_time({}) - if r['return']>0: return r + if r['return'] > 0: + return r current_datetime = r['iso_datetime'] # Change current path - print ('Path to experiment step: {}'.format(experiment_path3)) - print ('') + print('Path to experiment step: {}'.format(experiment_path3)) + print('') os.chdir(experiment_path3) # Prepare and run experiment in a given placeholder directory @@ -334,72 +342,77 @@ def run(self, i): ii['env'] = env # Change only in CMD - env_local={'CD':cur_dir, - 'CM_EXPERIMENT_STEP':str(step), - 'CM_EXPERIMENT_PATH':experiment_path, - 'CM_EXPERIMENT_PATH2':experiment_path2, - 'CM_EXPERIMENT_PATH3':experiment_path3} - + env_local = {'CD': cur_dir, + 'CM_EXPERIMENT_STEP': str(step), + 'CM_EXPERIMENT_PATH': experiment_path, + 'CM_EXPERIMENT_PATH2': experiment_path2, + 'CM_EXPERIMENT_PATH3': experiment_path3} # Update {{}} in CMD cmd_step = cmd j = 1 k = 0 - while j>=0: + while j >= 0: j = cmd_step.find('{{', k) - if j>=0: + if j >= 0: k = j - l = cmd_step.find('}}',j+2) - if l>=0: - var = cmd_step[j+2:l] + l = cmd_step.find('}}', j + 2) + if l >= 0: + var = cmd_step[j + 2:l] # Such vars must be in env if var not in env and var not in env_local: - return {'return':1, 'error':'key "{}" is not in env during exploration'.format(var)} + return { + 'return': 1, 'error': 'key "{}" is not in env during exploration'.format(var)} if var in env: value = env[var] else: value = env_local[var] - cmd_step = cmd_step[:j] + str(value) + cmd_step[l+2:] + cmd_step = cmd_step[:j] + str(value) + cmd_step[l + 2:] ii['command'] = cmd_step - print ('Generated CMD:') - print ('') - print (cmd_step) - print ('') + print('Generated CMD:') + print('') + print(cmd_step) + print('') # Prepare experiment step input - experiment_step_input_file = os.path.join(experiment_path3, self.CM_INPUT_FILE) + experiment_step_input_file = os.path.join( + experiment_path3, self.CM_INPUT_FILE) r = utils.save_json(file_name=experiment_step_input_file, meta=ii) - if r['return']>0: return r + if r['return'] > 0: + return r - experiment_step_output_file = os.path.join(experiment_path3, 
self.CM_OUTPUT_FILE) + experiment_step_output_file = os.path.join( + experiment_path3, self.CM_OUTPUT_FILE) if os.path.isfile(experiment_step_output_file): os.delete(experiment_step_output_file) # Run CMD - rr=self.cmind.access(ii) - if rr['return']>0: return rr + rr = self.cmind.access(ii) + if rr['return'] > 0: + return rr # Record output result = {} if os.path.isfile(experiment_step_output_file): r = utils.load_json(file_name=experiment_step_output_file) - if r['return']>0: return r + if r['return'] > 0: + return r result = r['meta'] - #Try to flatten + # Try to flatten try: flatten_result = flatten_dict(result) result = flatten_result - except: + except BaseException: pass # Add extra info @@ -411,26 +424,27 @@ def run(self, i): if os.path.isfile(experiment_result_file): r = utils.load_json(file_name=experiment_result_file) - if r['return']>0: return r + if r['return'] > 0: + return r all_results = r['meta'] all_results.append(result) - r = utils.save_json(file_name=experiment_result_file, meta = all_results) - if r['return']>0: return r - + r = utils.save_json( + file_name=experiment_result_file, + meta=all_results) + if r['return'] > 0: + return r - rr = {'return':0, - 'experiment_path':experiment_path, - 'experiment_path2':experiment_path2} + rr = {'return': 0, + 'experiment_path': experiment_path, + 'experiment_path2': experiment_path2} return rr - - - ############################################################ + def rerun(self, i): """ Rerun experiment @@ -438,22 +452,12 @@ def rerun(self, i): cm run experiment --rerun=True ... """ - i['rerun']=True + i['rerun'] = True return self.run(i) - - - - - - - - - - - ############################################################ + def replay(self, i): """ Replay experiment @@ -482,75 +486,82 @@ def replay(self, i): """ # Find or add artifact based on repo/alias/tags - i['fail_if_not_found']=True + i['fail_if_not_found'] = True r = self._find_or_add_artifact(i) - if r['return']>0: return r + if r['return'] > 0: + return r experiment = r['experiment'] - console = i.get('out','')=='con' + console = i.get('out', '') == 'con' # Print experiment folder experiment_path = experiment.path if console: - print ('') - print ('Path to CM experiment artifact: {}'.format(experiment_path)) + print('') + print('Path to CM experiment artifact: {}'.format(experiment_path)) # Check date and time folder uid = i.get('uid', '') datetime = i.get('dir', '') - if datetime!='': + if datetime != '': datetimes = [datetime] else: directories = os.listdir(experiment_path) - datetimes = sorted([f for f in directories if os.path.isfile(os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + datetimes = sorted([f for f in directories if os.path.isfile( + os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) - if len(datetimes)==0: - return {'return':1, 'error':'experiment(s) not found in {}'.format(experiment_path)} + if len(datetimes) == 0: + return {'return': 1, 'error': 'experiment(s) not found in {}'.format( + experiment_path)} # Check datetime directory found_result = {} - if uid!='': + if uid != '': for d in datetimes: - r = self._find_uid({'path':experiment_path, 'datetime':d, 'uid':uid}) - if r['return']>0: return r + r = self._find_uid({'path': experiment_path, 'datetime': d, 'uid': uid}) + if r['return'] > 0: + return r - if len(r.get('result',{}))>0: + if len(r.get('result', {})) > 0: found_result = r['result'] datetime = d experiment_path2 = os.path.join(experiment_path, datetime) break - if len(found_result)==0: - return 
{'return':1, 'error':'couldn\'t find result with UID {} in {}'.format(uid, experiment_path)} + if len(found_result) == 0: + return {'return': 1, 'error': 'couldn\'t find result with UID {} in {}'.format( + uid, experiment_path)} else: - if len(datetimes)==1: + if len(datetimes) == 1: datetime = datetimes[0] else: - print ('') - print ('Available experiments:') + print('') + print('Available experiments:') datetimes = sorted(datetimes) num = 0 - print ('') + print('') for d in datetimes: - print ('{}) {}'.format(num, d.replace('.',' '))) + print('{}) {}'.format(num, d.replace('.', ' '))) num += 1 if not console: - return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} + return { + 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} - print ('') - x=input('Make your selection or press Enter for 0: ') + print('') + x = input('Make your selection or press Enter for 0: ') - x=x.strip() - if x=='': x='0' + x = x.strip() + if x == '': + x = '0' selection = int(x) @@ -563,39 +574,44 @@ def replay(self, i): experiment_path2 = os.path.join(experiment_path, datetime) if not os.path.isdir(experiment_path2): - return {'return':1, 'error':'experiment path not found {}'.format(experiment_path2)} + return {'return': 1, 'error': 'experiment path not found {}'.format( + experiment_path2)} - r = self._find_uid({'path':experiment_path, 'datetime':datetime}) - if r['return']>0: return r + r = self._find_uid({'path': experiment_path, 'datetime': datetime}) + if r['return'] > 0: + return r results = r['meta'] - if len(results)==0: - return {'return':1, 'error':'results not found in {}'.format(experiment_path2)} + if len(results) == 0: + return {'return': 1, 'error': 'results not found in {}'.format( + experiment_path2)} - elif len(results)==1: + elif len(results) == 1: selection = 0 else: - print ('') - print ('Available Unique IDs of results:') + print('') + print('Available Unique IDs of results:') - results = sorted(results, key=lambda x: x.get('uid','')) + results = sorted(results, key=lambda x: x.get('uid', '')) num = 0 - print ('') + print('') for r in results: - print ('{}) {}'.format(num, r.get('uid',''))) + print('{}) {}'.format(num, r.get('uid', ''))) num += 1 if not console: - return {'return':1, 'error':'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} + return { + 'return': 1, 'error': 'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} - print ('') - x=input('Make your selection or press Enter for 0: ') + print('') + x = input('Make your selection or press Enter for 0: ') - x=x.strip() - if x=='': x='0' + x = x.strip() + if x == '': + x = '0' selection = int(x) @@ -607,47 +623,51 @@ def replay(self, i): # Final info if console: - print ('') - print ('Path to experiment: {}'.format(experiment_path2)) + print('') + print('Path to experiment: {}'.format(experiment_path2)) - print ('') - print ('Result UID: {}'.format(uid)) + print('') + print('Result UID: {}'.format(uid)) # Attempt to load cm-input.json - experiment_input_file = os.path.join(experiment_path2, self.CM_INPUT_FILE) + experiment_input_file = os.path.join( + experiment_path2, self.CM_INPUT_FILE) if not os.path.isfile(experiment_input_file): - return {'return':1, 'error':'{} not found - can\'t replay'.format(self.CM_INPUT_FILE)} + return { + 'return': 1, 'error': '{} not found - can\'t replay'.format(self.CM_INPUT_FILE)} r = utils.load_json(experiment_input_file) - if 
r['return']>0: return r + if r['return'] > 0: + return r cm_input = r['meta'] - tags = cm_input.get('tags','').strip() + tags = cm_input.get('tags', '').strip() if 'replay' not in tags: - if tags!='': tags+=',' - tags+='replay' + if tags != '': + tags += ',' + tags += 'replay' cm_input['tags'] = tags if console: - print ('') - print ('Experiment input:') - print ('') - print (json.dumps(cm_input, indent=2)) - print ('') + print('') + print('Experiment input:') + print('') + print(json.dumps(cm_input, indent=2)) + print('') # Run experiment again r = self.cmind.access(cm_input) - if r['return']>0: return r + if r['return'] > 0: + return r # TBA - validate experiment, etc ... - - return {'return':0} - + return {'return': 0} ############################################################ + def _find_or_add_artifact(self, i): """ Find or add experiment artifact (reused in run and reply) @@ -669,45 +689,53 @@ def _find_or_add_artifact(self, i): """ - console = i.get('out','')=='con' + console = i.get('out', '') == 'con' # Try to find experiment artifact by alias and/or tags ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) - ii['action']='find' + ii['action'] = 'find' ii_copy = copy.deepcopy(ii) # If artifact is specified, remove tags - artifact = ii.get('artifact','').strip() - if artifact!='' and not artifact.endswith(':') \ - and '*' not in artifact and '?' not in artifact: - if 'tags' in ii: del(ii['tags']) + artifact = ii.get('artifact', '').strip() + if artifact != '' and not artifact.endswith(':') \ + and '*' not in artifact and '?' not in artifact: + if 'tags' in ii: + del (ii['tags']) r = self.cmind.access(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst = r['list'] - if len(lst)>1: - print ('More than 1 experiment artifact found:') + if len(lst) > 1: + print('More than 1 experiment artifact found:') lst = sorted(lst, key=lambda x: x.path) num = 0 - print ('') + print('') for e in lst: - print ('{}) {}'.format(num, e.path)) - print (' Tags: {}'.format(','.join(e.meta.get('tags',[])))) + print('{}) {}'.format(num, e.path)) + print( + ' Tags: {}'.format( + ','.join( + e.meta.get( + 'tags', + [])))) num += 1 if not console: - return {'return':1, 'error':'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} + return {'return': 1, 'error': 'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} - print ('') - x=input('Make your selection or press Enter for 0: ') + print('') + x = input('Make your selection or press Enter for 0: ') - x=x.strip() - if x=='': x='0' + x = x.strip() + if x == '': + x = '0' selection = int(x) @@ -716,32 +744,35 @@ def _find_or_add_artifact(self, i): experiment = lst[selection] - elif len(lst)==1: + elif len(lst) == 1: experiment = lst[0] else: # Create new entry - if i.get('fail_if_not_found',False): - return {'return':1, 'error':'experiment not found'} + if i.get('fail_if_not_found', False): + return {'return': 1, 'error': 'experiment not found'} ii = copy.deepcopy(ii_copy) - ii['action']='add' + ii['action'] = 'add' r = self.cmind.access(ii) - if r['return']>0: return r + if r['return'] > 0: + return r experiment_uid = r['meta']['uid'] - r = self.cmind.access({'action':'find', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':experiment_uid}) - if r['return']>0: return r + r = self.cmind.access({'action': 'find', + 'automation': 
'experiment,a0a2d123ef064bcb', + 'artifact': experiment_uid}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0 or len(lst)>1: - return {'return':1, 'error':'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} + if len(lst) == 0 or len(lst) > 1: + return { + 'return': 1, 'error': 'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} experiment = lst[0] - return {'return':0, 'experiment':experiment} + return {'return': 0, 'experiment': experiment} ############################################################ def _find_uid(self, i): @@ -773,37 +804,41 @@ def _find_uid(self, i): datetime = i['datetime'] uid = i.get('uid', '').strip() - path_to_experiment_result_file = os.path.join(path, datetime, self.CM_RESULT_FILE) + path_to_experiment_result_file = os.path.join( + path, datetime, self.CM_RESULT_FILE) - rr={'return':0, 'path_to_file':path_to_experiment_result_file} + rr = {'return': 0, 'path_to_file': path_to_experiment_result_file} if os.path.isfile(path_to_experiment_result_file): r = utils.load_json(file_name=path_to_experiment_result_file) - if r['return']>0: return r + if r['return'] > 0: + return r meta = r['meta'] rr['meta'] = meta # Searching for UID - if uid!='': + if uid != '': for result in meta: ruid = result.get('uid', '').strip() - if ruid!='' and ruid==uid: - rr['result']=result + if ruid != '' and ruid == uid: + rr['result'] = result break return rr ############################################################################ -def flatten_dict(d, flat_dict = {}, prefix = ''): + + +def flatten_dict(d, flat_dict={}, prefix=''): for k in d: v = d[k] if type(v) is dict: - flatten_dict(v, flat_dict, prefix+k+'.') + flatten_dict(v, flat_dict, prefix + k + '.') else: - flat_dict[prefix+k] = v + flat_dict[prefix + k] = v return flat_dict diff --git a/automation/project/module.py b/automation/project/module.py index c6e81eed4c..be8bf9c089 100644 --- a/automation/project/module.py +++ b/automation/project/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,6 +48,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/report/module.py b/automation/report/module.py index c6e81eed4c..be8bf9c089 100644 --- a/automation/report/module.py +++ b/automation/report/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,6 +48,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/script/module.py b/automation/script/module.py index ab768f1ff4..97707b9ced 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -3285,33 +3285,33 @@ def _update_variation_meta_with_dynamic_suffix( for key in variation_meta: value = variation_meta[key] - if type(value) is list: # deps,pre_deps... + if isinstance(value, list): # deps,pre_deps... 
for item in value: - if type(item) is dict: + if isinstance(item, dict): for item_key in item: item_value = item[item_key] - if type( - item_value) is dict: # env,default_env inside deps + if isinstance( + item_value, dict): # env,default_env inside deps for item_key2 in item_value: item_value[item_key2] = item_value[item_key2].replace( "#", variation_tag_dynamic_suffix) - elif type(item_value) is list: # names for example + elif isinstance(item_value, list): # names for example for i, l_item in enumerate(item_value): - if type(l_item) is str: + if isinstance(l_item, str): item_value[i] = l_item.replace( "#", variation_tag_dynamic_suffix) else: item[item_key] = item[item_key].replace( "#", variation_tag_dynamic_suffix) - elif type(value) is dict: # add_deps, env, .. + elif isinstance(value, dict): # add_deps, env, .. for item in value: item_value = value[item] - if type(item_value) is dict: # deps + if isinstance(item_value, dict): # deps for item_key in item_value: item_value2 = item_value[item_key] - if type( - item_value2) is dict: # env,default_env inside deps + if isinstance( + item_value2, dict): # env,default_env inside deps for item_key2 in item_value2: item_value2[item_key2] = item_value2[item_key2].replace( "#", variation_tag_dynamic_suffix) @@ -3319,9 +3319,9 @@ def _update_variation_meta_with_dynamic_suffix( item_value[item_key] = item_value[item_key].replace( "#", variation_tag_dynamic_suffix) else: - if type(item_value) is list: # lists inside env... + if isinstance(item_value, list): # lists inside env... for i, l_item in enumerate(item_value): - if type(l_item) is str: + if isinstance(l_item, str): item_value[i] = l_item.replace( "#", variation_tag_dynamic_suffix) else: diff --git a/automation/script/module_help.py b/automation/script/module_help.py index 5094207d5d..820378180a 100644 --- a/automation/script/module_help.py +++ b/automation/script/module_help.py @@ -2,97 +2,103 @@ from cmind import utils # Pring help about script + + def print_help(i): meta = i.get('meta', '') path = i.get('path', '') - if len(meta)==0 and path=='': - return {'return':0} + if len(meta) == 0 and path == '': + return {'return': 0} - print ('') - print ('Help for this CM script ({},{}):'.format(meta.get('alias',''), meta.get('uid',''))) + print('') + print( + 'Help for this CM script ({},{}):'.format( + meta.get( + 'alias', ''), meta.get( + 'uid', ''))) - print ('') - print ('Path to this automation recipe: {}'.format(path)) + print('') + print('Path to this automation recipe: {}'.format(path)) - variations = meta.get('variations',{}) - if len(variations)>0: - print ('') - print ('Available variations:') - print ('') + variations = meta.get('variations', {}) + if len(variations) > 0: + print('') + print('Available variations:') + print('') for v in sorted(variations): - print (' _'+v) + print(' _' + v) input_mapping = meta.get('input_mapping', {}) - if len(input_mapping)>0: - print ('') - print ('Available flags mapped to environment variables:') - print ('') + if len(input_mapping) > 0: + print('') + print('Available flags mapped to environment variables:') + print('') for k in sorted(input_mapping): v = input_mapping[k] - print (' --{} -> --env.{}'.format(k,v)) + print(' --{} -> --env.{}'.format(k, v)) input_description = meta.get('input_description', {}) - if len(input_description)>0: + if len(input_description) > 0: # Check if has important ones (sort) sorted_keys = [] all_keys = sorted(list(input_description.keys())) - for k in sorted(all_keys, key = lambda x: 
input_description[x].get('sort',0)): + for k in sorted( + all_keys, key=lambda x: input_description[x].get('sort', 0)): v = input_description[k] - if v.get('sort',0)>0: + if v.get('sort', 0) > 0: sorted_keys.append(k) - - print ('') - print ('Available flags (Python API dict keys):') - print ('') + print('') + print('Available flags (Python API dict keys):') + print('') for k in all_keys: v = input_description[k] - n = v.get('desc','') + n = v.get('desc', '') - x = ' --'+k - if n!='': x+=' ({})'.format(n) + x = ' --' + k + if n != '': + x += ' ({})'.format(n) - print (x) + print(x) - if len(sorted_keys)>0: - print ('') - print ('Main flags:') - print ('') + if len(sorted_keys) > 0: + print('') + print('Main flags:') + print('') for k in sorted_keys: v = input_description[k] - n = v.get('desc','') + n = v.get('desc', '') - x = ' --'+k + x = ' --' + k d = None if 'default' in v: - d = v.get('default','') - - if d!=None: - x+='='+d - - c = v.get('choices',[]) - if len(c)>0: - x+=' {'+','.join(c)+'}' + d = v.get('default', '') - if n!='': x+=' ({})'.format(n) + if d is not None: + x += '=' + d - print (x) + c = v.get('choices', []) + if len(c) > 0: + x += ' {' + ','.join(c) + '}' + if n != '': + x += ' ({})'.format(n) + print(x) - print ('') - x = input ('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? ') + print('') + x = input('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? ') x = x.strip().lower() - skip_delayed_help = False if x in ['y','yes'] else True + skip_delayed_help = False if x in ['y', 'yes'] else True - r = {'return':0} + r = {'return': 0} if skip_delayed_help: r['skip_delayed_help'] = True diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py index fe6e6aacd8..db42a322ee 100644 --- a/automation/script/module_misc.py +++ b/automation/script/module_misc.py @@ -2,78 +2,100 @@ from cmind import utils # Meta deps -def process_deps(self_module, meta, meta_url, md_script_readme, key, extra_space='', skip_from_meta=False, skip_if_empty=False): + + +def process_deps(self_module, meta, meta_url, md_script_readme, + key, extra_space='', skip_from_meta=False, skip_if_empty=False): x = '' y = [] - if len(meta.get(key,{}))>0: + if len(meta.get(key, {})) > 0: x = '***' for d in meta[key]: d_tags = d.get('tags', '') - z = extra_space+' * '+d_tags + z = extra_space + ' * ' + d_tags y.append(z) names = d.get('names', []) for kk in [ - ('enable_if_env', 'Enable this dependency only if all ENV vars are set'), - ('enable_if_any_env', 'Enable this dependency only if any of ENV vars are set'), - ('skip_if_env', 'Skip this dependenecy only if all ENV vars are set'), - ('skip_if_any_env', 'Skip this dependenecy only if any of ENV vars are set') - ]: + ('enable_if_env', 'Enable this dependency only if all ENV vars are set'), + ('enable_if_any_env', + 'Enable this dependency only if any of ENV vars are set'), + ('skip_if_env', + 'Skip this dependenecy only if all ENV vars are set'), + ('skip_if_any_env', + 'Skip this dependenecy only if any of ENV vars are set') + ]: k1 = kk[0] k2 = kk[1] conditions = d.get(k1, {}) - if len(conditions)>0: - y.append(extra_space+' * {}:
<br>\n`{}`'.format(k2, str(conditions))) - - if len(names)>0: - y.append(extra_space+' * CM names: `--adr.'+str(names)+'...`') + if len(conditions) > 0: + y.append(extra_space + + ' * {}:<br>
\n`{}`'.format(k2, str(conditions))) + if len(names) > 0: + y.append( + extra_space + + ' * CM names: `--adr.' + + str(names) + + '...`') # Attempt to find related CM scripts - r = self_module.cmind.access({'action':'find', - 'automation':'script', - 'tags':d_tags}) - if r['return']==0: + r = self_module.cmind.access({'action': 'find', + 'automation': 'script', + 'tags': d_tags}) + if r['return'] == 0: lst = r['list'] - if len(lst)==0: - y.append(extra_space+' - *Warning: no scripts found*') + if len(lst) == 0: + y.append(extra_space + + ' - *Warning: no scripts found*') else: for s in lst: s_repo_meta = s.repo_meta - s_repo_alias = s_repo_meta.get('alias','') - s_repo_uid = s_repo_meta.get('uid','') + s_repo_alias = s_repo_meta.get('alias', '') + s_repo_uid = s_repo_meta.get('uid', '') # Check URL s_url = '' s_url_repo = '' if s_repo_alias == 'internal': s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo' - s_url = s_url_repo+'/script/' + s_url = s_url_repo + '/script/' elif '@' in s_repo_alias: - s_url_repo = 'https://github.com/'+s_repo_alias.replace('@','/')+'/tree/master' - if s_repo_meta.get('prefix','')!='': s_url_repo+='/'+s_repo_meta['prefix'] - s_url = s_url_repo+ '/script/' + s_url_repo = 'https://github.com/' + \ + s_repo_alias.replace('@', '/') + '/tree/master' + if s_repo_meta.get('prefix', '') != '': + s_url_repo += '/' + s_repo_meta['prefix'] + s_url = s_url_repo + '/script/' s_alias = s.meta['alias'] - y.append(extra_space+' - CM script: [{}]({})'.format(s_alias, s_url+s_alias)) + y.append( + extra_space + ' - CM script: [{}]({})'.format(s_alias, s_url + s_alias)) z = '' if not skip_from_meta: z = ' from [meta]({})'.format(meta_url) - if not skip_if_empty or len(y)>0: - md_script_readme.append((extra_space+' 1. '+x+'Read "{}" on other CM scripts'+z+x).format(key)) + if not skip_if_empty or len(y) > 0: + md_script_readme.append( + (extra_space + + ' 1. ' + + x + + 'Read "{}" on other CM scripts' + + z + + x).format(key)) md_script_readme += y ############################################################ + + def doc(i): """ Add CM automation. 
@@ -110,15 +132,16 @@ def doc(i): console = i.get('out') == 'con' - repos = i.get('repos','') - if repos == '': repos='internal,a4705959af8e447a' + repos = i.get('repos', '') + if repos == '': + repos = 'internal,a4705959af8e447a' - parsed_artifact = i.get('parsed_artifact',[]) + parsed_artifact = i.get('parsed_artifact', []) - if len(parsed_artifact)<1: - parsed_artifact = [('',''), ('','')] - elif len(parsed_artifact)<2: - parsed_artifact.append(('','')) + if len(parsed_artifact) < 1: + parsed_artifact = [('', ''), ('', '')] + elif len(parsed_artifact) < 2: + parsed_artifact.append(('', '')) else: repos = parsed_artifact[1][0] @@ -132,10 +155,13 @@ def doc(i): lst = [] for repo in list_of_repos: - parsed_artifact[1] = ('',repo) if utils.is_cm_uid(repo) else (repo,'') + parsed_artifact[1] = ( + '', repo) if utils.is_cm_uid(repo) else ( + repo, '') ii['parsed_artifact'] = parsed_artifact r = self_module.search(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst += r['list'] md = [] @@ -147,7 +173,7 @@ def doc(i): script_meta = {} urls = {} - for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): toc_readme = [] @@ -158,30 +184,30 @@ def doc(i): meta = artifact.meta original_meta = artifact.original_meta - print ('Documenting {}'.format(path)) + print('Documenting {}'.format(path)) - alias = meta.get('alias','') - uid = meta.get('uid','') + alias = meta.get('alias', '') + uid = meta.get('uid', '') script_meta[alias] = meta - name = meta.get('name','') - developers = meta.get('developers','') + name = meta.get('name', '') + developers = meta.get('developers', '') # Check if has tags help otherwise all tags - tags = meta.get('tags_help','').strip() - if tags=='': - tags = meta.get('tags',[]) + tags = meta.get('tags_help', '').strip() + if tags == '': + tags = meta.get('tags', []) else: tags = tags.split(' ') - variations = meta.get('variations',{}) + variations = meta.get('variations', {}) variation_keys = sorted(list(variations.keys())) - version_keys = sorted(list(meta.get('versions',{}).keys())) + version_keys = sorted(list(meta.get('versions', {}).keys())) - default_variation = meta.get('default_variation','') - default_version = meta.get('default_version','') + default_variation = meta.get('default_variation', '') + default_version = meta.get('default_version', '') input_mapping = meta.get('input_mapping', {}) input_description = meta.get('input_description', {}) @@ -190,10 +216,10 @@ def doc(i): category_sort = meta.get('category_sort', 0) if category != '': if category not in toc_category: - toc_category[category]=[] + toc_category[category] = [] - if category not in toc_category_sort or category_sort>0: - toc_category_sort[category]=category_sort + if category not in toc_category_sort or category_sort > 0: + toc_category_sort[category] = category_sort if alias not in toc_category[category]: toc_category[category].append(alias) @@ -201,25 +227,26 @@ def doc(i): repo_path = artifact.repo_path repo_meta = artifact.repo_meta - repo_alias = repo_meta.get('alias','') - repo_uid = repo_meta.get('uid','') - + repo_alias = repo_meta.get('alias', '') + repo_uid = repo_meta.get('uid', '') # Check URL url = '' url_repo = '' if repo_alias == 'internal': url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo' - url = url_repo+'/script/' + url = url_repo + '/script/' elif '@' in repo_alias: - url_repo = 'https://github.com/'+repo_alias.replace('@','/')+'/tree/dev' - if 
repo_meta.get('prefix','')!='': url_repo+='/'+repo_meta['prefix'] - url = url_repo+ '/script/' + url_repo = 'https://github.com/' + \ + repo_alias.replace('@', '/') + '/tree/dev' + if repo_meta.get('prefix', '') != '': + url_repo += '/' + repo_meta['prefix'] + url = url_repo + '/script/' - if url!='': - url+=alias + if url != '': + url += alias - urls[alias]=url + urls[alias] = url # Check if there is about doc path_readme = os.path.join(path, 'README.md') @@ -228,26 +255,26 @@ def doc(i): readme_about = '' if os.path.isfile(path_readme_about): - r = utils.load_txt(path_readme_about, split = True) - if r['return']>0: return + r = utils.load_txt(path_readme_about, split=True) + if r['return'] > 0: + return s = r['string'] readme_about = r['list'] - ####################################################################### # Start automatically generated README md_script_readme = [ -# '
<details>', -# 'Click here to see the table of contents.', -# '{{CM_README_TOC}}', -# '</details>
', -# '', - 'Automatically generated README for this automation recipe: **{}**'.format(meta['alias']), - ] - - - md_script.append('## '+alias) + # '
<details>', + # 'Click here to see the table of contents.', + # '{{CM_README_TOC}}', + # '</details>
', + # '', + 'Automatically generated README for this automation recipe: **{}**'.format( + meta['alias']), + ] + + md_script.append('## ' + alias) md_script.append('') # x = 'About' @@ -261,19 +288,18 @@ def doc(i): # md_script_readme.append('') # toc_readme.append(' '+x) - if name!='': + if name != '': name += '.' - md_script.append('*'+name+'*') + md_script.append('*' + name + '*') md_script.append('') # md_script_readme.append('*'+name+'*') # md_script_readme.append('') - - if os.path.isfile(path_readme): - r = utils.load_txt(path_readme, split = True) - if r['return']>0: return + r = utils.load_txt(path_readme, split=True) + if r['return'] > 0: + return s = r['string'] readme = r['list'] @@ -283,7 +309,8 @@ def doc(i): # Attempt to rename to README-extra.md if os.path.isfile(path_readme_extra): - return {'return':1, 'error':'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} + return { + 'return': 1, 'error': 'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} os.rename(path_readme, path_readme_extra) @@ -292,16 +319,13 @@ def doc(i): os.system('git add README-extra.md') os.chdir(cur_dir) - - - if category!='': + if category != '': md_script_readme.append('') md_script_readme.append('Category: **{}**'.format(category)) md_script_readme.append('') md_script_readme.append('License: **Apache 2.0**') - md_script_readme.append('') if developers == '': @@ -310,40 +334,38 @@ def doc(i): md_script_readme.append('Developers: ' + developers) x = '* [{}]({})'.format(alias, url) - if name !='': x+=' *('+name+')*' + if name != '': + x += ' *(' + name + ')*' toc.append(x) - - - cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format(alias, uid) + cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format( + alias, uid) if os.path.isfile(path_readme_extra): - readme_extra_url = url+'/README-extra.md' + readme_extra_url = url + '/README-extra.md' - x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format(readme_extra_url) + x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format( + readme_extra_url) md_script.append(x) cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] ' md_script_readme.append('') md_script_readme.append('---') - md_script_readme.append('*'+cm_readme_extra.strip()+'*') - + md_script_readme.append('*' + cm_readme_extra.strip() + '*') - if readme_about!='': + if readme_about != '': md_script_readme += ['', '---', ''] + readme_about - - x = 'Summary' md_script_readme.append('') md_script_readme.append('---') md_script_readme += [ -# '
<details>', -# 'Click to see the summary', - '#### Summary', - '' - ] + # '<details>
', + # 'Click to see the summary', + '#### Summary', + '' + ] toc_readme.append(x) @@ -369,82 +391,82 @@ def doc(i): md_script.append(x) md_script_readme.append(x) - x = '* GitHub directory for this script: *[GitHub]({})*'.format(url) md_script.append(x) md_script_readme.append(x) - - # Check meta meta_file = self_module.cmind.cfg['file_cmeta'] meta_path = os.path.join(path, meta_file) - meta_file += '.yaml' if os.path.isfile(meta_path+'.yaml') else '.json' + meta_file += '.yaml' if os.path.isfile( + meta_path + '.yaml') else '.json' - meta_url = url+'/'+meta_file + meta_url = url + '/' + meta_file - x = '* CM meta description of this script: *[GitHub]({})*'.format(meta_url) + x = '* CM meta description of this script: *[GitHub]({})*'.format( + meta_url) md_script.append(x) # x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script') # md_script.append(x) # md_script_readme.append(x) - if len(variation_keys)>0: - variation_pointer="[,variations]" - variation_pointer2="[variations]" + if len(variation_keys) > 0: + variation_pointer = "[,variations]" + variation_pointer2 = "[variations]" else: - variation_pointer='' - variation_pointer2='' + variation_pointer = '' + variation_pointer2 = '' - if len(input_mapping)>0: - input_mapping_pointer="[--input_flags]" + if len(input_mapping) > 0: + input_mapping_pointer = "[--input_flags]" else: - input_mapping_pointer='' + input_mapping_pointer = '' cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags)) - cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(','.join(tags), variation_pointer, input_mapping_pointer) + cli_all_tags3 = '`cm run script --tags={}{} {}`'.format( + ','.join(tags), variation_pointer, input_mapping_pointer) x = '* CM CLI with all tags: {}*'.format(cli_all_tags) md_script.append(x) cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags)) cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags)) - cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format(' '.join(tags), variation_pointer2, input_mapping_pointer) - cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format(' '.join(tags), variation_pointer, input_mapping_pointer) + cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format( + ' '.join(tags), variation_pointer2, input_mapping_pointer) + cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format( + ' '.join(tags), variation_pointer, input_mapping_pointer) x = '* CM CLI alternative: {}*'.format(cli_all_tags_alternative) md_script.append(x) - cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format(' '.join(tags), variation_pointer2, input_mapping_pointer) + cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format( + ' '.join(tags), variation_pointer2, input_mapping_pointer) # cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer) # x = '* CM CLI with alias and UID: {}*'.format(cli_uid) # md_script.append(x) - if len(variation_keys)>0: - x='' + if len(variation_keys) > 0: + x = '' for variation in variation_keys: - if x!='': x+=';  ' - x+='_'+variation + if x != '': + x += ';  ' + x += '_' + variation md_script.append('* Variations: *{}*'.format(x)) - if default_variation!='': - md_script.append('* Default variation: *{}*'.format(default_variation)) + if default_variation != '': + md_script.append( + '* Default variation: *{}*'.format(default_variation)) - if len(version_keys)>0: - md_script.append('* Versions: *{}*'.format(';  '.join(version_keys))) + if len(version_keys) > 
0: + md_script.append( + '* Versions: *{}*'.format(';  '.join(version_keys))) - if default_version!='': + if default_version != '': md_script.append('* Default version: *{}*'.format(default_version)) - - - - - - md_script.append('') # md_script_readme.append('') @@ -452,29 +474,29 @@ def doc(i): x = 'Meta description' # md_script_readme.append('___') # md_script_readme.append('### '+x) - md_script_readme.append('* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file)) + md_script_readme.append( + '* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file)) # md_script_readme.append('') # toc_readme.append(x) x = 'Tags' # md_script_readme.append('___') # md_script_readme.append('### '+x) - md_script_readme.append('* All CM tags to find and reuse this script (see in above meta description): *{}*'.format(','.join(tags))) + md_script_readme.append( + '* All CM tags to find and reuse this script (see in above meta description): *{}*'.format(','.join(tags))) # md_script_readme.append('') # toc_readme.append(x) - cache = meta.get('cache', False) md_script_readme.append('* Output cached? *{}*'.format(str(cache))) - md_script_readme.append('* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts')) + md_script_readme.append( + '* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts')) md_script_readme += ['', -# '
</details>' + # '</details>
' ] - - # Add usage x1 = 'Reuse this script in your project' x1a = 'Install MLCommons CM automation meta-framework' @@ -485,84 +507,81 @@ def doc(i): x3a = 'Run this script via GUI' x4 = 'Run this script via Docker (beta)' md_script_readme += [ - '', - '---', - '### '+x1, - '', - '#### '+x1a, - '', - '* [Install CM](https://access.cknowledge.org/playground/?action=install)', - '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)', - '', - '#### '+x1aa, - '', - '```cm pull repo {}```'.format(repo_alias), - '', - '#### '+x1b, - '', - '```{}```'.format(cli_help_tags_alternative), - '', - '#### '+x2, - '', - '{}'.format(cli_all_tags), - '', - '{}'.format(cli_all_tags3), - '', - '*or*', - '', - '{}'.format(cli_all_tags_alternative), - '', - '{}'.format(cli_all_tags_alternative3), - '', -# '3. {}'.format(cli_uid), - ''] - + '', + '---', + '### ' + x1, + '', + '#### ' + x1a, + '', + '* [Install CM](https://access.cknowledge.org/playground/?action=install)', + '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)', + '', + '#### ' + x1aa, + '', + '```cm pull repo {}```'.format(repo_alias), + '', + '#### ' + x1b, + '', + '```{}```'.format(cli_help_tags_alternative), + '', + '#### ' + x2, + '', + '{}'.format(cli_all_tags), + '', + '{}'.format(cli_all_tags3), + '', + '*or*', + '', + '{}'.format(cli_all_tags_alternative), + '', + '{}'.format(cli_all_tags_alternative3), + '', + # '3. {}'.format(cli_uid), + ''] x = ' and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.' - if len(variation_keys)>0: - md_script_readme += ['* *See the list of `variations` [here](#variations)'+x+'*', + if len(variation_keys) > 0: + md_script_readme += ['* *See the list of `variations` [here](#variations)' + x + '*', '' ] - if input_description and len(input_description)>0: + if input_description and len(input_description) > 0: x = 'Input Flags' md_script_readme.append('') - md_script_readme.append('#### '+x) - toc_readme.append(' '+x) + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) md_script_readme.append('') key0 = '' for key in input_description: - if key0=='': key0=key + if key0 == '': + key0 = key value = input_description[key] desc = value - if type(value) == dict: + if isinstance(value, dict): desc = value['desc'] choices = value.get('choices', []) if len(choices) > 0: - desc+=' {'+','.join(choices)+'}' + desc += ' {' + ','.join(choices) + '}' - default = value.get('default','') - if default!='': - desc+=' (*'+str(default)+'*)' + default = value.get('default', '') + if default != '': + desc += ' (*' + str(default) + '*)' - md_script_readme.append('* --**{}**={}'.format(key,desc)) + md_script_readme.append('* --**{}**={}'.format(key, desc)) md_script_readme.append('') - md_script_readme.append('**Above CLI flags can be used in the Python CM API as follows:**') + md_script_readme.append( + '**Above CLI flags can be used in the Python CM API as follows:**') md_script_readme.append('') - x = '```python\nr=cm.access({... , "'+key0+'":...}\n```' + x = '```python\nr=cm.access({... , "' + key0 + '":...}\n```' md_script_readme.append(x) - - - - - md_script_readme += ['#### '+x3, + md_script_readme += ['#### ' + x3, '', '
', 'Click here to expand this section.', @@ -573,7 +592,8 @@ def doc(i): '', "r = cmind.access({'action':'run'", " 'automation':'script',", - " 'tags':'{}'".format(','.join(tags)), + " 'tags':'{}'".format( + ','.join(tags)), " 'out':'con',", " ...", " (other input keys for this script)", @@ -589,58 +609,57 @@ def doc(i): '', '', - '#### '+x3a, + '#### ' + x3a, '', - '```cmr "cm gui" --script="'+','.join(tags)+'"```', + '```cmr "cm gui" --script="' + + ','.join(tags) + '"```', '', -# 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)), -# '', - '#### '+x4, + # 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)), + # '', + '#### ' + x4, '', '{}'.format(cli_all_tags_alternative_docker), '' - ] + ] toc_readme.append(x1) - toc_readme.append(' '+x1a) - toc_readme.append(' '+x1b) - toc_readme.append(' '+x2) - toc_readme.append(' '+x3) - toc_readme.append(' '+x3a) - toc_readme.append(' '+x4) + toc_readme.append(' ' + x1a) + toc_readme.append(' ' + x1b) + toc_readme.append(' ' + x2) + toc_readme.append(' ' + x3) + toc_readme.append(' ' + x3a) + toc_readme.append(' ' + x4) x = 'Customization' md_script_readme.append('___') - md_script_readme.append('### '+x) + md_script_readme.append('### ' + x) md_script_readme.append('') toc_readme.append(x) - - - - if len(variation_keys)>0: -# x = 'Variation groups' -# md_script_readme.append('___') -# md_script_readme.append('### '+x) -# toc_readme.append(x) + if len(variation_keys) > 0: + # x = 'Variation groups' + # md_script_readme.append('___') + # md_script_readme.append('### '+x) + # toc_readme.append(x) variation_groups = {} default_variations = [] variation_md = {} variation_alias = {} - # Normally should not use anymore. Should use default:true inside individual variations. - default_variation = meta.get('default_variation','') + # Normally should not use anymore. Should use default:true inside + # individual variations. 
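# Illustrative aside (not part of the diff): the "default:true" style that the
# comment above recommends lives inside each variation's own meta. A minimal
# sketch with hypothetical variation names and env values:
#
#   variations = {
#       'cpu':  {'group': 'device', 'default': True, 'env': {'CM_DEVICE': 'cpu'}},
#       'cuda': {'group': 'device', 'env': {'CM_DEVICE': 'gpu'}},
#   }
#
# With that layout, doc() can list `_cpu` under "Default variations" without
# the legacy top-level 'default_variation' key read just below.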
+ default_variation = meta.get('default_variation', '') for variation_key in sorted(variation_keys): variation = variations[variation_key] - alias = variation.get('alias','').strip() + alias = variation.get('alias', '').strip() - if alias!='': + if alias != '': aliases = variation_alias.get(alias, []) if variation_key not in aliases: aliases.append(variation_key) - variation_alias[alias]=aliases + variation_alias[alias] = aliases # Do not continue this loop if alias continue @@ -660,16 +679,16 @@ def doc(i): default_variations.append(variation_key) - md_var = [] - md_var.append('* {}`_{}`{}'.format(extra1, variation_key, extra2)) + md_var.append( + '* {}`_{}`{}'.format(extra1, variation_key, extra2)) variation_md[variation_key] = md_var # md_script_readme+=md_var - group = variation.get('group','') + group = variation.get('group', '') if variation_key.endswith('_'): group = '*Internal group (variations should not be selected manually)*' @@ -677,17 +696,16 @@ def doc(i): group = '*No group (any variation can be selected)*' if group not in variation_groups: - variation_groups[group]=[] + variation_groups[group] = [] variation_groups[group].append(variation_key) - x = 'Variations' md_script_readme.append('') - md_script_readme.append('#### '+x) - toc_readme.append(' '+x) + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) - variation_groups_order = meta.get('variation_groups_order',[]) + variation_groups_order = meta.get('variation_groups_order', []) for variation in sorted(variation_groups): if variation not in variation_groups_order: variation_groups_order.append(variation) @@ -696,40 +714,51 @@ def doc(i): md_script_readme.append('') if not group_key.startswith('*'): - md_script_readme.append(' * Group "**{}**"'.format(group_key)) + md_script_readme.append( + ' * Group "**{}**"'.format(group_key)) else: md_script_readme.append(' * {}'.format(group_key)) - md_script_readme += [ - '
<details>', - ' Click here to expand this section.', - '' - ] + '<details>
', + ' Click here to expand this section.', + '' + ] for variation_key in sorted(variation_groups[group_key]): variation = variations[variation_key] xmd = variation_md[variation_key] - aliases = variation_alias.get(variation_key,[]) - aliases2 = ['_'+v for v in aliases] + aliases = variation_alias.get(variation_key, []) + aliases2 = ['_' + v for v in aliases] - if len(aliases)>0: - xmd.append(' - Aliases: `{}`'.format(','.join(aliases2))) + if len(aliases) > 0: + xmd.append( + ' - Aliases: `{}`'.format(','.join(aliases2))) - if len(variation.get('env',{}))>0: + if len(variation.get('env', {})) > 0: xmd.append(' - Environment variables:') for key in variation['env']: - xmd.append(' - *{}*: `{}`'.format(key, variation['env'][key])) + xmd.append( + ' - *{}*: `{}`'.format(key, variation['env'][key])) xmd.append(' - Workflow:') - for dep in ['deps', 'prehook_deps', 'posthook_deps', 'post_deps']: - process_deps(self_module, variation, meta_url, xmd, dep, ' ', True, True) + for dep in ['deps', 'prehook_deps', + 'posthook_deps', 'post_deps']: + process_deps( + self_module, + variation, + meta_url, + xmd, + dep, + ' ', + True, + True) for x in xmd: - md_script_readme.append(' '+x) + md_script_readme.append(' ' + x) md_script_readme.append('') md_script_readme.append('
') @@ -737,127 +766,119 @@ def doc(i): # Check if has invalid_variation_combinations vvc = meta.get('invalid_variation_combinations', []) - if len(vvc)>0: + if len(vvc) > 0: x = 'Unsupported or invalid variation combinations' md_script_readme.append('') - md_script_readme.append('#### '+x) + md_script_readme.append('#### ' + x) md_script_readme.append('') md_script_readme.append('') md_script_readme.append('') - toc_readme.append(' '+x) + toc_readme.append(' ' + x) for v in vvc: - vv = ['_'+x for x in v] - md_script_readme.append('* `'+','.join(vv)+'`') - + vv = ['_' + x for x in v] + md_script_readme.append('* `' + ','.join(vv) + '`') - if len(default_variations)>0: + if len(default_variations) > 0: md_script_readme.append('') md_script_readme.append('#### Default variations') md_script_readme.append('') - dv = ['_'+x for x in sorted(default_variations)] + dv = ['_' + x for x in sorted(default_variations)] md_script_readme.append('`{}`'.format(','.join(dv))) - # Check if has valid_variation_combinations vvc = meta.get('valid_variation_combinations', []) - if len(vvc)>0: + if len(vvc) > 0: x = 'Valid variation combinations checked by the community' md_script_readme.append('') - md_script_readme.append('#### '+x) + md_script_readme.append('#### ' + x) md_script_readme.append('') md_script_readme.append('') md_script_readme.append('') - toc_readme.append(' '+x) + toc_readme.append(' ' + x) for v in vvc: - vv = ['_'+x for x in v] - md_script_readme.append('* `'+','.join(vv)+'`') - - - - + vv = ['_' + x for x in v] + md_script_readme.append('* `' + ','.join(vv) + '`') # Check input flags - if input_mapping and len(input_mapping)>0: + if input_mapping and len(input_mapping) > 0: x = 'Script flags mapped to environment' md_script_readme.append('') - md_script_readme.append('#### '+x) - toc_readme.append(' '+x) + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) md_script_readme.append('
') - md_script_readme.append('Click here to expand this section.') + md_script_readme.append( + 'Click here to expand this section.') md_script_readme.append('') key0 = '' for key in sorted(input_mapping): - if key0=='': key0=key + if key0 == '': + key0 = key value = input_mapping[key] - md_script_readme.append('* `--{}=value` → `{}=value`'.format(key,value)) + md_script_readme.append( + '* `--{}=value` → `{}=value`'.format(key, value)) md_script_readme.append('') - md_script_readme.append('**Above CLI flags can be used in the Python CM API as follows:**') + md_script_readme.append( + '**Above CLI flags can be used in the Python CM API as follows:**') md_script_readme.append('') - x = '```python\nr=cm.access({... , "'+key0+'":...}\n```' + x = '```python\nr=cm.access({... , "' + key0 + '":...}\n```' md_script_readme.append(x) md_script_readme.append('') md_script_readme.append('
') md_script_readme.append('') - # Default environment - default_env = meta.get('default_env',{}) + default_env = meta.get('default_env', {}) x = 'Default environment' # md_script_readme.append('___') - md_script_readme.append('#### '+x) - toc_readme.append(' '+x) + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) md_script_readme.append('') md_script_readme.append('
') - md_script_readme.append('Click here to expand this section.') + md_script_readme.append( + 'Click here to expand this section.') md_script_readme.append('') - md_script_readme.append('These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.') + md_script_readme.append( + 'These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.') md_script_readme.append('') for key in default_env: value = default_env[key] - md_script_readme.append('* {}: `{}`'.format(key,value)) + md_script_readme.append('* {}: `{}`'.format(key, value)) md_script_readme.append('') md_script_readme.append('
') md_script_readme.append('') - - - - - - - if len(version_keys)>0 or default_version!='': + if len(version_keys) > 0 or default_version != '': x = 'Versions' # md_script_readme.append('___') - md_script_readme.append('#### '+x) + md_script_readme.append('#### ' + x) toc_readme.append(x) - if default_version!='': - md_script_readme.append('Default version: `{}`'.format(default_version)) + if default_version != '': + md_script_readme.append( + 'Default version: `{}`'.format(default_version)) md_script_readme.append('') - if len(version_keys)>0: + if len(version_keys) > 0: for version in version_keys: md_script_readme.append('* `{}`'.format(version)) - - # Add workflow x = 'Dependencies on other CM scripts' md_script_readme += ['___', - '### '+x, + '### ' + x, ''] toc_readme.append(x) @@ -877,9 +898,10 @@ def doc(i): found_customize = True r = utils.load_txt(path_customize, split=True) - if r['return']>0: return r + if r['return'] > 0: + return r - customize = r['string'] + customize = r['string'] customize_l = r['list'] if 'def preprocess(' in customize: @@ -891,20 +913,21 @@ def doc(i): # Ugly attempt to get output env found_postprocess = False for l in customize_l: -# if not found_postprocess: -# if 'def postprocess' in l: -# found_postprocess = True -# else: + # if not found_postprocess: + # if 'def postprocess' in l: + # found_postprocess = True + # else: j = l.find(' env[') - if j>=0: - j1 = l.find(']', j+4) - if j1>=0: - j2 = l.find('=',j1+1) - if j2>=0: - key2 = l[j+5:j1].strip() - key=key2[1:-1] - - if key.startswith('CM_') and 'TMP' not in key and key not in found_output_env: + if j >= 0: + j1 = l.find(']', j + 4) + if j1 >= 0: + j2 = l.find('=', j1 + 1) + if j2 >= 0: + key2 = l[j + 5:j1].strip() + key = key2[1:-1] + + if key.startswith( + 'CM_') and 'TMP' not in key and key not in found_output_env: found_output_env.append(key) process_deps(self_module, meta, meta_url, md_script_readme, 'deps') @@ -913,10 +936,16 @@ def doc(i): y = 'customize.py' if found_customize_preprocess: x = '***' - y = '['+y+']('+url+'/'+y+')' - md_script_readme.append((' 1. '+x+'Run "preprocess" function from {}'+x).format(y)) + y = '[' + y + '](' + url + '/' + y + ')' + md_script_readme.append( + (' 1. ' + x + 'Run "preprocess" function from {}' + x).format(y)) - process_deps(self_module, meta, meta_url, md_script_readme, 'prehook_deps') + process_deps( + self_module, + meta, + meta_url, + md_script_readme, + 'prehook_deps') # Check scripts files = os.listdir(path) @@ -924,38 +953,51 @@ def doc(i): y = [] for f in sorted(files): x = '***' - if f.startswith('run') and (f.endswith('.sh') or f.endswith('.bat')): - f_url = url+'/'+f + if f.startswith('run') and ( + f.endswith('.sh') or f.endswith('.bat')): + f_url = url + '/' + f y.append(' * [{}]({})'.format(f, f_url)) - md_script_readme.append((' 1. '+x+'Run native script if exists'+x).format(y)) + md_script_readme.append( + (' 1. ' + x + 'Run native script if exists' + x).format(y)) md_script_readme += y - process_deps(self_module, meta, meta_url, md_script_readme, 'posthook_deps') + process_deps( + self_module, + meta, + meta_url, + md_script_readme, + 'posthook_deps') x = '' y = 'customize.py' if found_customize_postprocess: x = '***' - y = '['+y+']('+url+'/'+y+')' - md_script_readme.append((' 1. '+x+'Run "postrocess" function from {}'+x).format(y)) - - process_deps(self_module, meta, meta_url, md_script_readme, 'post_deps') + y = '[' + y + '](' + url + '/' + y + ')' + md_script_readme.append( + (' 1. 
' + x + 'Run "postprocess" function from {}' + x).format(y)) + + process_deps( + self_module, + meta, + meta_url, + md_script_readme, + 'post_deps') # md_script_readme.append('</details>
') md_script_readme.append('') # New environment - new_env_keys = meta.get('new_env_keys',[]) + new_env_keys = meta.get('new_env_keys', []) x = 'Script output' md_script_readme.append('___') - md_script_readme.append('### '+x) + md_script_readme.append('### ' + x) toc_readme.append(x) md_script_readme.append(cli_all_tags_alternative_j) x = 'New environment keys (filter)' - md_script_readme.append('#### '+x) + md_script_readme.append('#### ' + x) toc_readme.append(x) md_script_readme.append('') @@ -979,15 +1021,13 @@ def doc(i): found_output_env_filtered.append(key) x = 'New environment keys auto-detected from customize' - md_script_readme.append('#### '+x) + md_script_readme.append('#### ' + x) toc_readme.append(x) md_script_readme.append('') for key in sorted(found_output_env_filtered): md_script_readme.append('* `{}`'.format(key)) - - # Add maintainers # x = 'Maintainers' # md_script_readme.append('___') @@ -1006,7 +1046,7 @@ def doc(i): prefix = ' ' x2 = x[1:] - x2 = x2.lower().replace(' ','-').replace(',','') + x2 = x2.lower().replace(' ', '-').replace(',', '') toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2) # Add to the total list @@ -1019,31 +1059,33 @@ def doc(i): s = s.replace('{{CM_README_TOC}}', toc_readme_string) r = utils.save_txt(path_readme, s) - if r['return']>0: return r + if r['return'] > 0: + return r # Add to Git (if in git) os.chdir(path) os.system('git add README.md') os.chdir(cur_dir) - # Recreate TOC with categories toc2 = [] - for category in sorted(toc_category):#, key = lambda x: -toc_category_sort[x]): - toc2.append('### '+category) + # , key = lambda x: -toc_category_sort[x]): + for category in sorted(toc_category): + toc2.append('### ' + category) toc2.append('') for script in sorted(toc_category[category]): meta = script_meta[script] - name = meta.get('name','') + name = meta.get('name', '') url = urls[script] x = '* [{}]({})'.format(script, url) - if name !='': x+=' *('+name+')*' + if name != '': + x += ' *(' + name + ')*' toc2.append(x) @@ -1051,13 +1093,13 @@ def doc(i): toc_category_string = '' for category in sorted(toc_category): - category_link = category.lower().replace(' ','-').replace('/','') + category_link = category.lower().replace(' ', '-').replace('/', '') toc_category_string += '* [{}](#{})\n'.format(category, category_link) - # Load template r = utils.load_txt(os.path.join(self_module.path, template_file)) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] @@ -1068,32 +1110,41 @@ def doc(i): s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string) # Output - output_dir = i.get('output_dir','') + output_dir = i.get('output_dir', '') - if output_dir == '': output_dir = '..' + if output_dir == '': + output_dir = '..' 
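# Illustrative aside (not part of the diff): a standalone rendering of the
# "ugly" customize.py scan a few hunks above, which collects output env keys
# from assignment lines such as "    env['CM_FOO'] = value" (names hypothetical):
def _find_env_key_sketch(line):
    j = line.find(' env[')                           # same probe as the loop above
    if j >= 0:
        j1 = line.find(']', j + 4)                   # closing bracket of env[...]
        if j1 >= 0 and line.find('=', j1 + 1) >= 0:  # keep only real assignments
            return line[j + 5:j1].strip()[1:-1]      # drop the surrounding quotes
    return None

# _find_env_key_sketch("    env['CM_ML_MODEL_FILE'] = name") -> 'CM_ML_MODEL_FILE'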
output_file = os.path.join(output_dir, list_file) r = utils.save_txt(output_file, s) - if r['return']>0: return r + if r['return'] > 0: + return r - out_docs_file = os.path.join("..", "docs", "scripts", category, alias, "index.md") + out_docs_file = os.path.join( + "..", + "docs", + "scripts", + category, + alias, + "index.md") r = utils.save_txt(out_docs_file, s) - if r['return']>0: return r - - return {'return':0} + if r['return'] > 0: + return r + return {'return': 0} # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # This function takes in a host path and returns the absolute path on host and the container -# If mounts is passed, the function appends the host path and the container path to mounts in the form "host_path:container_path" +# If mounts is passed, the function appends the host path and the +# container path to mounts in the form "host_path:container_path" def update_path_for_docker(path, mounts=None, force_path_target=''): path_orig = '' path_target = '' - if path!='': # and (os.path.isfile(path) or os.path.isdir(path)): + if path != '': # and (os.path.isfile(path) or os.path.isdir(path)): path = os.path.abspath(path) path_target = path @@ -1105,9 +1156,11 @@ def update_path_for_docker(path, mounts=None, force_path_target=''): x = PureWindowsPath(path_orig) path_target = str(PurePosixPath('/', *x.parts[1:])) - if not path_target.startswith('/'): path_target='/'+path_target + if not path_target.startswith('/'): + path_target = '/' + path_target - path_target='/cm-mount'+path_target if force_path_target=='' else force_path_target + path_target = '/cm-mount' + \ + path_target if force_path_target == '' else force_path_target # If file, mount directory if os.path.isfile(path) or not os.path.isdir(path): @@ -1116,19 +1169,20 @@ def update_path_for_docker(path, mounts=None, force_path_target=''): x = path_orig + ':' + path_target # CHeck if no duplicates - if mounts != None: + if mounts is not None: to_add = True for y in mounts: - if y.lower()==x.lower(): + if y.lower() == x.lower(): to_add = False break if to_add: mounts.append(x) - return (path_orig, path_target) ############################################################ + + def process_inputs(i): import copy @@ -1140,13 +1194,12 @@ def process_inputs(i): # Check if need to update/map/mount inputs and env i_run_cmd = copy.deepcopy(i_run_cmd_arc) - def get_value_using_key_with_dots(d, k): v = None j = k.find('.') - if j>=0: + if j >= 0: k1 = k[:j] - k2 = k[j+1:] + k2 = k[j + 1:] if k1 in d: v = d[k1] @@ -1156,7 +1209,7 @@ def get_value_using_key_with_dots(d, k): else: d = v k = k2 - if type(v)==dict: + if isinstance(v, dict): v = v.get(k2) else: v = None @@ -1168,20 +1221,20 @@ def get_value_using_key_with_dots(d, k): return v, d, k - docker_input_paths = docker_settings.get('input_paths',[]) - if len(i_run_cmd)>0: + docker_input_paths = docker_settings.get('input_paths', []) + if len(i_run_cmd) > 0: for k in docker_input_paths: v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k) - if v2!=None: - v=i_run_cmd2[k2] + if v2 is not None: + v = i_run_cmd2[k2] path_orig, path_target = update_path_for_docker(v, mounts) - if path_target!='': + if path_target != '': i_run_cmd2[k2] = path_target - return {'return':0, 'run_cmd':i_run_cmd} + return {'return': 0, 'run_cmd': i_run_cmd} ############################################################ @@ -1195,18 +1248,18 @@ def regenerate_script_cmd(i): i_run_cmd = i['run_cmd'] - #Cleanup from env everything that has a host path value + # Cleanup from env everything that has a 
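# Illustrative aside (not part of the diff): process_inputs above resolves each
# dotted key from docker_settings['input_paths'] with get_value_using_key_with_dots.
# For i_run_cmd = {'env': {'CM_DATASET_PATH': '/data/imagenet'}} (key and path
# hypothetical), the key 'env.CM_DATASET_PATH' returns v='/data/imagenet',
# d=i_run_cmd['env'] and k='CM_DATASET_PATH', so the caller can rewrite d[k]
# in place with the container-side path produced by update_path_for_docker.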
host path value if i_run_cmd.get('env'): for key in list(i_run_cmd.get('env')): - if type(i_run_cmd['env'][key]) == str and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or (os.path.join("CM", "repos", "") in i_run_cmd['env'][key])) : - del(i_run_cmd['env'][key]) - elif type(i_run_cmd['env'][key]) == list: + if isinstance(i_run_cmd['env'][key], str) and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or (os.path.join("CM", "repos", "") in i_run_cmd['env'][key])): + del (i_run_cmd['env'][key]) + elif isinstance(i_run_cmd['env'][key], list): values_to_remove = [] for val in i_run_cmd['env'][key]: - if type(val) == str and ((os.path.join("local", "cache", "") in val) or (os.path.join("CM", "repos", "") in val)): + if isinstance(val, str) and ((os.path.join("local", "cache", "") in val) or (os.path.join("CM", "repos", "") in val)): values_to_remove.append(val) if values_to_remove == i_run_cmd['env'][key]: - del(i_run_cmd['env'][key]) + del (i_run_cmd['env'][key]) else: for val in values_to_remove: i_run_cmd['env'][key].remove(val) @@ -1229,20 +1282,22 @@ def regenerate_script_cmd(i): if not tags_without_variation: # If no tags without variation, add script alias and UID explicitly - if script_uid!='': x=script_uid - if script_alias!='': - if x!='': x=','+x - x = script_alias+x - - if x!='': + if script_uid != '': + x = script_uid + if script_alias != '': + if x != '': + x = ',' + x + x = script_alias + x + + if x != '': run_cmd += ' ' + x + ' ' - - skip_input_for_fake_run = docker_settings.get('skip_input_for_fake_run', []) + skip_input_for_fake_run = docker_settings.get( + 'skip_input_for_fake_run', []) add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', []) - - def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, key_prefix): + def rebuild_flags(i_run_cmd, fake_run, + skip_input_for_fake_run, add_quotes_to_keys, key_prefix): run_cmd = '' @@ -1251,14 +1306,15 @@ def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_ke if 'tags' in keys: # Move tags first tags_position = keys.index('tags') - del(keys[tags_position]) - keys = ['tags']+keys + del (keys[tags_position]) + keys = ['tags'] + keys for k in keys: # Assemble long key if dictionary long_key = key_prefix - if long_key!='': long_key+='.' - long_key+=k + if long_key != '': + long_key += '.' 
+ long_key += k if fake_run and long_key in skip_input_for_fake_run: continue @@ -1267,25 +1323,34 @@ def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_ke q = '\\"' if long_key in add_quotes_to_keys else '' - if type(v)==dict: - run_cmd += rebuild_flags(v, fake_run, skip_input_for_fake_run, add_quotes_to_keys, long_key) - elif type(v)==list: + if isinstance(v, dict): + run_cmd += rebuild_flags(v, + fake_run, + skip_input_for_fake_run, + add_quotes_to_keys, + long_key) + elif isinstance(v, list): x = '' for vv in v: - if x != '': x+=',' - x+=q+str(vv)+q - run_cmd+=' --'+long_key+',=' + x + if x != '': + x += ',' + x += q + str(vv) + q + run_cmd += ' --' + long_key + ',=' + x else: - run_cmd+=' --'+long_key+'='+q+str(v)+q + run_cmd += ' --' + long_key + '=' + q + str(v) + q return run_cmd - run_cmd += rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, '') - - run_cmd = docker_run_cmd_prefix + ' && ' + run_cmd if docker_run_cmd_prefix!='' else run_cmd + run_cmd += rebuild_flags(i_run_cmd, + fake_run, + skip_input_for_fake_run, + add_quotes_to_keys, + '') - return {'return':0, 'run_cmd_string':run_cmd} + run_cmd = docker_run_cmd_prefix + ' && ' + \ + run_cmd if docker_run_cmd_prefix != '' else run_cmd + return {'return': 0, 'run_cmd_string': run_cmd} ############################################################ @@ -1295,38 +1360,44 @@ def aux_search(i): inp = i['input'] - repos = inp.get('repos','') + repos = inp.get('repos', '') # Grigori Fursin remarked on 20240412 because this line prevents # from searching for scripts in other public or private repositories. # Not sure why we enforce just 2 repositories # # if repos == '': repos='internal,a4705959af8e447a' - parsed_artifact = inp.get('parsed_artifact',[]) + parsed_artifact = inp.get('parsed_artifact', []) - if len(parsed_artifact)<1: - parsed_artifact = [('',''), ('','')] - elif len(parsed_artifact)<2: - parsed_artifact.append(('','')) + if len(parsed_artifact) < 1: + parsed_artifact = [('', ''), ('', '')] + elif len(parsed_artifact) < 2: + parsed_artifact.append(('', '')) else: repos = parsed_artifact[1][0] list_of_repos = repos.split(',') if ',' in repos else [repos] - ii = utils.sub_input(inp, self_module.cmind.cfg['artifact_keys'] + ['tags']) + ii = utils.sub_input( + inp, + self_module.cmind.cfg['artifact_keys'] + + ['tags']) ii['out'] = None # Search for automations in repos lst = [] for repo in list_of_repos: - parsed_artifact[1] = ('',repo) if utils.is_cm_uid(repo) else (repo,'') + parsed_artifact[1] = ( + '', repo) if utils.is_cm_uid(repo) else ( + repo, '') ii['parsed_artifact'] = parsed_artifact r = self_module.search(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst += r['list'] - return {'return':0, 'list':lst} + return {'return': 0, 'list': lst} ############################################################ @@ -1354,10 +1425,14 @@ def dockerfile(i): # Check simplified CMD: cm docker script "python app image-classification onnx" # If artifact has spaces, treat them as tags! 
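# Illustrative aside (not part of the diff): a minimal standalone sketch of what
# the nested rebuild_flags helper in regenerate_script_cmd above produces, with
# the fake-run filtering, key quoting and tags-first reordering omitted:
def _rebuild_flags_sketch(d, key_prefix=''):
    out = ''
    for k, v in d.items():
        long_key = key_prefix + '.' + k if key_prefix else k
        if isinstance(v, dict):                   # recurse into nested dicts
            out += _rebuild_flags_sketch(v, long_key)
        elif isinstance(v, list):                 # lists use the ',=' form
            out += ' --' + long_key + ',=' + ','.join(str(x) for x in v)
        else:
            out += ' --' + long_key + '=' + str(v)
    return out

# _rebuild_flags_sketch({'tags': 'run,mlperf', 'adr': {'compiler': {'tags': 'gcc'}}})
# -> ' --tags=run,mlperf --adr.compiler.tags=gcc'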
self_module = i['self_module'] - self_module.cmind.access({'action':'detect_tags_in_artifact', 'automation':'utils', 'input':i}) + self_module.cmind.access( + {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) # Prepare "clean" input to replicate command - r = self_module.cmind.access({'action':'prune_input', 'automation':'utils', 'input':i, 'extra_keys_starts_with':['docker_']}) + r = self_module.cmind.access({'action': 'prune_input', + 'automation': 'utils', + 'input': i, + 'extra_keys_starts_with': ['docker_']}) i_run_cmd_arc = r['new_input'] cur_dir = os.getcwd() @@ -1368,14 +1443,13 @@ def dockerfile(i): # Search for script(s) r = aux_search({'self_module': self_module, 'input': i}) - if r['return']>0: return r + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - return {'return':1, 'error':'no scripts were found'} - - + if len(lst) == 0: + return {'return': 1, 'error': 'no scripts were found'} # if i.get('cmd'): @@ -1389,38 +1463,33 @@ def dockerfile(i): # # run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd - - - - - env=i.get('env', {}) + env = i.get('env', {}) state = i.get('state', {}) - const=i.get('const', {}) + const = i.get('const', {}) const_state = i.get('const_state', {}) script_automation = i['self_module'] - dockerfile_env=i.get('dockerfile_env', {}) + dockerfile_env = i.get('dockerfile_env', {}) tags_split = i.get('tags', '').split(",") - variation_tags = [ t[1:] for t in tags_split if t.startswith("_") ] + variation_tags = [t[1:] for t in tags_split if t.startswith("_")] - for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): meta = artifact.meta script_path = artifact.path tags = meta.get("tags", []) - tag_string=",".join(tags) + tag_string = ",".join(tags) script_alias = meta.get('alias', '') script_uid = meta.get('uid', '') - verbose = i.get('v', False) show_time = i.get('show_time', False) - run_state = {'deps':[], 'fake_deps':[], 'parent': None} + run_state = {'deps': [], 'fake_deps': [], 'parent': None} run_state['script_id'] = script_alias + "," + script_uid run_state['script_variation_tags'] = variation_tags variations = meta.get('variations', {}) @@ -1429,11 +1498,42 @@ def dockerfile(i): state['docker'] = docker_settings add_deps_recursive = i.get('add_deps_recursive', {}) - r = script_automation.update_state_from_meta(meta, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys = [], new_state_keys = [], run_state=run_state, i = i) + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) if r['return'] > 0: return r - r = script_automation._update_state_from_variations(i, meta, variation_tags, variations, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys_from_meta = [], new_state_keys_from_meta = [], add_deps_recursive = add_deps_recursive, run_state = run_state, recursion_spaces='', verbose = False) + r = script_automation._update_state_from_variations( + i, + meta, + variation_tags, + variations, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys_from_meta=[], + new_state_keys_from_meta=[], + 
add_deps_recursive=add_deps_recursive, + run_state=run_state, + recursion_spaces='', + verbose=False) if r['return'] > 0: return r @@ -1441,7 +1541,8 @@ def dockerfile(i): dockerfile_env = docker_settings['dockerfile_env'] dockerfile_env['CM_RUN_STATE_DOCKER'] = True - if not docker_settings.get('run', True) and not i.get('docker_run_override', False): + if not docker_settings.get('run', True) and not i.get( + 'docker_run_override', False): print("docker.run set to False in _cm.json") continue '''run_config_path = os.path.join(script_path,'run_config.yml') @@ -1459,11 +1560,40 @@ def dockerfile(i): deps = docker_settings.get('build_deps', []) if deps: - r = script_automation._run_deps(deps, [], env, {}, {}, {}, {}, '', [], '', False, '', verbose, show_time, ' ', run_state) + r = script_automation._run_deps( + deps, + [], + env, + {}, + {}, + {}, + {}, + '', + [], + '', + False, + '', + verbose, + show_time, + ' ', + run_state) if r['return'] > 0: return r - #For updating meta from update_meta_if_env - r = script_automation.update_state_from_meta(meta, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys = [], new_state_keys = [], run_state=run_state, i = i) + # For updating meta from update_meta_if_env + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) if r['return'] > 0: return r docker_settings = state['docker'] @@ -1471,68 +1601,107 @@ def dockerfile(i): d_env = i_run_cmd_arc.get('env', {}) for key in list(d_env.keys()): if key.startswith("CM_TMP_"): - del(d_env[key]) + del (d_env[key]) # Check if need to update/map/mount inputs and env r = process_inputs({'run_cmd_arc': i_run_cmd_arc, 'docker_settings': docker_settings, - 'mounts':[]}) - if r['return']>0: return r + 'mounts': []}) + if r['return'] > 0: + return r i_run_cmd = r['run_cmd'] - docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', '')) - - r = regenerate_script_cmd({'script_uid':script_uid, - 'script_alias':script_alias, - 'run_cmd':i_run_cmd, - 'tags':tags, - 'fake_run':True, - 'docker_settings':docker_settings, - 'docker_run_cmd_prefix':docker_run_cmd_prefix}) - if r['return']>0: return r - - run_cmd = r['run_cmd_string'] - - cm_repo = i.get('docker_cm_repo', docker_settings.get('cm_repo', 'mlcommons@cm4mlops')) - cm_repo_branch = i.get('docker_cm_repo_branch', docker_settings.get('cm_repo_branch', 'mlperf-inference')) - - cm_repo_flags = i.get('docker_cm_repo_flags', docker_settings.get('cm_repo_flags', '')) - - docker_base_image = i.get('docker_base_image', docker_settings.get('base_image')) - docker_os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu')) - docker_os_version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04')) - - docker_cm_repos = i.get('docker_cm_repos', docker_settings.get('cm_repos', '')) + docker_run_cmd_prefix = i.get( + 'docker_run_cmd_prefix', docker_settings.get( + 'run_cmd_prefix', '')) + + r = regenerate_script_cmd({'script_uid': script_uid, + 'script_alias': script_alias, + 'run_cmd': i_run_cmd, + 'tags': tags, + 'fake_run': True, + 'docker_settings': docker_settings, + 'docker_run_cmd_prefix': docker_run_cmd_prefix}) + if r['return'] > 0: + return r - docker_skip_cm_sys_upgrade = i.get('docker_skip_cm_sys_upgrade', docker_settings.get('skip_cm_sys_upgrade', '')) + run_cmd = 
r['run_cmd_string'] + + cm_repo = i.get( + 'docker_cm_repo', + docker_settings.get( + 'cm_repo', + 'mlcommons@cm4mlops')) + cm_repo_branch = i.get( + 'docker_cm_repo_branch', + docker_settings.get( + 'cm_repo_branch', + 'mlperf-inference')) + + cm_repo_flags = i.get( + 'docker_cm_repo_flags', + docker_settings.get( + 'cm_repo_flags', + '')) + + docker_base_image = i.get( + 'docker_base_image', + docker_settings.get('base_image')) + docker_os = i.get( + 'docker_os', docker_settings.get( + 'docker_os', 'ubuntu')) + docker_os_version = i.get( + 'docker_os_version', docker_settings.get( + 'docker_os_version', '22.04')) + + docker_cm_repos = i.get( + 'docker_cm_repos', + docker_settings.get( + 'cm_repos', + '')) + + docker_skip_cm_sys_upgrade = i.get( + 'docker_skip_cm_sys_upgrade', docker_settings.get( + 'skip_cm_sys_upgrade', '')) docker_extra_sys_deps = i.get('docker_extra_sys_deps', '') if not docker_base_image: - dockerfilename_suffix = docker_os +'_'+docker_os_version + dockerfilename_suffix = docker_os + '_' + docker_os_version else: if os.name == 'nt': - dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-') + dockerfilename_suffix = docker_base_image.replace( + '/', '-').replace(':', '-') else: dockerfilename_suffix = docker_base_image.split("/") - dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1] + dockerfilename_suffix = dockerfilename_suffix[len( + dockerfilename_suffix) - 1] - fake_run_deps = i.get('fake_run_deps', docker_settings.get('fake_run_deps', False)) - docker_run_final_cmds = docker_settings.get('docker_run_final_cmds', []) + fake_run_deps = i.get( + 'fake_run_deps', docker_settings.get( + 'fake_run_deps', False)) + docker_run_final_cmds = docker_settings.get( + 'docker_run_final_cmds', []) r = check_gh_token(i, docker_settings, quiet) - if r['return'] >0 : return r + if r['return'] > 0: + return r gh_token = r['gh_token'] - i['docker_gh_token'] = gh_token # To pass to docker function if needed + i['docker_gh_token'] = gh_token # To pass to docker function if needed - if i.get('docker_real_run', docker_settings.get('docker_real_run',False)): + if i.get('docker_real_run', docker_settings.get( + 'docker_real_run', False)): fake_run_option = " " fake_run_deps = False else: fake_run_option = " --fake_run" - docker_copy_files = i.get('docker_copy_files', docker_settings.get('copy_files', [])) + docker_copy_files = i.get( + 'docker_copy_files', + docker_settings.get( + 'copy_files', + [])) env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds @@ -1540,23 +1709,27 @@ def dockerfile(i): if docker_path == '': docker_path = script_path - dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') + dockerfile_path = os.path.join( + docker_path, + 'dockerfiles', + dockerfilename_suffix + + '.Dockerfile') if i.get('print_deps'): cm_input = {'action': 'run', - 'automation': 'script', - 'tags': f"""{i.get('tags')}""", - 'print_deps': True, - 'quiet': True, - 'silent': True, - 'fake_run': True, - 'fake_deps': True - } + 'automation': 'script', + 'tags': f"""{i.get('tags')}""", + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True, + 'fake_deps': True + } r = self_module.cmind.access(cm_input) if r['return'] > 0: return r print_deps = r['new_state']['print_deps'] - comments = [ "#RUN " + dep for dep in print_deps ] + comments = ["#RUN " + dep for dep in print_deps] comments.append("") comments.append("# Run CM workflow") else: @@ -1588,7 +1761,7 @@ def dockerfile(i): 
'fake_docker_deps': fake_run_deps, 'print_deps': True, 'real_run': True - } + } if docker_cm_repos != '': cm_docker_input['cm_repos'] = docker_cm_repos @@ -1603,12 +1776,15 @@ def dockerfile(i): if r['return'] > 0: return r - print ('') - print ("Dockerfile generated at " + dockerfile_path) + print('') + print("Dockerfile generated at " + dockerfile_path) + + return {'return': 0} + +# we mount the main folder of the CM cache entry in case any file/folder +# in that cache entry is needed inside the container - return {'return':0} -# we mount the main folder of the CM cache entry in case any file/folder in that cache entry is needed inside the container def get_host_path(value): path_split = value.split(os.sep) if len(path_split) == 1: @@ -1618,15 +1794,17 @@ def get_host_path(value): if "cache" in path_split and "local": repo_entry_index = path_split.index("local") if len(path_split) >= repo_entry_index + 3: - return os.sep.join(path_split[0:repo_entry_index+3]) + return os.sep.join(path_split[0:repo_entry_index + 3]) return value + def get_container_path_script(i): tmp_dep_cached_path = i['tmp_dep_cached_path'] - value_mnt,value_env = get_container_path(tmp_dep_cached_path) + value_mnt, value_env = get_container_path(tmp_dep_cached_path) return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env} + def get_container_path(value): path_split = value.split(os.sep) if len(path_split) == 1: @@ -1634,14 +1812,15 @@ def get_container_path(value): new_value = '' if "cache" in path_split and "local" in path_split: - new_path_split = [ "", "home", "cmuser", "CM", "repos" ] + new_path_split = ["", "home", "cmuser", "CM", "repos"] repo_entry_index = path_split.index("local") if len(path_split) >= repo_entry_index + 3: - new_path_split1 = new_path_split + path_split[repo_entry_index:repo_entry_index+3] + new_path_split1 = new_path_split + \ + path_split[repo_entry_index:repo_entry_index + 3] new_path_split2 = new_path_split + path_split[repo_entry_index:] return "/".join(new_path_split1), "/".join(new_path_split2) else: - orig_path,target_path = update_path_for_docker(path=value) + orig_path, target_path = update_path_for_docker(path=value) return target_path, target_path # return value, value @@ -1673,7 +1852,7 @@ def docker(i): self_module = i['self_module'] - if type(i.get('docker', None)) == dict: + if isinstance(i.get('docker', None), dict): # Grigori started cleaning and refactoring this code on 20240929 # # 1. use --docker dictionary instead of --docker_{keys} @@ -1681,15 +1860,15 @@ def docker(i): if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0: docker_params = utils.convert_dictionary(i['docker'], 'docker') i.update(docker_params) - del(i['docker']) + del (i['docker']) quiet = i.get('quiet', False) detached = i.get('docker_detached', '') - if detached=='': + if detached == '': detached = i.get('docker_dt', '') - if detached=='': - detached='no' + if detached == '': + detached = 'no' interactive = i.get('docker_interactive', '') if interactive == '': @@ -1700,17 +1879,24 @@ def docker(i): # Check simplified CMD: cm docker script "python app image-classification onnx" # If artifact has spaces, treat them as tags! - self_module.cmind.access({'action':'detect_tags_in_artifact', 'automation':'utils', 'input':i}) + self_module.cmind.access( + {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) - # CAREFUL -> artifacts and parsed_artifacts are not supported in input (and should not be?) 
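# Illustrative aside (not part of the diff): expected behaviour of
# get_container_path above for a POSIX host cache path (the hash is made up):
#
#   get_container_path('/home/user/CM/repos/local/cache/abc123/data/file.txt')
#   -> ('/home/cmuser/CM/repos/local/cache/abc123',                # mount root
#       '/home/cmuser/CM/repos/local/cache/abc123/data/file.txt')  # env value
#
# The whole cache entry is mounted while env values keep the full file path,
# matching the "we mount the main folder of the CM cache entry" note above.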
- if 'artifacts' in i: del(i['artifacts']) - if 'parsed_artifacts' in i: del(i['parsed_artifacts']) + # CAREFUL -> artifacts and parsed_artifacts are not supported in input + # (and should not be?) + if 'artifacts' in i: + del (i['artifacts']) + if 'parsed_artifacts' in i: + del (i['parsed_artifacts']) # Prepare "clean" input to replicate command - r = self_module.cmind.access({'action':'prune_input', 'automation':'utils', 'input':i, 'extra_keys_starts_with':['docker_']}) + r = self_module.cmind.access({'action': 'prune_input', + 'automation': 'utils', + 'input': i, + 'extra_keys_starts_with': ['docker_']}) i_run_cmd_arc = r['new_input'] - env=i.get('env', {}) + env = i.get('env', {}) noregenerate_docker_file = i.get('docker_noregenerate', False) norecreate_docker_image = i.get('docker_norecreate', True) @@ -1726,18 +1912,20 @@ def docker(i): if docker_cfg != '' or docker_cfg_uid != '': # Check if docker_cfg is turned on but not selected - if type(docker_cfg) == bool or str(docker_cfg).lower() in ['true','yes']: - docker_cfg= '' - - r = self_module.cmind.access({'action':'select_cfg', - 'automation':'utils,dc2743f8450541e3', - 'tags':'basic,docker,configurations', - 'title':'docker', - 'alias':docker_cfg, - 'uid':docker_cfg_uid}) + if isinstance(docker_cfg, bool) or str( + docker_cfg).lower() in ['true', 'yes']: + docker_cfg = '' + + r = self_module.cmind.access({'action': 'select_cfg', + 'automation': 'utils,dc2743f8450541e3', + 'tags': 'basic,docker,configurations', + 'title': 'docker', + 'alias': docker_cfg, + 'uid': docker_cfg_uid}) if r['return'] > 0: if r['return'] == 16: - return {'return':1, 'error':'Docker configuration {} was not found'.format(docker_cfg)} + return {'return': 1, 'error': 'Docker configuration {} was not found'.format( + docker_cfg)} return r selection = r['selection'] @@ -1746,11 +1934,13 @@ def docker(i): i.update(docker_input_update) - ######################################################################################## + ########################################################################## # Run dockerfile if not noregenerate_docker_file: - r = utils.call_internal_module(self_module, __file__, 'module_misc', 'dockerfile', i) - if r['return']>0: return r + r = utils.call_internal_module( + self_module, __file__, 'module_misc', 'dockerfile', i) + if r['return'] > 0: + return r # Save current directory cur_dir = os.getcwd() @@ -1759,12 +1949,13 @@ def docker(i): # Search for script(s) r = aux_search({'self_module': self_module, 'input': i}) - if r['return']>0: return r + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - return {'return':1, 'error':'no scripts were found'} + if len(lst) == 0: + return {'return': 1, 'error': 'no scripts were found'} env['CM_RUN_STATE_DOCKER'] = False script_automation = i['self_module'] @@ -1773,40 +1964,40 @@ def docker(i): const_state = i.get('const_state', {}) tags_split = i.get('tags', '').split(",") - variation_tags = [ t[1:] for t in tags_split if t.startswith("_") ] + variation_tags = [t[1:] for t in tags_split if t.startswith("_")] docker_cache = i.get('docker_cache', "yes") - if docker_cache in ["no", False, "False" ]: + if docker_cache in ["no", False, "False"]: if 'CM_DOCKER_CACHE' not in env: env['CM_DOCKER_CACHE'] = docker_cache - image_repo = i.get('docker_image_repo','') + image_repo = i.get('docker_image_repo', '') if image_repo == '': image_repo = 'local' # Host system needs to have docker - r = self_module.cmind.access({'action':'run', - 'automation':'script', - 'tags': "get,docker"}) + r 
= self_module.cmind.access({'action': 'run', + 'automation': 'script', + 'tags': "get,docker"}) if r['return'] > 0: return r - for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')): + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): meta = artifact.meta - if i.get('help',False): - return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', {'meta':meta, 'path':artifact.path}) + if i.get('help', False): + return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', { + 'meta': meta, 'path': artifact.path}) script_path = artifact.path tags = meta.get("tags", []) - tag_string=",".join(tags) + tag_string = ",".join(tags) script_alias = meta.get('alias', '') script_uid = meta.get('uid', '') - mounts = copy.deepcopy(i.get('docker_mounts', [])) '''run_config_path = os.path.join(script_path,'run_config.yml') @@ -1822,22 +2013,54 @@ def docker(i): docker_settings = meta.get('docker', {}) state['docker'] = docker_settings # Todo: Support state, const and add_deps_recursive - run_state = {'deps':[], 'fake_deps':[], 'parent': None} + run_state = {'deps': [], 'fake_deps': [], 'parent': None} run_state['script_id'] = script_alias + "," + script_uid run_state['script_variation_tags'] = variation_tags add_deps_recursive = i.get('add_deps_recursive', {}) - r = script_automation.update_state_from_meta(meta, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys = [], new_state_keys = [], run_state=run_state, i = i) + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) if r['return'] > 0: return r - r = script_automation._update_state_from_variations(i, meta, variation_tags, variations, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys_from_meta = [], new_state_keys_from_meta = [], add_deps_recursive = add_deps_recursive, run_state = run_state, recursion_spaces='', verbose = False) + r = script_automation._update_state_from_variations( + i, + meta, + variation_tags, + variations, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys_from_meta=[], + new_state_keys_from_meta=[], + add_deps_recursive=add_deps_recursive, + run_state=run_state, + recursion_spaces='', + verbose=False) if r['return'] > 0: return r docker_settings = state['docker'] - if not docker_settings.get('run', True) and not i.get('docker_run_override', False): + if not docker_settings.get('run', True) and not i.get( + 'docker_run_override', False): print("docker.run set to False in _cm.json") continue ''' @@ -1849,30 +2072,64 @@ def docker(i): # Check if need to update/map/mount inputs and env r = process_inputs({'run_cmd_arc': i_run_cmd_arc, 'docker_settings': docker_settings, - 'mounts':mounts}) - if r['return']>0: return r + 'mounts': mounts}) + if r['return'] > 0: + return r i_run_cmd = r['run_cmd'] # Check if need to mount home directory current_path_target = '/cm-mount/current' - if docker_settings.get('mount_current_dir','')=='yes': - update_path_for_docker('.', mounts, force_path_target=current_path_target) - + if docker_settings.get('mount_current_dir', '') == 'yes': + update_path_for_docker( + '.', mounts, force_path_target=current_path_target) _os = i.get('docker_os', docker_settings.get('os', 
'ubuntu')) - version = i.get('docker_os_version', docker_settings.get('os_version', '22.04')) + version = i.get( + 'docker_os_version', + docker_settings.get( + 'os_version', + '22.04')) build_deps = docker_settings.get('deps', []) deps = docker_settings.get('deps', []) deps = build_deps + deps if deps: - r = script_automation._run_deps(deps, [], env, {}, {}, {}, {}, '', [], '', False, '', verbose, show_time, ' ', run_state) + r = script_automation._run_deps( + deps, + [], + env, + {}, + {}, + {}, + {}, + '', + [], + '', + False, + '', + verbose, + show_time, + ' ', + run_state) if r['return'] > 0: return r - #For updating meta from update_meta_if_env - r = script_automation.update_state_from_meta(meta, env, state, const, const_state, deps = [], post_deps = [], prehook_deps = [], posthook_deps = [], new_env_keys = [], new_state_keys = [], run_state=run_state, i = i) + # For updating meta from update_meta_if_env + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) if r['return'] > 0: return r @@ -1881,36 +2138,42 @@ def docker(i): for key in docker_settings.get('mounts', []): mounts.append(key) - # Updating environment variables from CM input based on input_mapping from meta + # Updating environment variables from CM input based on input_mapping + # from meta input_mapping = meta.get('input_mapping', {}) for c_input in input_mapping: if c_input in i: env[input_mapping[c_input]] = i[c_input] - #del(i[c_input]) + # del(i[c_input]) - # Updating environment variables from CM input based on docker_input_mapping from meta + # Updating environment variables from CM input based on + # docker_input_mapping from meta docker_input_mapping = docker_settings.get('docker_input_mapping', {}) for c_input in docker_input_mapping: if c_input in i: env[docker_input_mapping[c_input]] = i[c_input] - #del(i[c_input]) + # del(i[c_input]) - container_env_string = '' # env keys corresponding to container mounts are explicitly passed to the container run cmd + # env keys corresponding to container mounts are explicitly passed to + # the container run cmd + container_env_string = '' for index in range(len(mounts)): mount = mounts[index] # Since windows may have 2 :, we search from the right j = mount.rfind(':') - if j>0: - mount_parts = [mount[:j], mount[j+1:]] + if j > 0: + mount_parts = [mount[:j], mount[j + 1:]] else: - return {'return':1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)} + return { + 'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)} # mount_parts = mount.split(":") # if len(mount_parts) != 2: -# return {'return': 1, 'error': f'Invalid mount specified in docker settings'} +# return {'return': 1, 'error': f'Invalid mount specified in docker +# settings'} host_mount = mount_parts[0] new_host_mount = host_mount @@ -1925,7 +2188,7 @@ def docker(i): if tmp_value in env: host_env_key = tmp_value new_host_mount = get_host_path(env[tmp_value]) - else:# we skip those mounts + else: # we skip those mounts mounts[index] = None skip = True break @@ -1935,10 +2198,11 @@ def docker(i): for tmp_value in tmp_values: container_env_key = tmp_value if tmp_value in env: - new_container_mount, new_container_mount_env = get_container_path(env[tmp_value]) + new_container_mount, new_container_mount_env = get_container_path( + env[tmp_value]) container_env_key = new_container_mount_env - 
#container_env_string += " --env.{}={} ".format(tmp_value, new_container_mount_env) - else:# we skip those mounts + # container_env_string += " --env.{}={} ".format(tmp_value, new_container_mount_env) + else: # we skip those mounts mounts[index] = None skip = True break @@ -1947,9 +2211,10 @@ def docker(i): if skip: continue - mounts[index] = new_host_mount+":"+new_container_mount + mounts[index] = new_host_mount + ":" + new_container_mount if host_env_key: - container_env_string += " --env.{}={} ".format(host_env_key, container_env_key) + container_env_string += " --env.{}={} ".format( + host_env_key, container_env_key) for v in docker_input_mapping: if docker_input_mapping[v] == host_env_key: @@ -1958,10 +2223,21 @@ def docker(i): mounts = list(filter(lambda item: item is not None, mounts)) - mount_string = "" if len(mounts)==0 else ",".join(mounts) - - #check for proxy settings and pass onto the docker - proxy_keys = [ "ftp_proxy", "FTP_PROXY", "http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY", "socks_proxy", "SOCKS_PROXY", "GH_TOKEN" ] + mount_string = "" if len(mounts) == 0 else ",".join(mounts) + + # check for proxy settings and pass onto the docker + proxy_keys = [ + "ftp_proxy", + "FTP_PROXY", + "http_proxy", + "HTTP_PROXY", + "https_proxy", + "HTTPS_PROXY", + "no_proxy", + "NO_PROXY", + "socks_proxy", + "SOCKS_PROXY", + "GH_TOKEN"] if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []: env['+ CM_DOCKER_BUILD_ARGS'] = [] @@ -1970,45 +2246,76 @@ def docker(i): if os.environ.get(key, '') != '': value = os.environ[key] container_env_string += " --env.{}={} ".format(key, value) - env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format(key, value)) - - docker_use_host_group_id = i.get('docker_use_host_group_id', docker_settings.get('use_host_group_id')) - if str(docker_use_host_group_id).lower() not in ['false', 'no', '0'] and os.name != 'nt': - env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format('GID', '\\" $(id -g $USER) \\"')) - - docker_use_host_user_id = i.get('docker_use_host_user_id', docker_settings.get('use_host_user_id')) - if str(docker_use_host_user_id).lower() not in ['false', 'no', '0'] and os.name != 'nt': - env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format('UID', '\\" $(id -u $USER) \\"')) - - docker_base_image = i.get('docker_base_image', docker_settings.get('base_image')) + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format(key, value)) + + docker_use_host_group_id = i.get( + 'docker_use_host_group_id', + docker_settings.get('use_host_group_id')) + if str(docker_use_host_group_id).lower() not in [ + 'false', 'no', '0'] and os.name != 'nt': + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format('GID', '\\" $(id -g $USER) \\"')) + + docker_use_host_user_id = i.get( + 'docker_use_host_user_id', + docker_settings.get('use_host_user_id')) + if str(docker_use_host_user_id).lower() not in [ + 'false', 'no', '0'] and os.name != 'nt': + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format('UID', '\\" $(id -u $USER) \\"')) + + docker_base_image = i.get( + 'docker_base_image', + docker_settings.get('base_image')) docker_os = i.get('docker_os', docker_settings.get('os', 'ubuntu')) - docker_os_version = i.get('docker_os_version', docker_settings.get('os_version', '22.04')) - image_tag_extra = i.get('docker_image_tag_extra', docker_settings.get('image_tag_extra', '-latest')) + docker_os_version = i.get( + 'docker_os_version', docker_settings.get( + 'os_version', '22.04')) + image_tag_extra = i.get( + 'docker_image_tag_extra', + docker_settings.get( + 
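The proxy hunk above forwards any proxy-related variables found in the host environment into both the container runtime environment and the Docker build arguments. A minimal standalone sketch of that pattern, assuming only os.environ (the variable names below are illustrative, not part of this commit):

import os

# Same idea as the proxy_keys loop above: forward host proxy settings.
proxy_keys = ["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY",
              "no_proxy", "NO_PROXY"]

container_env_string = ''
build_args = []
for key in proxy_keys:
    value = os.environ.get(key, '')
    if value != '':
        # Pass the setting both to `docker run` (via --env.KEY=VALUE)
        # and to `docker build` (via build args).
        container_env_string += " --env.{}={} ".format(key, value)
        build_args.append("{}={}".format(key, value))

print(container_env_string)
print(build_args)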
'image_tag_extra', + '-latest')) if not docker_base_image: - dockerfilename_suffix = docker_os +'_'+docker_os_version + dockerfilename_suffix = docker_os + '_' + docker_os_version else: if os.name == 'nt': - dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-') + dockerfilename_suffix = docker_base_image.replace( + '/', '-').replace(':', '-') else: dockerfilename_suffix = docker_base_image.split("/") - dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1] + dockerfilename_suffix = dockerfilename_suffix[len( + dockerfilename_suffix) - 1] - - cm_repo=i.get('docker_cm_repo', docker_settings.get('cm_repo', 'mlcommons@cm4mlops')) + cm_repo = i.get( + 'docker_cm_repo', + docker_settings.get( + 'cm_repo', + 'mlcommons@cm4mlops')) docker_path = i.get('docker_path', '').strip() if docker_path == '': docker_path = script_path - dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') + dockerfile_path = os.path.join( + docker_path, + 'dockerfiles', + dockerfilename_suffix + + '.Dockerfile') # Skips docker run cmd and gives an interactive shell to the user - docker_skip_run_cmd = i.get('docker_skip_run_cmd', docker_settings.get('skip_run_cmd', False)) + docker_skip_run_cmd = i.get( + 'docker_skip_run_cmd', docker_settings.get( + 'skip_run_cmd', False)) - docker_pre_run_cmds = i.get('docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) + docker_pre_run_cmds = i.get( + 'docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) - docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', '')) + docker_run_cmd_prefix = i.get( + 'docker_run_cmd_prefix', docker_settings.get( + 'run_cmd_prefix', '')) all_gpus = i.get('docker_all_gpus', docker_settings.get('all_gpus')) @@ -2016,26 +2323,50 @@ def docker(i): device = i.get('docker_device', docker_settings.get('device')) - image_name = i.get('docker_image_name', docker_settings.get('image_name', '')) + image_name = i.get( + 'docker_image_name', + docker_settings.get( + 'image_name', + '')) r = check_gh_token(i, docker_settings, quiet) - if r['return'] >0 : return r + if r['return'] > 0: + return r gh_token = r['gh_token'] - - port_maps = i.get('docker_port_maps', docker_settings.get('port_maps', [])) - - shm_size = i.get('docker_shm_size', docker_settings.get('shm_size', '')) - - pass_user_id = i.get('docker_pass_user_id', docker_settings.get('pass_user_id', '')) - pass_user_group = i.get('docker_pass_user_group', docker_settings.get('pass_user_group', '')) - - extra_run_args = i.get('docker_extra_run_args', docker_settings.get('extra_run_args', '')) + port_maps = i.get( + 'docker_port_maps', + docker_settings.get( + 'port_maps', + [])) + + shm_size = i.get( + 'docker_shm_size', + docker_settings.get( + 'shm_size', + '')) + + pass_user_id = i.get( + 'docker_pass_user_id', + docker_settings.get( + 'pass_user_id', + '')) + pass_user_group = i.get( + 'docker_pass_user_group', + docker_settings.get( + 'pass_user_group', + '')) + + extra_run_args = i.get( + 'docker_extra_run_args', + docker_settings.get( + 'extra_run_args', + '')) if detached == '': detached = docker_settings.get('detached', '') - if str(docker_skip_run_cmd).lower() in ['true','1','yes']: + if str(docker_skip_run_cmd).lower() in ['true', '1', 'yes']: interactive = 'yes' elif interactive == '': interactive = docker_settings.get('interactive', '') @@ -2051,29 +2382,30 @@ def docker(i): # else: # run_cmd = "" - - - r = 
regenerate_script_cmd({'script_uid':script_uid, - 'script_alias':script_alias, - 'tags':tags, - 'run_cmd':i_run_cmd, - 'docker_settings':docker_settings, - 'docker_run_cmd_prefix':i.get('docker_run_cmd_prefix','')}) - if r['return']>0: return r - run_cmd = r['run_cmd_string'] + ' ' + container_env_string + ' --docker_run_deps ' + r = regenerate_script_cmd({'script_uid': script_uid, + 'script_alias': script_alias, + 'tags': tags, + 'run_cmd': i_run_cmd, + 'docker_settings': docker_settings, + 'docker_run_cmd_prefix': i.get('docker_run_cmd_prefix', '')}) + if r['return'] > 0: + return r + run_cmd = r['run_cmd_string'] + ' ' + \ + container_env_string + ' --docker_run_deps ' env['CM_RUN_STATE_DOCKER'] = True - if docker_settings.get('mount_current_dir','')=='yes': - run_cmd = 'cd '+current_path_target+' && '+run_cmd + if docker_settings.get('mount_current_dir', '') == 'yes': + run_cmd = 'cd ' + current_path_target + ' && ' + run_cmd - final_run_cmd = run_cmd if docker_skip_run_cmd not in [ 'yes', True, 'True' ] else 'cm version' + final_run_cmd = run_cmd if docker_skip_run_cmd not in [ + 'yes', True, 'True'] else 'cm version' - print ('') - print ('CM command line regenerated to be used inside Docker:') - print ('') - print (final_run_cmd) - print ('') + print('') + print('CM command line regenerated to be used inside Docker:') + print('') + print(final_run_cmd) + print('') docker_recreate_image = 'yes' if not norecreate_docker_image else 'no' @@ -2093,7 +2425,7 @@ def docker(i): 'interactive': interactive, 'mounts': mounts, 'image_name': image_name, -# 'image_tag': script_alias, + # 'image_tag': script_alias, 'image_tag_extra': image_tag_extra, 'detached': detached, 'script_tags': f"""{i.get('tags')}""", @@ -2107,7 +2439,7 @@ def docker(i): 'dockerfile': dockerfile_path } } - } + } if all_gpus: cm_docker_input['all_gpus'] = True @@ -2133,36 +2465,39 @@ def docker(i): if pass_user_group != '': cm_docker_input['pass_user_group'] = pass_user_group - if extra_run_args != '': cm_docker_input['extra_run_args'] = extra_run_args if i.get('docker_save_script', ''): cm_docker_input['save_script'] = i['docker_save_script'] - print ('') + print('') r = self_module.cmind.access(cm_docker_input) if r['return'] > 0: return r - - return {'return':0} + return {'return': 0} ############################################################ + + def check_gh_token(i, docker_settings, quiet): gh_token = i.get('docker_gh_token', '') if docker_settings.get('gh_token_required', False) and gh_token == '': - rx = {'return':1, 'error':'GH token is required but not provided. Use --docker_gh_token to set it'} + rx = { + 'return': 1, + 'error': 'GH token is required but not provided. 
Use --docker_gh_token to set it'} if quiet: return rx - print ('') - gh_token = input ('Enter GitHub token to access private CM repositories required for this CM script: ') + print('') + gh_token = input( + 'Enter GitHub token to access private CM repositories required for this CM script: ') if gh_token == '': return rx - return {'return':0, 'gh_token': gh_token} + return {'return': 0, 'gh_token': gh_token} diff --git a/automation/script/template-ae-python/customize.py b/automation/script/template-ae-python/customize.py index d12f9b3e1d..273999d460 100644 --- a/automation/script/template-ae-python/customize.py +++ b/automation/script/template-ae-python/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,10 +14,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py index d851f1450f..caa499bf08 100644 --- a/automation/script/template-ae-python/main.py +++ b/automation/script/template-ae-python/main.py @@ -2,9 +2,9 @@ if __name__ == "__main__": - print ('') - print ('Main script:') - print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) - print ('') + print('') + print('Main script:') + print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', ''))) + print('') exit(0) diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py index 1982792527..625b643d44 100644 --- a/automation/script/template-python/customize.py +++ b/automation/script/template-python/customize.py @@ -1,10 +1,11 @@ from cmind import utils import os + def preprocess(i): - print ('') - print ('Preprocessing ...') + print('') + print('Preprocessing ...') os_info = i['os_info'] @@ -16,15 +17,16 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + + return {'return': 0} - return {'return':0} def postprocess(i): - print ('') - print ('Postprocessing ...') + print('') + print('Postprocessing ...') env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/automation/script/template-python/main.py b/automation/script/template-python/main.py index 9ba7bb751d..e3302f36fa 100644 --- a/automation/script/template-python/main.py +++ b/automation/script/template-python/main.py @@ -2,9 +2,9 @@ if __name__ == "__main__": - print ('') - print ('Main script:') - print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) - print ('') + print('') + print('Main script:') + print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('') exit(0) diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py index 1982792527..625b643d44 100644 --- a/automation/script/template-pytorch/customize.py +++ b/automation/script/template-pytorch/customize.py @@ -1,10 +1,11 @@ from cmind import utils import os + def preprocess(i): - print ('') - print ('Preprocessing ...') + print('') + print('Preprocessing ...') os_info = i['os_info'] @@ -16,15 +17,16 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + + return {'return': 0} - return 
{'return':0} def postprocess(i): - print ('') - print ('Postprocessing ...') + print('') + print('Postprocessing ...') env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py index 3e49da450f..217aed3b9d 100644 --- a/automation/script/template-pytorch/main.py +++ b/automation/script/template-pytorch/main.py @@ -4,12 +4,12 @@ if __name__ == "__main__": - print ('') - print ('Main script:') - print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) - print ('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA',''))) - print ('') - print ('PyTorch version: {}'.format(torch.__version__)) - print ('') + print('') + print('Main script:') + print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', ''))) + print('') + print('PyTorch version: {}'.format(torch.__version__)) + print('') exit(0) diff --git a/automation/script/template/customize.py b/automation/script/template/customize.py index d12f9b3e1d..273999d460 100644 --- a/automation/script/template/customize.py +++ b/automation/script/template/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,10 +14,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/automation/utils/module.py b/automation/utils/module.py index 1af7e600dc..2a4851b0ad 100644 --- a/automation/utils/module.py +++ b/automation/utils/module.py @@ -3,6 +3,7 @@ from cmind.automation import Automation from cmind import utils + class CAutomation(Automation): """ Automation actions @@ -47,11 +48,11 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + print(json.dumps(i, indent=2)) - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def get_host_os_info(self, i): """ Get some host platform name (currently windows or linux) and OS bits @@ -85,17 +86,17 @@ def get_host_os_info(self, i): if platform.system().lower().startswith('win'): platform = 'windows' - info['bat_ext']='.bat' - info['set_env']='set ${key}=${value}' - info['env_separator']=';' - info['env_var']='%env_var%' - info['bat_rem']='rem ${rem}' - info['run_local_bat']='call ${bat_file}' - info['run_local_bat_from_python']='call ${bat_file}' - info['run_bat']='call ${bat_file}' - info['start_script']=['@echo off', ''] - info['env']={ - "CM_WINDOWS":"yes" + info['bat_ext'] = '.bat' + info['set_env'] = 'set ${key}=${value}' + info['env_separator'] = ';' + info['env_var'] = '%env_var%' + info['bat_rem'] = 'rem ${rem}' + info['run_local_bat'] = 'call ${bat_file}' + info['run_local_bat_from_python'] = 'call ${bat_file}' + info['run_bat'] = 'call ${bat_file}' + info['start_script'] = ['@echo off', ''] + info['env'] = { + "CM_WINDOWS": "yes" } else: if platform.system().lower().startswith('darwin'): @@ -103,17 +104,17 @@ def get_host_os_info(self, i): else: platform = 'linux' - info['bat_ext']='.sh' - info['set_env']='export ${key}="${value}"' - info['env_separator']=':' - info['env_var']='${env_var}' - info['set_exec_file']='chmod 755 "${file_name}"' - info['bat_rem']='# ${rem}' - info['run_local_bat']='. ./${bat_file}' - info['run_local_bat_from_python']='bash -c ". 
./${bat_file}"' - info['run_bat']='. ${bat_file}' - info['start_script']=['#!/bin/bash', ''] - info['env']={} + info['bat_ext'] = '.sh' + info['set_env'] = 'export ${key}="${value}"' + info['env_separator'] = ':' + info['env_var'] = '${env_var}' + info['set_exec_file'] = 'chmod 755 "${file_name}"' + info['bat_rem'] = '# ${rem}' + info['run_local_bat'] = '. ./${bat_file}' + info['run_local_bat_from_python'] = 'bash -c ". ./${bat_file}"' + info['run_bat'] = '. ${bat_file}' + info['start_script'] = ['#!/bin/bash', ''] + info['env'] = {} info['platform'] = platform @@ -122,10 +123,12 @@ def get_host_os_info(self, i): obits = '32' if platform == 'windows': # Trying to get fast way to detect bits - if os.environ.get('ProgramW6432', '') != '' or os.environ.get('ProgramFiles(x86)', '') != '': # pragma: no cover + if os.environ.get('ProgramW6432', '') != '' or os.environ.get( + 'ProgramFiles(x86)', '') != '': # pragma: no cover obits = '64' else: - # On Linux use first getconf LONG_BIT and if doesn't work use python bits + # On Linux use first getconf LONG_BIT and if doesn't work use + # python bits obits = pbits @@ -135,25 +138,26 @@ def get_host_os_info(self, i): fn = r['file_name'] - cmd = 'getconf LONG_BIT > '+fn + cmd = 'getconf LONG_BIT > ' + fn rx = os.system(cmd) if rx == 0: - r = utils.load_txt(file_name = fn, remove_after_read = True) + r = utils.load_txt(file_name=fn, remove_after_read=True) if r['return'] == 0: s = r['string'].strip() if len(s) > 0 and len(s) < 4: obits = s else: - if os.path.isfile(fn): os.remove(fn) + if os.path.isfile(fn): + os.remove(fn) info['bits'] = obits info['python_bits'] = pbits return {'return': 0, 'info': info} - ############################################################################## + ########################################################################## def download_file(self, i): """ Download file using requests @@ -190,14 +194,14 @@ def download_file(self, i): url = i['url'] # Check file name - file_name = i.get('filename','') + file_name = i.get('filename', '') if file_name == '': parsed_url = parse.urlparse(url) file_name = os.path.basename(parsed_url.path) # Check path - path = i.get('path','') - if path is None or path=='': + path = i.get('path', '') + if path is None or path == '': path = os.getcwd() # Output file @@ -206,15 +210,15 @@ def download_file(self, i): if os.path.isfile(path_to_file): os.remove(path_to_file) - print ('Downloading to {}'.format(path_to_file)) - print ('') + print('Downloading to {}'.format(path_to_file)) + print('') # Download size = -1 downloaded = 0 chunk_size = i.get('chunk_size', 65536) - text = i.get('text','Downloaded: ') + text = i.get('text', 'Downloaded: ') if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' @@ -228,22 +232,23 @@ def download_file(self, i): size_string = download.headers.get('Content-Length') if size_string is None: - transfer_encoding = download.headers.get('Transfer-Encoding', '') + transfer_encoding = download.headers.get( + 'Transfer-Encoding', '') if transfer_encoding != 'chunked': - return {'return':1, 'error':'did not receive file'} + return {'return': 1, 'error': 'did not receive file'} else: size_string = "0" size = int(size_string) with open(path_to_file, 'wb') as output: - for chunk in download.iter_content(chunk_size = chunk_size): + for chunk in download.iter_content(chunk_size=chunk_size): if chunk: output.write(chunk) if size == 0: continue - downloaded+=1 + downloaded += 1 percent = downloaded * chunk_size * 100 
/ size sys.stdout.write("\r{}{:3.0f}%".format(text, percent)) @@ -253,16 +258,17 @@ def download_file(self, i): sys.stdout.flush() except Exception as e: - return {'return':1, 'error':format(e)} + return {'return': 1, 'error': format(e)} - print ('') + print('') if size == 0: - file_stats=os.stat(path_to_file) + file_stats = os.stat(path_to_file) size = file_stats.st_size - return {'return': 0, 'filename':file_name, 'path': path_to_file, 'size':size} + return {'return': 0, 'filename': file_name, + 'path': path_to_file, 'size': size} - ############################################################################## + ########################################################################## def unzip_file(self, i): """ Unzip file @@ -288,7 +294,8 @@ def unzip_file(self, i): file_name = i['filename'] if not os.path.isfile(file_name): - return {'return':1, 'error':'file {} not found'.format(file_name)} + return {'return': 1, + 'error': 'file {} not found'.format(file_name)} console = i.get('out') == 'con' @@ -296,24 +303,25 @@ def unzip_file(self, i): file_name_handle = open(file_name, 'rb') file_name_zip = zipfile.ZipFile(file_name_handle) - info_files=file_name_zip.infolist() + info_files = file_name_zip.infolist() - path=i.get('path','') - if path is None or path=='': - path=os.getcwd() + path = i.get('path', '') + if path is None or path == '': + path = os.getcwd() - strip_folders = i.get('strip_folders',0) + strip_folders = i.get('strip_folders', 0) # Unpacking zip for info in info_files: f = info.filename permissions = info.external_attr - if not f.startswith('..') and not f.startswith('/') and not f.startswith('\\'): + if not f.startswith('..') and not f.startswith( + '/') and not f.startswith('\\'): f_zip = f - if strip_folders>0: - fsplit = f.split('/') # Zip standard on all OS + if strip_folders > 0: + fsplit = f.split('/') # Zip standard on all OS f = '/'.join(fsplit[strip_folders:]) file_path = os.path.join(path, f) @@ -338,9 +346,9 @@ def unzip_file(self, i): file_name_zip.close() file_name_handle.close() - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def compare_versions(self, i): """ Compare versions @@ -386,9 +394,9 @@ def compare_versions(self, i): comparison = -1 break - return {'return':0, 'comparison': comparison} + return {'return': 0, 'comparison': comparison} - ############################################################################## + ########################################################################## def json2yaml(self, i): """ Convert JSON file to YAML @@ -405,28 +413,31 @@ def json2yaml(self, i): * (error) (str): error string if return>0 """ - input_file = i.get('input','') + input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={json file}'} + return {'return': 1, 'error': 'please specify --input={json file}'} - output_file = i.get('output','') + output_file = i.get('output', '') - r = utils.load_json(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_json(input_file, check_if_exists=True) + if r['return'] > 0: + return r meta = r['meta'] - if output_file=='': - output_file = input_file[:-5] if input_file.endswith('.json') else input_file - output_file+='.yaml' + if output_file == '': + output_file = input_file[:- + 5] if input_file.endswith('.json') else input_file + output_file += '.yaml' r = 
utils.save_yaml(output_file, meta) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def yaml2json(self, i): """ Convert YAML file to JSON @@ -443,28 +454,31 @@ def yaml2json(self, i): * (error) (str): error string if return>0 """ - input_file = i.get('input','') + input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={yaml file}'} + return {'return': 1, 'error': 'please specify --input={yaml file}'} - output_file = i.get('output','') + output_file = i.get('output', '') - r = utils.load_yaml(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_yaml(input_file, check_if_exists=True) + if r['return'] > 0: + return r meta = r['meta'] - if output_file=='': - output_file = input_file[:-5] if input_file.endswith('.yaml') else input_file - output_file+='.json' + if output_file == '': + output_file = input_file[:- + 5] if input_file.endswith('.yaml') else input_file + output_file += '.json' r = utils.save_json(output_file, meta) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def sort_json(self, i): """ Sort JSON file @@ -481,27 +495,29 @@ def sort_json(self, i): * (error) (str): error string if return>0 """ - input_file = i.get('input','') + input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={json file}'} + return {'return': 1, 'error': 'please specify --input={json file}'} - r = utils.load_json(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_json(input_file, check_if_exists=True) + if r['return'] > 0: + return r meta = r['meta'] - output_file = i.get('output','') + output_file = i.get('output', '') - if output_file=='': + if output_file == '': output_file = input_file r = utils.save_json(output_file, meta, sort_keys=True) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def dos2unix(self, i): """ Convert DOS file to UNIX (remove \r) @@ -518,27 +534,29 @@ def dos2unix(self, i): * (error) (str): error string if return>0 """ - input_file = i.get('input','') + input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={txt file}'} + return {'return': 1, 'error': 'please specify --input={txt file}'} - r = utils.load_txt(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r - s = r['string'].replace('\r','') + s = r['string'].replace('\r', '') - output_file = i.get('output','') + output_file = i.get('output', '') - if output_file=='': + if output_file == '': output_file = input_file r = utils.save_txt(output_file, s) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + 
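For reference, the json2yaml/yaml2json helpers above derive the output name by stripping the old extension (five characters, '.json' or '.yaml') and appending the new one. A rough standalone equivalent of json2yaml, assuming PyYAML and plain file I/O in place of cmind's utils.load_json/save_yaml:

import json
import yaml  # PyYAML, assumed available


def json2yaml(input_file, output_file=''):
    # Derive the output name the same way as the module above.
    if output_file == '':
        base = input_file[:-5] if input_file.endswith('.json') else input_file
        output_file = base + '.yaml'
    with open(input_file, 'r', encoding='utf-8') as f:
        meta = json.load(f)
    with open(output_file, 'w', encoding='utf-8') as f:
        yaml.dump(meta, f)
    return output_file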
########################################################################## def replace_string_in_file(self, i): """ Convert DOS file to UNIX (remove \r) @@ -561,34 +579,38 @@ def replace_string_in_file(self, i): input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={txt file}'} + return {'return': 1, 'error': 'please specify --input={txt file}'} string = i.get('string', '') if string == '': - return {'return':1, 'error':'please specify --string={string to replace}'} + return {'return': 1, + 'error': 'please specify --string={string to replace}'} replacement = i.get('replacement', '') if replacement == '': - return {'return':1, 'error':'please specify --replacement={string to replace}'} + return {'return': 1, + 'error': 'please specify --replacement={string to replace}'} - output_file = i.get('output','') + output_file = i.get('output', '') - if output_file=='': + if output_file == '': output_file = input_file - r = utils.load_txt(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r - s = r['string'].replace('\r','') + s = r['string'].replace('\r', '') s = s.replace(string, replacement) r = utils.save_txt(output_file, s) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def create_toc_from_md(self, i): """ Convert DOS file to UNIX (remove \r) @@ -607,15 +629,16 @@ def create_toc_from_md(self, i): input_file = i.get('input', '') if input_file == '': - return {'return':1, 'error':'please specify --input={txt file}'} + return {'return': 1, 'error': 'please specify --input={txt file}'} - output_file = i.get('output','') + output_file = i.get('output', '') - if output_file=='': + if output_file == '': output_file = input_file + '.toc' - r = utils.load_txt(input_file, check_if_exists = True) - if r['return']>0: return r + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r lines = r['string'].split('\n') @@ -630,33 +653,34 @@ def create_toc_from_md(self, i): if line.startswith('#'): j = line.find(' ') - if j>=0: + if j >= 0: title = line[j:].strip() - x = title.lower().replace(' ','-') + x = title.lower().replace(' ', '-') - for k in range(0,2): + for k in range(0, 2): if x.startswith('*'): - x=x[1:] + x = x[1:] if x.endswith('*'): - x=x[:-1] + x = x[:-1] for z in [':', '+', '.', '(', ')', ',']: x = x.replace(z, '') - y = ' '*(2*(j-1)) + '* ['+title+'](#'+x+')' + y = ' ' * (2 * (j - 1)) + '* [' + title + '](#' + x + ')' toc.append(y) toc.append('') toc.append('
') - r = utils.save_txt(output_file, '\n'.join(toc)+'\n') - if r['return']>0: return r + r = utils.save_txt(output_file, '\n'.join(toc) + '\n') + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def copy_to_clipboard(self, i): """ Copy string to a clipboard @@ -674,9 +698,10 @@ def copy_to_clipboard(self, i): * (error) (str): error string if return>0 """ - s = i.get('string','') + s = i.get('string', '') - if i.get('add_quotes',False): s='"'+s+'"' + if i.get('add_quotes', False): + s = '"' + s + '"' failed = False warning = '' @@ -724,17 +749,17 @@ def copy_to_clipboard(self, i): failed = True warning = format(e) - rr = {'return':0} + rr = {'return': 0} if failed: - if not i.get('skip_fail',False): - return {'return':1, 'error':warning} + if not i.get('skip_fail', False): + return {'return': 1, 'error': warning} - rr['warning']=warning + rr['warning'] = warning return rr - ############################################################################## + ########################################################################## def list_files_recursively(self, i): """ List files and concatenate into string separate by comma @@ -754,20 +779,21 @@ def list_files_recursively(self, i): for (dir_path, dir_names, file_names) in files: for f in file_names: - if s!='': s+=',' + if s != '': + s += ',' - if dir_path=='.': - dir_path2='' + if dir_path == '.': + dir_path2 = '' else: - dir_path2=dir_path[2:].replace('\\','/')+'/' + dir_path2 = dir_path[2:].replace('\\', '/') + '/' - s+=dir_path2+f + s += dir_path2 + f - print (s) + print(s) - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def generate_secret(self, i): """ Generate secret for web apps @@ -786,11 +812,11 @@ def generate_secret(self, i): import secrets s = secrets.token_urlsafe(16) - print (s) + print(s) - return {'return':0, 'secret': s} + return {'return': 0, 'secret': s} - ############################################################################## + ########################################################################## def detect_tags_in_artifact(self, i): """ Detect if there are tags in an artifact name (spaces) and update input @@ -808,18 +834,19 @@ def detect_tags_in_artifact(self, i): inp = i['input'] - artifact = inp.get('artifact','') + artifact = inp.get('artifact', '') if artifact == '.': - del(inp['artifact']) - elif ' ' in artifact: # or ',' in artifact: - del(inp['artifact']) - if 'parsed_artifact' in inp: del(inp['parsed_artifact']) + del (inp['artifact']) + elif ' ' in artifact: # or ',' in artifact: + del (inp['artifact']) + if 'parsed_artifact' in inp: + del (inp['parsed_artifact']) # Force substitute tags - inp['tags']=artifact.replace(' ',',') + inp['tags'] = artifact.replace(' ', ',') - return {'return':0} + return {'return': 0} - ############################################################################## + ########################################################################## def prune_input(self, i): """ Leave only input keys and remove the rest (to regenerate CM commands) @@ -842,12 +869,13 @@ def prune_input(self, i): import copy inp = i['input'] - extra_keys = i.get('extra_keys_starts_with',[]) + extra_keys = i.get('extra_keys_starts_with', []) i_run_cmd_arc = 
copy.deepcopy(inp) for k in inp: remove = False - if k in ['action', 'automation', 'cmd', 'out', 'parsed_automation', 'parsed_artifact', 'self_module']: + if k in ['action', 'automation', 'cmd', 'out', + 'parsed_automation', 'parsed_artifact', 'self_module']: remove = True if not remove: for ek in extra_keys: @@ -856,12 +884,12 @@ def prune_input(self, i): break if remove: - del(i_run_cmd_arc[k]) + del (i_run_cmd_arc[k]) - return {'return':0, 'new_input':i_run_cmd_arc} + return {'return': 0, 'new_input': i_run_cmd_arc} + ########################################################################## - ############################################################################## def uid(self, i): """ Generate CM UID. @@ -883,12 +911,12 @@ def uid(self, i): r = utils.gen_uid() if console: - print (r['uid']) + print(r['uid']) return r + ########################################################################## - ############################################################################## def system(self, i): """ Run system command and redirect output to string. @@ -916,32 +944,34 @@ def system(self, i): cmd = i['cmd'] if cmd == '': - return {'return':1, 'error': 'cmd is empty'} + return {'return': 1, 'error': 'cmd is empty'} - path = i.get('path','') - if path!='' and os.path.isdir(path): + path = i.get('path', '') + if path != '' and os.path.isdir(path): cur_dir = os.getcwd() os.chdir(path) - if i.get('stdout','')!='': - fn1=i['stdout'] + if i.get('stdout', '') != '': + fn1 = i['stdout'] fn1_delete = False else: r = utils.gen_tmp_file({}) - if r['return'] > 0: return r + if r['return'] > 0: + return r fn1 = r['file_name'] fn1_delete = True - if i.get('stderr','')!='': - fn2=i['stderr'] + if i.get('stderr', '') != '': + fn2 = i['stderr'] fn2_delete = False else: r = utils.gen_tmp_file({}) - if r['return'] > 0: return r + if r['return'] > 0: + return r fn2 = r['file_name'] fn2_delete = True - cmd += ' > '+fn1 + ' 2> '+fn2 + cmd += ' > ' + fn1 + ' 2> ' + fn2 rx = os.system(cmd) std = '' @@ -949,22 +979,26 @@ def system(self, i): stderr = '' if os.path.isfile(fn1): - r = utils.load_txt(file_name = fn1, remove_after_read = fn1_delete) - if r['return'] == 0: stdout = r['string'].strip() + r = utils.load_txt(file_name=fn1, remove_after_read=fn1_delete) + if r['return'] == 0: + stdout = r['string'].strip() if os.path.isfile(fn2): - r = utils.load_txt(file_name = fn2, remove_after_read = fn2_delete) - if r['return'] == 0: stderr = r['string'].strip() + r = utils.load_txt(file_name=fn2, remove_after_read=fn2_delete) + if r['return'] == 0: + stderr = r['string'].strip() std = stdout - if stderr!='': - if std!='': std+='\n' - std+=stderr + if stderr != '': + if std != '': + std += '\n' + std += stderr - if path!='' and os.path.isdir(path): + if path != '' and os.path.isdir(path): os.chdir(cur_dir) - return {'return':0, 'ret':rx, 'stdout':stdout, 'stderr':stderr, 'std':std} + return {'return': 0, 'ret': rx, 'stdout': stdout, + 'stderr': stderr, 'std': std} ############################################################ def load_cfg(self, i): @@ -983,7 +1017,8 @@ def load_cfg(self, i): """ - return utils.call_internal_module(self, __file__, 'module_cfg', 'load_cfg', i) + return utils.call_internal_module( + self, __file__, 'module_cfg', 'load_cfg', i) ############################################################ def select_cfg(self, i): @@ -1005,7 +1040,8 @@ def select_cfg(self, i): i['self_module'] = self - return utils.call_internal_module(self, __file__, 'module_cfg', 'select_cfg', i) + return 
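Note that utils.system() above captures command output by appending shell redirections to temporary files and reading them back, rather than by using subprocess. A minimal standalone sketch of the same technique (a hypothetical helper, using tempfile instead of cmind's gen_tmp_file):

import os
import tempfile


def run_and_capture(cmd):
    # Redirect stdout and stderr to temp files, as utils.system() does.
    fd1, fn1 = tempfile.mkstemp()
    os.close(fd1)
    fd2, fn2 = tempfile.mkstemp()
    os.close(fd2)
    ret = os.system(cmd + ' > ' + fn1 + ' 2> ' + fn2)
    with open(fn1) as f:
        stdout = f.read().strip()
    with open(fn2) as f:
        stderr = f.read().strip()
    os.remove(fn1)
    os.remove(fn2)
    return {'return': 0, 'ret': ret, 'stdout': stdout, 'stderr': stderr}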
utils.call_internal_module( + self, __file__, 'module_cfg', 'select_cfg', i) ############################################################ def print_yaml(self, i): @@ -1026,17 +1062,18 @@ def print_yaml(self, i): filename = i.get('file', '') if filename == '': - return {'return':1, 'error':'please specify --file={YAML file}'} + return {'return': 1, 'error': 'please specify --file={YAML file}'} - r = utils.load_yaml(filename,check_if_exists = True) - if r['return']>0: return r + r = utils.load_yaml(filename, check_if_exists=True) + if r['return'] > 0: + return r meta = r['meta'] import json - print (json.dumps(meta, indent=2)) + print(json.dumps(meta, indent=2)) - return {'return':0} + return {'return': 0} ############################################################ def print_json(self, i): @@ -1057,14 +1094,15 @@ def print_json(self, i): filename = i.get('file', '') if filename == '': - return {'return':1, 'error':'please specify --file={JSON file}'} + return {'return': 1, 'error': 'please specify --file={JSON file}'} - r = utils.load_json(filename,check_if_exists = True) - if r['return']>0: return r + r = utils.load_json(filename, check_if_exists=True) + if r['return'] > 0: + return r meta = r['meta'] import json - print (json.dumps(meta, indent=2)) + print(json.dumps(meta, indent=2)) - return {'return':0} + return {'return': 0} diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py index 9e58d6ab47..36ec30915c 100644 --- a/automation/utils/module_cfg.py +++ b/automation/utils/module_cfg.py @@ -2,37 +2,40 @@ import cmind import copy -base_path={} -base_path_meta={} +base_path = {} +base_path_meta = {} + +########################################################################## + -################################################################################## def load_cfg(i): - tags = i.get('tags','') - artifact = i.get('artifact','') + tags = i.get('tags', '') + artifact = i.get('artifact', '') key = i.get('key', '') key_end = i.get('key_end', []) - ii={'action':'find', - 'automation':'cfg'} - if artifact!='': - ii['artifact']=artifact - elif tags!='': - ii['tags']=tags + ii = {'action': 'find', + 'automation': 'cfg'} + if artifact != '': + ii['artifact'] = artifact + elif tags != '': + ii['tags'] = tags - r=cmind.access(ii) - if r['return']>0: return r + r = cmind.access(ii) + if r['return'] > 0: + return r lst = r['list'] - prune = i.get('prune',{}) + prune = i.get('prune', {}) prune_key = prune.get('key', '') prune_key_uid = prune.get('key_uid', '') prune_meta_key = prune.get('meta_key', '') prune_meta_key_uid = prune.get('meta_key_uid', '') prune_uid = prune.get('uid', '') - prune_list = prune.get('list',[]) + prune_list = prune.get('list', []) # Checking individual files inside CM entry selection = [] @@ -42,11 +45,11 @@ def load_cfg(i): meta = l.meta full_path = l.path - meta['full_path']=full_path + meta['full_path'] = full_path add = True - if prune_key!='' and prune_key_uid!='': + if prune_key != '' and prune_key_uid != '': if prune_key_uid not in meta.get(prune_key, []): add = False @@ -60,25 +63,26 @@ def load_cfg(i): skip = False - if prune_meta_key!='' and prune_meta_key_uid!='': + if prune_meta_key != '' and prune_meta_key_uid != '': if prune_meta_key_uid not in main_meta.get(prune_meta_key, []): skip = True if skip: continue - all_tags = main_meta.get('tags',[]) + all_tags = main_meta.get('tags', []) files = os.listdir(path) for f in files: - if key!='' and not f.startswith(key): + if key != '' and not f.startswith(key): continue - if 
f.startswith('_') or (not f.endswith('.json') and not f.endswith('.yaml')): + if f.startswith('_') or (not f.endswith( + '.json') and not f.endswith('.yaml')): continue - if len(key_end)>0: + if len(key_end) > 0: skip = True for ke in key_end: if f.endswith(ke): @@ -92,14 +96,15 @@ def load_cfg(i): full_path_without_ext = full_path[:-5] r = cmind.utils.load_yaml_and_json(full_path_without_ext) - if r['return']>0: - print ('Warning: problem loading file {}'.format(full_path)) + if r['return'] > 0: + print('Warning: problem loading file {}'.format(full_path)) else: meta = r['meta'] # Check base r = process_base(meta, full_path) - if r['return']>0: return r + if r['return'] > 0: + return r meta = r['meta'] uid = meta['uid'] @@ -107,43 +112,48 @@ def load_cfg(i): # Check pruning add = True - if len(prune)>0: - if prune_uid!='' and uid != prune_uid: + if len(prune) > 0: + if prune_uid != '' and uid != prune_uid: add = False - if add and len(prune_list)>0 and uid not in prune_list: + if add and len( + prune_list) > 0 and uid not in prune_list: add = False - if add and prune_key!='' and prune_key_uid!='' and prune_key_uid != meta.get(prune_key, None): + if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get( + prune_key, None): add = False if add: - meta['full_path']=full_path + meta['full_path'] = full_path add_all_tags = copy.deepcopy(all_tags) - name = meta.get('name','') - if name=='': - name = ' '.join(meta.get('tags',[])) + name = meta.get('name', '') + if name == '': + name = ' '.join(meta.get('tags', [])) name = name.strip() meta['name'] = name file_tags = meta.get('tags', '').strip() - if file_tags=='': - if name!='': - add_all_tags += [v.lower() for v in name.split(' ')] + if file_tags == '': + if name != '': + add_all_tags += [v.lower() + for v in name.split(' ')] else: add_all_tags += file_tags.split(',') - meta['all_tags']=add_all_tags + meta['all_tags'] = add_all_tags - meta['main_meta']=main_meta + meta['main_meta'] = main_meta selection.append(meta) - return {'return':0, 'lst':lst, 'selection':selection} + return {'return': 0, 'lst': lst, 'selection': selection} + +########################################################################## + -################################################################################## def process_base(meta, full_path): global base_path, base_path_meta @@ -156,7 +166,8 @@ def process_base(meta, full_path): full_path_base = os.path.dirname(full_path) if not filename.endswith('.yaml') and not filename.endswith('.json'): - return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} + return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format( + filename, full_path)} if ':' in _base: x = _base.split(':') @@ -166,16 +177,18 @@ def process_base(meta, full_path): if full_path_base == '': # Find artifact - r = cmind.access({'action':'find', - 'automation':'cfg', - 'artifact':name}) - if r['return']>0: return r + r = cmind.access({'action': 'find', + 'automation': 'cfg', + 'artifact': name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: + if len(lst) == 0: if not os.path.isfile(path): - return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)} + return {'return': 1, 'error': '_base artifact {} not found in {}'.format( + name, full_path)} full_path_base = lst[0].path @@ -187,7 +200,8 @@ def process_base(meta, full_path): path = os.path.join(full_path_base, filename) if not os.path.isfile(path): - return 
{'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)}
+            return {'return': 1, 'error': '_base file {} not found in {}'.format(
+                filename, full_path)}

     if path in base_path_meta:
         base = copy.deepcopy(base_path_meta[path])
@@ -195,36 +209,37 @@ def process_base(meta, full_path):
         path_without_ext = path[:-5]

         r = cmind.utils.load_yaml_and_json(path_without_ext)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         base = r['meta']

-        base_path_meta[path]=copy.deepcopy(base)
+        base_path_meta[path] = copy.deepcopy(base)

     for k in meta:
         v = meta[k]

         if k not in base:
-            base[k]=v
+            base[k] = v
         else:
             if isinstance(v, str):
                 # Only merge a few special keys and overwrite the rest
-                if k in ['tags','name']:
+                if k in ['tags', 'name']:
                     base[k] += meta[k]
                 else:
                     base[k] = meta[k]
-            elif type(v) == list:
-                for vv in v:
+            elif isinstance(v, list):
+                for vv in v:
                     base[k].append(vv)
-            elif type(v) == dict:
-                base[k].merge(v)
+            elif isinstance(v, dict):
+                base[k].merge(v)

     meta = base

-    return {'return':0, 'meta':meta}
+    return {'return': 0, 'meta': meta}
+
+##########################################################################

-##################################################################################
 def select_cfg(i):

     self_module = i['self_module']
@@ -234,8 +249,9 @@ def select_cfg(i):
     title = i.get('title', '')

     # Check if alias is not provided
-    r = self_module.cmind.access({'action':'find', 'automation':'cfg', 'tags':'basic,docker,configurations'})
-    if r['return'] > 0: return r
+    r = self_module.cmind.access(
+        {'action': 'find', 'automation': 'cfg', 'tags': 'basic,docker,configurations'})
+    if r['return'] > 0:
+        return r

     lst = r['list']
@@ -247,18 +263,18 @@ def select_cfg(i):

     if alias != '':
         for ext in ['.json', '.yaml']:
-            p1 = os.path.join(p, alias+ext)
+            p1 = os.path.join(p, alias + ext)
             if os.path.isfile(p1):
-                selector.append({'path':p1, 'alias':alias})
+                selector.append({'path': p1, 'alias': alias})
                 break

     else:
         files = os.listdir(p)
         for f in files:
-            if not f.startswith('_cm') and (f.endswith('.json') or f.endswith('.yaml')):
-                selector.append({'path':os.path.join(p, f), 'alias':f[:-5]})
-
+            if not f.startswith('_cm') and (
+                    f.endswith('.json') or f.endswith('.yaml')):
+                selector.append({'path': os.path.join(p, f), 'alias': f[:-5]})

     # Load meta for name and UID
     selector_with_meta = []
@@ -270,8 +286,8 @@ def select_cfg(i):
         full_path_without_ext = path[:-5]

         r = cmind.utils.load_yaml_and_json(full_path_without_ext)
-        if r['return']>0:
-            print ('Warning: problem loading configuration file {}'.format(path))
+        if r['return'] > 0:
+            print('Warning: problem loading configuration file {}'.format(path))

         meta = r['meta']
@@ -281,17 +297,17 @@ def select_cfg(i):

     # Quit if no configurations found
     if len(selector_with_meta) == 0:
-        return {'return':16, 'error':'configuration was not found'}
+        return {'return': 16, 'error': 'configuration was not found'}

     select = 0

     if len(selector_with_meta) > 1:
-        xtitle = ' ' + title if title!='' else ''
-        print ('')
-        print ('Available{} configurations:'.format(xtitle))
+        xtitle = ' ' + title if title != '' else ''
+        print('')
+        print('Available{} configurations:'.format(xtitle))

-        print ('')
+        print('')

-        selector_with_meta = sorted(selector_with_meta, key = lambda x: x['meta'].get('name',''))
+        selector_with_meta = sorted(
+            selector_with_meta, key=lambda x: x['meta'].get('name', ''))
         s = 0
         for ss in selector_with_meta:
             alias = ss['alias']
             uid = ss['meta'].get('uid', '')
             name = ss['meta'].get('name', '')

             x = name
-            if x!='': x+=' '
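The merge rules of process_base() in the hunk above are easy to misread in diff form, so here is a minimal self-contained sketch of the same behavior (merge_with_base() is a hypothetical helper, not part of this commit): string values overwrite the base entry, except 'tags' and 'name', which are concatenated; lists are appended; dicts are merged. Note that the original code calls base[k].merge(v), which assumes a dict-like object providing merge(); for plain Python dicts, update() is the closest stand-in, as used below.

import copy


def merge_with_base(base, meta):
    # Overlay 'meta' onto a deep copy of 'base' using the rules of process_base().
    result = copy.deepcopy(base)
    for k, v in meta.items():
        if k not in result:
            result[k] = v
        elif isinstance(v, str):
            # Only 'tags' and 'name' are concatenated; other strings overwrite.
            result[k] = result[k] + v if k in ['tags', 'name'] else v
        elif isinstance(v, list):
            result[k] += v
        elif isinstance(v, dict):
            result[k].update(v)  # plain-dict stand-in for base[k].merge(v)
    return result


# Example: 'alias' is overwritten, 'tags' concatenated, 'env' merged.
base = {'alias': 'cfg-base', 'tags': 'basic', 'env': {'A': 1}}
meta = {'alias': 'cfg-child', 'tags': ',docker', 'env': {'B': 2}}
print(merge_with_base(base, meta))
# {'alias': 'cfg-child', 'tags': 'basic,docker', 'env': {'A': 1, 'B': 2}}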
+            if x != '':
+                x += ' '
             x += '(' + uid + ')'

-            print (f'{s}) {x}'.format(s, x))
+            print(f'{s}) {x}'.format(s, x))

-            s+=1
+            s += 1

-        print ('')
-        select = input ('Enter configuration number of press Enter for 0: ')
+        print('')
+        select = input('Enter configuration number of press Enter for 0: ')

-        if select.strip() == '': select = '0'
+        if select.strip() == '':
+            select = '0'

         select = int(select)

-        if select<0 or select>=len(selector):
-            return {'return':1, 'error':'selection is out of range'}
+        if select < 0 or select >= len(selector):
+            return {'return': 1, 'error': 'selection is out of range'}

     ss = selector_with_meta[select]

-    return {'return':0, 'selection':ss}
+    return {'return': 0, 'selection': ss}
diff --git a/debug.py b/debug.py
index 9fd54803c3..db6003d731 100644
--- a/debug.py
+++ b/debug.py
@@ -5,5 +5,6 @@
 print(sys.executable)

-r = cmind.access('run script "print hello-world python" --debug_uid=f52670e5f3f345a2')
+r = cmind.access(
+    'run script "print hello-world python" --debug_uid=f52670e5f3f345a2')

 print(r)
diff --git a/get_git_version.py b/get_git_version.py
index 4b55cc2c82..0524f014ba 100644
--- a/get_git_version.py
+++ b/get_git_version.py
@@ -1,5 +1,6 @@
 import subprocess

+
 def get_git_commit_hash():
     try:
         commit_hash = subprocess.check_output(
@@ -10,5 +11,6 @@ def get_git_commit_hash():
     except Exception:
         return "unknown"

+
 if __name__ == "__main__":
     print(get_git_commit_hash())
diff --git a/git_commit_hash.txt b/git_commit_hash.txt
index 98c6c6eef7..8a5eebd913 100644
--- a/git_commit_hash.txt
+++ b/git_commit_hash.txt
@@ -1 +1 @@
-8768d67e1f9187005bdb3c7f325e42998dc7fd8a
+680b1c5e6efc432df34f77731911e3977069c99c
diff --git a/script/activate-python-venv/customize.py b/script/activate-python-venv/customize.py
index 938a016a05..5858212f93 100644
--- a/script/activate-python-venv/customize.py
+++ b/script/activate-python-venv/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -13,17 +14,18 @@ def preprocess(i):

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    name = env.get('CM_NAME','')
+    name = env.get('CM_NAME', '')

     if name != '':
         name = name.strip().lower()

-        r = automation.update_deps({'deps':meta['prehook_deps'],
-                                    'update_deps':{
-                                     'python-venv':{
-                                      'name':name
-                                     }
-                                    }
-                                   })
-        if r['return']>0: return r
-
-    return {'return':0}
+        r = automation.update_deps({'deps': meta['prehook_deps'],
+                                    'update_deps': {
+                                        'python-venv': {
+                                            'name': name
+                                        }
+                                    }
+                                    })
+        if r['return'] > 0:
+            return r
+
+    return {'return': 0}
diff --git a/script/add-custom-nvidia-system/customize.py b/script/add-custom-nvidia-system/customize.py
index e9573338b1..dbab0597ea 100644
--- a/script/add-custom-nvidia-system/customize.py
+++ b/script/add-custom-nvidia-system/customize.py
@@ -2,16 +2,18 @@
 import os
 import shutil

+
 def preprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

@@ -19,4 +21,4 @@ def postprocess(i):

     env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py
index 1009beb13b..21b57daca2 100644
--- a/script/app-image-classification-onnx-py/customize.py
+++ 
b/script/app-image-classification-onnx-py/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] @@ -10,7 +11,8 @@ def preprocess(i): # print ('') # print ('Running preprocess function in customize.py ...') - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -25,41 +27,40 @@ def postprocess(i): # Saving predictions to JSON file to current directory # Should work with "cm docker script" ? - data = state.get('cm_app_image_classification_onnx_py',{}) + data = state.get('cm_app_image_classification_onnx_py', {}) fjson = 'cm-image-classification-onnx-py.json' fyaml = 'cm-image-classification-onnx-py.yaml' - output=env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT','') - if output!='': + output = env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '') + if output != '': if not os.path.exists(output): os.makedirs(output) - fjson=os.path.join(output, fjson) - fyaml=os.path.join(output, fyaml) + fjson = os.path.join(output, fjson) + fyaml = os.path.join(output, fyaml) try: import json with open(fjson, 'w', encoding='utf-8') as f: json.dump(data, f, ensure_ascii=False, indent=4) except Exception as e: - print ('CM warning: {}'.format(e)) - + print('CM warning: {}'.format(e)) try: import yaml with open(fyaml, 'w', encoding='utf-8') as f: yaml.dump(data, f) except Exception as e: - print ('CM warning: {}'.format(e)) + print('CM warning: {}'.format(e)) - top_classification = data.get('top_classification','') + top_classification = data.get('top_classification', '') - if env.get('CM_TMP_SILENT','')!='yes': - if top_classification!='': - print ('') + if env.get('CM_TMP_SILENT', '') != 'yes': + if top_classification != '': + print('') x = 'Top classification: {}'.format(top_classification) - print ('='*len(x)) - print (x) + print('=' * len(x)) + print(x) - return {'return':0} + return {'return': 0} diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index 8057c39b5a..c2c5a6ceb0 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 -# Extended by Grigori Fursin to support MLCommons CM workflow automation language +# Extended by Grigori Fursin to support MLCommons CM workflow automation +# language import os import onnxruntime as rt @@ -10,21 +11,27 @@ from PIL import Image -model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH'] -input_layer_name = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME'] -output_layer_name = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME'] -normalize_data_bool = os.getenv('CK_ENV_ONNX_MODEL_NORMALIZE_DATA', '0') in ('YES', 'yes', 'ON', 'on', '1') -subtract_mean_bool = os.getenv('CK_ENV_ONNX_MODEL_SUBTRACT_MEAN', '0') in ('YES', 'yes', 'ON', 'on', '1') -given_channel_means = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS','') +model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH'] +input_layer_name = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME'] +output_layer_name = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME'] +normalize_data_bool = os.getenv( + 'CK_ENV_ONNX_MODEL_NORMALIZE_DATA', '0') in ( + 'YES', 'yes', 'ON', 'on', '1') +subtract_mean_bool = os.getenv( + 'CK_ENV_ONNX_MODEL_SUBTRACT_MEAN', '0') in ( + 'YES', 'yes', 'ON', 'on', '1') +given_channel_means = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS', '') if given_channel_means: - given_channel_means = np.array(given_channel_means.split(' '), 
dtype=np.float32) + given_channel_means = np.array( + given_channel_means.split(' '), + dtype=np.float32) -imagenet_path = os.environ['CK_ENV_DATASET_IMAGENET_VAL'] -labels_path = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] -data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] -batch_size = int( os.environ['CK_BATCH_SIZE'] ) -batch_count = int( os.environ['CK_BATCH_COUNT'] ) -CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS',0)) +imagenet_path = os.environ['CK_ENV_DATASET_IMAGENET_VAL'] +labels_path = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] +data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] +batch_size = int(os.environ['CK_BATCH_SIZE']) +batch_count = int(os.environ['CK_BATCH_COUNT']) +CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS', 0)) def load_labels(labels_filepath): @@ -36,16 +43,17 @@ def load_labels(labels_filepath): def load_and_resize_image(image_filepath, height, width): - pillow_img = Image.open(image_filepath).resize((width, height)) # sic! The order of dimensions in resize is (W,H) + # sic! The order of dimensions in resize is (W,H) + pillow_img = Image.open(image_filepath).resize((width, height)) # Grigori fixed below - #input_data = np.float32(pillow_img) - input_data=np.asarray(pillow_img) - input_data=np.asarray(input_data, np.float32) + # input_data = np.float32(pillow_img) + input_data = np.asarray(pillow_img) + input_data = np.asarray(input_data, np.float32) # Normalize if normalize_data_bool: - input_data = input_data/127.5 - 1.0 + input_data = input_data / 127.5 - 1.0 # Subtract mean value if subtract_mean_bool: @@ -61,7 +69,7 @@ def load_and_resize_image(image_filepath, height, width): # print(nhwc_data.shape) return nhwc_data else: - nchw_data = nhwc_data.transpose(0,3,1,2) + nchw_data = nhwc_data.transpose(0, 3, 1, 2) # print(nchw_data.shape) return nchw_data @@ -70,38 +78,47 @@ def load_a_batch(batch_filenames): unconcatenated_batch_data = [] for image_filename in batch_filenames: image_filepath = image_filename - nchw_data = load_and_resize_image( image_filepath, height, width ) - unconcatenated_batch_data.append( nchw_data ) + nchw_data = load_and_resize_image(image_filepath, height, width) + unconcatenated_batch_data.append(nchw_data) batch_data = np.concatenate(unconcatenated_batch_data, axis=0) return batch_data - -#print("Device: " + rt.get_device()) - +# print("Device: " + rt.get_device()) sess_options = rt.SessionOptions() if CPU_THREADS > 0: sess_options.enable_sequential_execution = False sess_options.session_thread_pool_size = CPU_THREADS -if len(rt.get_all_providers()) > 1 and os.environ.get("USE_CUDA", "yes").lower() not in [ "0", "false", "off", "no" ]: - #Currently considering only CUDAExecutionProvider - sess = rt.InferenceSession(model_path, sess_options, providers=['CUDAExecutionProvider']) +if len(rt.get_all_providers()) > 1 and os.environ.get( + "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]: + # Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession( + model_path, + sess_options, + providers=['CUDAExecutionProvider']) else: - sess = rt.InferenceSession(model_path, sess_options, providers=["CPUExecutionProvider"]) - -input_layer_names = [ x.name for x in sess.get_inputs() ] # FIXME: check that input_layer_name belongs to this list -input_layer_name = input_layer_name or input_layer_names[0] - -output_layer_names = [ x.name for x in sess.get_outputs() ] # FIXME: check that output_layer_name belongs to this list -output_layer_name = output_layer_name or 
output_layer_names[0] - -model_input_shape = sess.get_inputs()[0].shape -model_classes = sess.get_outputs()[1].shape[1] -labels = load_labels(labels_path) -bg_class_offset = model_classes-len(labels) # 1 means the labels represent classes 1..1000 and the background class 0 has to be skipped + sess = rt.InferenceSession( + model_path, + sess_options, + providers=["CPUExecutionProvider"]) + +# FIXME: check that input_layer_name belongs to this list +input_layer_names = [x.name for x in sess.get_inputs()] +input_layer_name = input_layer_name or input_layer_names[0] + +# FIXME: check that output_layer_name belongs to this list +output_layer_names = [x.name for x in sess.get_outputs()] +output_layer_name = output_layer_name or output_layer_names[0] + +model_input_shape = sess.get_inputs()[0].shape +model_classes = sess.get_outputs()[1].shape[1] +labels = load_labels(labels_path) +# 1 means the labels represent classes 1..1000 and the background class 0 +# has to be skipped +bg_class_offset = model_classes - len(labels) if data_layout == 'NHWC': (samples, height, width, channels) = model_input_shape @@ -109,9 +126,9 @@ def load_a_batch(batch_filenames): (samples, channels, height, width) = model_input_shape print("") -print("Data layout: {}".format(data_layout) ) -print("Input layers: {}".format([ str(x) for x in sess.get_inputs()])) -print("Output layers: {}".format([ str(x) for x in sess.get_outputs()])) +print("Data layout: {}".format(data_layout)) +print("Input layers: {}".format([str(x) for x in sess.get_inputs()])) +print("Output layers: {}".format([str(x) for x in sess.get_outputs()])) print("Input layer name: " + input_layer_name) print("Expected input shape: {}".format(model_input_shape)) print("Output layer name: " + output_layer_name) @@ -126,26 +143,36 @@ def load_a_batch(batch_filenames): start_time = time.time() for batch_idx in range(batch_count): - print ('') - print ("Batch {}/{}:".format(batch_idx+1, batch_count)) - - batch_filenames = [ imagenet_path + '/' + "ILSVRC2012_val_00000{:03d}.JPEG".format(starting_index + batch_idx*batch_size + i) for i in range(batch_size) ] + print('') + print("Batch {}/{}:".format(batch_idx + 1, batch_count)) + + batch_filenames = [ + imagenet_path + + '/' + + "ILSVRC2012_val_00000{:03d}.JPEG".format( + starting_index + + batch_idx * + batch_size + + i) for i in range(batch_size)] # Grigori: trick to test models: - if os.environ.get('CM_IMAGE','')!='': - batch_filenames=[os.environ['CM_IMAGE']] + if os.environ.get('CM_IMAGE', '') != '': + batch_filenames = [os.environ['CM_IMAGE']] - batch_data = load_a_batch( batch_filenames ) - #print(batch_data.shape) + batch_data = load_a_batch(batch_filenames) + # print(batch_data.shape) - batch_predictions = sess.run([output_layer_name], {input_layer_name: batch_data})[0] + batch_predictions = sess.run( + [output_layer_name], { + input_layer_name: batch_data})[0] - cm_status = {'classifications':[]} + cm_status = {'classifications': []} - print ('') + print('') top_classification = '' for in_batch_idx in range(batch_size): - softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:] # skipping the background class on the left (if present) + # skipping the background class on the left (if present) + softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:] top5_indices = list(reversed(softmax_vector.argsort()))[:5] print(' * ' + batch_filenames[in_batch_idx] + ' :') @@ -154,14 +181,18 @@ def load_a_batch(batch_filenames): if top_classification == '': top_classification = labels[class_idx] - 
print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) + print( + "\t{}\t{}\t{}".format( + class_idx, + softmax_vector[class_idx], + labels[class_idx])) - cm_status['classifications'].append({'class_idx':int(class_idx), + cm_status['classifications'].append({'class_idx': int(class_idx), 'softmax': float(softmax_vector[class_idx]), - 'label':labels[class_idx]}) + 'label': labels[class_idx]}) - print ('') - print ('Top classification: {}'.format(top_classification)) + print('') + print('Top classification: {}'.format(top_classification)) cm_status['top_classification'] = top_classification avg_time = (time.time() - start_time) / batch_count @@ -169,4 +200,5 @@ def load_a_batch(batch_filenames): # Record cm_status to embedded it into CM workflows with open('tmp-run-state.json', 'w') as cm_file: - cm_file.write(json.dumps({'cm_app_image_classification_onnx_py':cm_status}, sort_keys=True, indent=2)) + cm_file.write(json.dumps( + {'cm_app_image_classification_onnx_py': cm_status}, sort_keys=True, indent=2)) diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py index 1afe3271b8..863b3a6513 100644 --- a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py +++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -11,23 +11,35 @@ import torchvision.models as models import imagenet_helper -from imagenet_helper import (load_preprocessed_batch, image_list, class_labels, BATCH_SIZE) +from imagenet_helper import ( + load_preprocessed_batch, + image_list, + class_labels, + BATCH_SIZE) -## Writing the results out: +# Writing the results out: # -RESULTS_DIR = os.getenv('CM_RESULTS_DIR') -FULL_REPORT = os.getenv('CM_SILENT_MODE', '0') in ('NO', 'no', 'OFF', 'off', '0') - -## Processing by batches: +RESULTS_DIR = os.getenv('CM_RESULTS_DIR') +FULL_REPORT = os.getenv( + 'CM_SILENT_MODE', + '0') in ( + 'NO', + 'no', + 'OFF', + 'off', + '0') + +# Processing by batches: # -BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1)) +BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1)) -## Enabling GPU if available and not disabled: +# Enabling GPU if available and not disabled: # -USE_CUDA = (os.getenv('USE_CUDA', '').strip()=='yes') +USE_CUDA = (os.getenv('USE_CUDA', '').strip() == 'yes') + +labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] -labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] def load_labels(labels_filepath): my_labels = [] @@ -37,11 +49,10 @@ def load_labels(labels_filepath): return my_labels -labels = load_labels(labels_path) - +labels = load_labels(labels_path) -data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] +data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] def main(): @@ -50,7 +61,7 @@ def main(): setup_time_begin = time.time() - bg_class_offset=0 + bg_class_offset = 0 # Cleanup results directory if os.path.isdir(RESULTS_DIR): @@ -60,7 +71,7 @@ def main(): # Load the [cached] Torch model path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH'] - model=models.resnet50(pretrained=False) + model = models.resnet50(pretrained=False) model.load_state_dict(torch.load(path_to_model_pth)) model.eval() @@ -79,22 +90,23 @@ def main(): first_classification_time = 0 images_loaded = 0 - image_path = os.environ.get('CM_INPUT','') - if image_path !='': + image_path = os.environ.get('CM_INPUT', '') + if image_path != '': - normalize_data_bool=True - subtract_mean_bool=False + 
normalize_data_bool = True + subtract_mean_bool = False from PIL import Image def load_and_resize_image(image_filepath, height, width): - pillow_img = Image.open(image_filepath).resize((width, height)) # sic! The order of dimensions in resize is (W,H) + pillow_img = Image.open(image_filepath).resize( + (width, height)) # sic! The order of dimensions in resize is (W,H) input_data = np.float32(pillow_img) # Normalize if normalize_data_bool: - input_data = input_data/127.5 - 1.0 + input_data = input_data / 127.5 - 1.0 # Subtract mean value if subtract_mean_bool: @@ -110,27 +122,27 @@ def load_and_resize_image(image_filepath, height, width): # print(nhwc_data.shape) return nhwc_data else: - nchw_data = nhwc_data.transpose(0,3,1,2) + nchw_data = nhwc_data.transpose(0, 3, 1, 2) # print(nchw_data.shape) return nchw_data - BATCH_COUNT=1 - + BATCH_COUNT = 1 for batch_index in range(BATCH_COUNT): - batch_number = batch_index+1 + batch_number = batch_index + 1 if FULL_REPORT or (batch_number % 10 == 0): print("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) begin_time = time.time() - if image_path=='': - batch_data, image_index = load_preprocessed_batch(image_list, image_index) + if image_path == '': + batch_data, image_index = load_preprocessed_batch( + image_list, image_index) else: batch_data = load_and_resize_image(image_path, 224, 224) image_index = 1 - torch_batch = torch.from_numpy( batch_data ) + torch_batch = torch.from_numpy(batch_data) load_time = time.time() - begin_time total_load_time += load_time @@ -146,7 +158,7 @@ def load_and_resize_image(image_filepath, height, width): torch_batch = torch_batch.to('cuda') with torch.no_grad(): - batch_results = model( torch_batch ) + batch_results = model(torch_batch) classification_time = time.time() - begin_time if FULL_REPORT: @@ -159,7 +171,8 @@ def load_and_resize_image(image_filepath, height, width): # Process results for index_in_batch in range(BATCH_SIZE): - softmax_vector = batch_results[index_in_batch][bg_class_offset:] # skipping the background class on the left (if present) + # skipping the background class on the left (if present) + softmax_vector = batch_results[index_in_batch][bg_class_offset:] global_index = batch_index * BATCH_SIZE + index_in_batch res_file = os.path.join(RESULTS_DIR, image_list[global_index]) @@ -170,14 +183,18 @@ def load_and_resize_image(image_filepath, height, width): top5_indices = list(reversed(softmax_vector.argsort()))[:5] for class_idx in top5_indices: - print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) + print( + "\t{}\t{}\t{}".format( + class_idx, + softmax_vector[class_idx], + labels[class_idx])) print("") - test_time = time.time() - test_time_begin if BATCH_COUNT > 1: - avg_classification_time = (total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE) + avg_classification_time = ( + total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE) else: avg_classification_time = total_classification_time / images_loaded diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py index 1dbea81431..20c1642889 100644 --- a/script/app-image-classification-tvm-onnx-py/src/classify.py +++ b/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -16,7 +16,6 @@ import onnxruntime as rt - # Image conversion from MLPerf(tm) vision def center_crop(img, out_height, out_width): height, width, _ = img.shape @@ -28,7 +27,8 @@ def center_crop(img, 
out_height, out_width): return img -def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR): +def resize_with_aspectratio( + img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR): height, width, _ = img.shape new_height = int(100. * out_height / scale) new_width = int(100. * out_width / scale) @@ -50,37 +50,45 @@ def get_top5(all_probs): prob = all_probs[class_index] probs_with_classes.append((prob, class_index)) - sorted_probs = sorted(probs_with_classes, key = lambda pair: pair[0], reverse=True) + sorted_probs = sorted( + probs_with_classes, + key=lambda pair: pair[0], + reverse=True) return sorted_probs[0:5] + def run_case(dtype, image, target): - # Check image + # Check image import os import json import sys - STAT_REPEAT=os.environ.get('STAT_REPEAT','') - if STAT_REPEAT=='' or STAT_REPEAT==None: - STAT_REPEAT=10 - STAT_REPEAT=int(STAT_REPEAT) + STAT_REPEAT = os.environ.get('STAT_REPEAT', '') + if STAT_REPEAT == '' or STAT_REPEAT is None: + STAT_REPEAT = 10 + STAT_REPEAT = int(STAT_REPEAT) # FGG: set model files via CM env CATEG_FILE = 'synset.txt' synset = eval(open(os.path.join(CATEG_FILE)).read()) - files=[] - val={} + files = [] + val = {} # FGG: set timers import time - timers={} + timers = {} img_orig = cv2.imread(image) img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB) output_height, output_width, _ = 224, 224, 3 - img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_AREA) + img = resize_with_aspectratio( + img, + output_height, + output_width, + inter_pol=cv2.INTER_AREA) img = center_crop(img, output_height, output_width) img = np.asarray(img, dtype='float32') @@ -93,34 +101,35 @@ def run_case(dtype, image, target): import matplotlib.pyplot as plt img1 = img.transpose([1, 2, 0]) - arr_ = np.squeeze(img1) # you can give axis attribute if you wanna squeeze in specific dimension + # you can give axis attribute if you wanna squeeze in specific dimension + arr_ = np.squeeze(img1) plt.imshow(arr_) # plt.show() plt.savefig('pre-processed-image.png') # Load model - model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH','') - if model_path=='': - print ('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') + model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', '') + if model_path == '': + print('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') exit(1) opt = rt.SessionOptions() - if len(rt.get_all_providers()) > 1 and os.environ.get("USE_CUDA", "yes").lower() not in [ "0", "false", "off", "no" ]: - #Currently considering only CUDAExecutionProvider - sess = rt.InferenceSession(model_path, opt, providers=['CUDAExecutionProvider']) + if len(rt.get_all_providers()) > 1 and os.environ.get( + "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]: + # Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession( + model_path, opt, providers=['CUDAExecutionProvider']) else: - sess = rt.InferenceSession(model_path, opt, providers=["CPUExecutionProvider"]) + sess = rt.InferenceSession( + model_path, opt, providers=["CPUExecutionProvider"]) inputs = [meta.name for meta in sess.get_inputs()] outputs = [meta.name for meta in sess.get_outputs()] - print (inputs) - print (outputs) - - + print(inputs) + print(outputs) - - if os.environ.get('USE_TVM','')=='yes': + if os.environ.get('USE_TVM', '') == 'yes': import tvm from tvm import relay import onnx @@ -128,15 +137,15 @@ def run_case(dtype, image, target): del sess # Load model via ONNX to be used 
with TVM - print ('') - print ('ONNX: load model ...') - print ('') + print('') + print('ONNX: load model ...') + print('') onnx_model = onnx.load(model_path) # Init TVM # TBD: add tvm platform selector - if os.environ.get('USE_CUDA','')=='yes': + if os.environ.get('USE_CUDA', '') == 'yes': # TVM package must be built with CUDA enabled ctx = tvm.cuda(0) else: @@ -146,13 +155,14 @@ def run_case(dtype, image, target): build_conf = {'relay.backend.use_auto_scheduler': False} opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3)) host = os.environ.get('CM_HOST_PLATFORM_FLAVOR') - if host == 'x86_64' and 'AMD' in os.environ.get('CM_HOST_CPU_VENDOR_ID',''): + if host == 'x86_64' and 'AMD' in os.environ.get( + 'CM_HOST_CPU_VENDOR_ID', ''): target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2') else: target = os.environ.get('TVM_TARGET', 'llvm') - target_host=None - params={} + target_host = None + params = {} # New target API tvm_target = tvm.target.Target(target, host=target_host) @@ -160,30 +170,30 @@ def run_case(dtype, image, target): input_shape = (1, 3, 224, 224) shape_dict = {inputs[0]: input_shape} - print ('') - print ('TVM: import model ...') - print ('') + print('') + print('TVM: import model ...') + print('') # Extra param: opset=12 - mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True) + mod, params = relay.frontend.from_onnx( + onnx_model, shape_dict, freeze_params=True) - print ('') - print ('TVM: transform to static ...') - print ('') + print('') + print('TVM: transform to static ...') + print('') mod = relay.transform.DynamicToStatic()(mod) - print ('') - print ('TVM: apply extra optimizations ...') - print ('') + print('') + print('TVM: apply extra optimizations ...') + print('') # Padding optimization # Adds extra optimizations mod = relay.transform.FoldExplicitPadding()(mod) + print('') + print('TVM: build model ...') + print('') - print ('') - print ('TVM: build model ...') - print ('') - - executor=os.environ.get('MLPERF_TVM_EXECUTOR','graph') + executor = os.environ.get('MLPERF_TVM_EXECUTOR', 'graph') if executor == "graph" or executor == "debug": from tvm.contrib import graph_executor @@ -195,29 +205,28 @@ def run_case(dtype, image, target): params=params) lib = graph_module - print ('') - print ('TVM: init graph engine ...') - print ('') + print('') + print('TVM: init graph engine ...') + print('') sess = graph_executor.GraphModule(lib['default'](ctx)) - elif executor == "vm": from tvm.runtime.vm import VirtualMachine # Without history with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf): - vm_exec = relay.vm.compile(mod, target=tvm_target, params=params) + vm_exec = relay.vm.compile( + mod, target=tvm_target, params=params) r_exec = vm_exec - print ('') - print ('TVM: init VM ...') - print ('') + print('') + print('TVM: init VM ...') + print('') sess = VirtualMachine(r_exec, ctx) - # For now only graph sess.set_input(inputs[0], tvm.nd.array([img])) @@ -229,36 +238,31 @@ def run_case(dtype, image, target): for i in range(sess.get_num_outputs()): # Take only the output of batch size for dynamic batches - if len(output)<(i+1): + if len(output) < (i + 1): output.append([]) output[i].append(sess.get_output(i).asnumpy()[0]) - - else: - inp={inputs[0]:np.array([img], dtype=np.float32)} - output=sess.run(outputs, inp) - + inp = {inputs[0]: np.array([img], dtype=np.float32)} + output = sess.run(outputs, inp) + top1 = np.argmax(output[1]) - 1 # .asnumpy()) + top5 = [] + atop5 = get_top5(output[1][0]) # .asnumpy()) - top1 = 
np.argmax(output[1])-1 #.asnumpy()) - - top5=[] - atop5 = get_top5(output[1][0]) #.asnumpy()) - - print ('') + print('') print('Prediction Top1:', top1, synset[top1]) - print ('') + print('') print('Prediction Top5:') for p in atop5: - out=p[1]-1 - name=synset[out] - print (' * {} {}'.format(out, name)) + out = p[1] - 1 + name = synset[out] + print(' * {} {}'.format(out, name)) - ck_results={ - 'prediction':synset[top1] + ck_results = { + 'prediction': synset[top1] } with open('tmp-ck-timer.json', 'w') as ck_results_file: @@ -266,14 +270,20 @@ def run_case(dtype, image, target): return + if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('--image', type=str, help="Path to JPEG image.", default=None, required=True) + parser.add_argument( + '--image', + type=str, + help="Path to JPEG image.", + default=None, + required=True) parser.add_argument('--target', type=str, help="Target", default=None) args = parser.parse_args() - if args.image.strip().lower()=='': - print ('Please specify path to an image using CM_IMAGE environment variable!') + if args.image.strip().lower() == '': + print('Please specify path to an image using CM_IMAGE environment variable!') exit(1) # set parameter @@ -285,8 +295,8 @@ def run_case(dtype, image, target): data_shape = (batch_size,) + image_shape out_shape = (batch_size, num_classes) - dtype='float32' - if os.environ.get('CM_TVM_DTYPE','')!='': - dtype=os.environ['CM_TVM_DTYPE'] + dtype = 'float32' + if os.environ.get('CM_TVM_DTYPE', '') != '': + dtype = os.environ['CM_TVM_DTYPE'] run_case(dtype, args.image, args.target) diff --git a/script/app-image-corner-detection/customize.py b/script/app-image-corner-detection/customize.py index 88d65d1534..de1e344eeb 100644 --- a/script/app-image-corner-detection/customize.py +++ b/script/app-image-corner-detection/customize.py @@ -1,14 +1,15 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - script_path=i['run_script_input']['path'] + script_path = i['run_script_input']['path'] env["CM_SOURCE_FOLDER_PATH"] = script_path - env['CM_C_SOURCE_FILES']="susan.c" + env['CM_C_SOURCE_FILES'] = "susan.c" if 'CM_INPUT' not in env: env['CM_INPUT'] = os.path.join(script_path, 'data.pgm') @@ -18,24 +19,25 @@ def preprocess(i): if 'CM_RUN_DIR' not in env: output_path = os.path.join(script_path, "output") - if output_path!='' and not os.path.isdir(output_path): + if output_path != '' and not os.path.isdir(output_path): os.makedirs(output_path) env['CM_RUN_DIR'] = output_path - env['CM_RUN_SUFFIX']= env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c' + env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c' if os_info['platform'] == 'windows': - env['CM_BIN_NAME']='image-corner.exe' + env['CM_BIN_NAME'] = 'image-corner.exe' else: - env['CM_BIN_NAME']='image-corner' + env['CM_BIN_NAME'] = 'image-corner' env['+ LDCFLAGS'] = ["-lm"] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR']) - return {'return':0} + return {'return': 0} diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py index 5d67a3da13..17c923552a 100644 --- a/script/app-loadgen-generic-python/customize.py +++ b/script/app-loadgen-generic-python/customize.py @@ -4,64 +4,67 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] env = i['env'] - if 'CM_ML_MODEL_FILE_WITH_PATH' not in env: - return {'return': 1, 'error': 'Please 
select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} run_opts = env.get('CM_RUN_OPTS', '') if env.get('CM_MLPERF_BACKEND', '') != '': - run_opts +=" -b "+env['CM_MLPERF_BACKEND'] + run_opts += " -b " + env['CM_MLPERF_BACKEND'] if env.get('CM_MLPERF_RUNNER', '') != '': - run_opts +=" -r "+env['CM_MLPERF_RUNNER'] + run_opts += " -r " + env['CM_MLPERF_RUNNER'] if env.get('CM_MLPERF_CONCURRENCY', '') != '': - run_opts +=" --concurrency "+env['CM_MLPERF_CONCURRENCY'] + run_opts += " --concurrency " + env['CM_MLPERF_CONCURRENCY'] if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '': - run_opts +=" --ep "+env['CM_MLPERF_EXECUTION_PROVIDER'] + run_opts += " --ep " + env['CM_MLPERF_EXECUTION_PROVIDER'] if env.get('CM_MLPERF_INTRAOP', '') != '': - run_opts +=" --intraop "+env['CM_MLPERF_INTRAOP'] + run_opts += " --intraop " + env['CM_MLPERF_INTRAOP'] if env.get('CM_MLPERF_INTEROP', '') != '': - run_opts +=" --interop "+env['CM_MLPERF_INTEROP'] + run_opts += " --interop " + env['CM_MLPERF_INTEROP'] if env.get('CM_MLPERF_EXECMODE', '') != '': - run_opts +=" --execmode "+env['CM_MLPERF_EXECUTION_MODE'] + run_opts += " --execmode " + env['CM_MLPERF_EXECMODE'] if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '': - run_opts +=" --samples "+env['CM_MLPERF_LOADGEN_SAMPLES'] + run_opts += " --samples " + env['CM_MLPERF_LOADGEN_SAMPLES'] if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '': - run_opts +=" --loadgen_expected_qps "+env['CM_MLPERF_LOADGEN_EXPECTED_QPS'] + run_opts += " --loadgen_expected_qps " + \ + env['CM_MLPERF_LOADGEN_EXPECTED_QPS'] if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '': - run_opts +=" --loadgen_duration_sec "+env['CM_MLPERF_LOADGEN_DURATION_SEC'] + run_opts += " --loadgen_duration_sec " + \ + env['CM_MLPERF_LOADGEN_DURATION_SEC'] if env.get('CM_MLPERF_OUTPUT_DIR', '') != '': - run_opts +=" --output "+env['CM_MLPERF_OUTPUT_DIR'] + run_opts += " --output " + env['CM_MLPERF_OUTPUT_DIR'] if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '': - run_opts +=" --model_code "+env['CM_ML_MODEL_CODE_WITH_PATH'] - + run_opts += " --model_code " + env['CM_ML_MODEL_CODE_WITH_PATH'] if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '': - run_opts +=" --model_cfg "+env['CM_ML_MODEL_CFG_WITH_PATH'] + run_opts += " --model_cfg " + env['CM_ML_MODEL_CFG_WITH_PATH'] else: # Check cfg from command line cfg = env.get('CM_ML_MODEL_CFG', {}) - if len(cfg)>0: + if len(cfg) > 0: del (env['CM_ML_MODEL_CFG']) - import json, tempfile + import json + import tempfile tfile = tempfile.NamedTemporaryFile(mode="w+", suffix='.json') fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-') @@ -72,30 +75,32 @@ def preprocess(i): env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile - run_opts +=" --model_cfg " + tfile + run_opts += " --model_cfg " + tfile if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '': - run_opts +=" --model_sample_pickle "+env['CM_ML_MODEL_SAMPLE_WITH_PATH'] + run_opts += " --model_sample_pickle " + \ + env['CM_ML_MODEL_SAMPLE_WITH_PATH'] # Add path to file model weights at the end of command line - run_opts += ' '+env['CM_ML_MODEL_FILE_WITH_PATH'] + run_opts += ' ' + env['CM_ML_MODEL_FILE_WITH_PATH'] env['CM_RUN_OPTS'] = run_opts - print ('') - print ('Assembled flags: {}'.format(run_opts)) - print ('') + print('') + print('Assembled flags: {}'.format(run_opts)) + print('') + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - tfile = 
env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') + tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') - if tfile!='' and os.path.isfile(tfile): + if tfile != '' and os.path.isfile(tfile): os.remove(tfile) - return {'return':0} + return {'return': 0} diff --git a/script/app-loadgen-generic-python/src/backend_onnxruntime.py b/script/app-loadgen-generic-python/src/backend_onnxruntime.py index e95e467b9f..371f44ffbe 100644 --- a/script/app-loadgen-generic-python/src/backend_onnxruntime.py +++ b/script/app-loadgen-generic-python/src/backend_onnxruntime.py @@ -45,7 +45,7 @@ def __init__( inter_op_threads=0, model_code='', # Not used here model_cfg={}, # Not used here - model_sample_pickle='' # Not used here + model_sample_pickle='' # Not used here ): self.model_path = model_path self.execution_provider = execution_provider @@ -58,11 +58,12 @@ def __init__( self.session_options.inter_op_num_threads = inter_op_threads def create(self) -> Model: - print ('Loading model: {}'.format(self.model_path)) + print('Loading model: {}'.format(self.model_path)) # model = onnx.load(self.model_path) session_eps = [self.execution_provider] session = ort.InferenceSession( -# model.SerializeToString(), self.session_options, providers=session_eps + # model.SerializeToString(), self.session_options, + # providers=session_eps self.model_path, self.session_options, providers=session_eps ) return XModel(session) @@ -72,12 +73,14 @@ class XModelInputSampler(ModelInputSampler): def __init__(self, model_factory: XModelFactory): model = model_factory.create() input_defs = model.session.get_inputs() - self.inputs: typing.Dict[str, typing.Tuple[np.dtype, typing.List[int]]] = dict() + self.inputs: typing.Dict[str, + typing.Tuple[np.dtype, + typing.List[int]]] = dict() for input in input_defs: input_name = input.name input_type = ONNX_TO_NP_TYPE_MAP[input.type] input_dim = [ - 1 if (x is None or (type(x) is str)) else x for x in input.shape + 1 if (x is None or (isinstance(x, str))) else x for x in input.shape ] self.inputs[input_name] = (input_type, input_dim) diff --git a/script/app-loadgen-generic-python/src/backend_pytorch.py b/script/app-loadgen-generic-python/src/backend_pytorch.py index fbfd63bd31..6fb7160282 100644 --- a/script/app-loadgen-generic-python/src/backend_pytorch.py +++ b/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -16,6 +16,7 @@ xinput = input + class XModel(Model): def __init__(self, session): assert session is not None @@ -23,10 +24,10 @@ def __init__(self, session): def predict(self, input: ModelInput): - print ('') + print('') utils.print_host_memory_use('Host memory used') - print ('Running inference ...') + print('Running inference ...') with torch.no_grad(): output = self.session(input) @@ -54,59 +55,65 @@ def __init__( self.model_sample_pickle = model_sample_pickle self.execution_provider = execution_provider - def create(self) -> Model: - print ('') - print ('Loading model: {}'.format(self.model_path)) + print('') + print('Loading model: {}'.format(self.model_path)) if self.execution_provider == 'CPUExecutionProvider': torch_provider = 'cpu' elif self.execution_provider == 'CUDAExecutionProvider': torch_provider = 'cuda' if not torch.cuda.is_available(): - raise Exception('Error: CUDA is forced but not available or installed in PyTorch!') + raise Exception( + 'Error: CUDA is forced but not available or installed in PyTorch!') else: - raise Exception('Error: execution provider is unknown ({})!'.format(self.execution_provider)) + raise Exception( + 'Error: 
execution provider is unknown ({})!'.format( + self.execution_provider)) - checkpoint = torch.load(self.model_path, map_location=torch.device(torch_provider)) + checkpoint = torch.load(self.model_path, + map_location=torch.device(torch_provider)) if self.model_code == '': raise Exception('Error: path to model code was not provided!') if self.model_sample_pickle == '': - raise Exception('Error: path to model sample pickle was not provided!') + raise Exception( + 'Error: path to model sample pickle was not provided!') # Load sample import pickle - with open (self.model_sample_pickle, 'rb') as handle: + with open(self.model_sample_pickle, 'rb') as handle: self.input_sample = pickle.load(handle) # Check if has CM connector cm_model_module = os.path.join(self.model_code, 'cmc.py') if not os.path.isfile(cm_model_module): - raise Exception('cm.py interface for a PyTorch model was not found in {}'.format(self.model_code)) - - print ('') - print ('Collective Mind Connector for the model found: {}'.format(cm_model_module)) + raise Exception( + 'cmc.py interface for a PyTorch model was not found in {}'.format( + self.model_code)) + print('') + print('Collective Mind Connector for the model found: {}'.format( + cm_model_module)) # Load CM interface for the model import sys sys.path.insert(0, self.model_code) - model_module=importlib.import_module('cmc') - del(sys.path[0]) + model_module = importlib.import_module('cmc') + del (sys.path[0]) # Init model - if len(self.model_cfg)>0: - print ('Model cfg: {}'.format(self.model_cfg)) + if len(self.model_cfg) > 0: + print('Model cfg: {}'.format(self.model_cfg)) r = model_module.model_init(checkpoint, self.model_cfg) - if r['return']>0: + if r['return'] > 0: raise Exception('Error: {}'.format(r['error'])) model = r['model'] - if torch_provider=='cuda': + if torch_provider == 'cuda': model.cuda() model.eval() diff --git a/script/app-loadgen-generic-python/src/loadgen/harness.py b/script/app-loadgen-generic-python/src/loadgen/harness.py index 69edd2ba95..a8fdd4e86b 100644 --- a/script/app-loadgen-generic-python/src/loadgen/harness.py +++ b/script/app-loadgen-generic-python/src/loadgen/harness.py @@ -58,7 +58,8 @@ def issue_query(self, query_samples): self._complete_query(result) # Called after the last call to issue queries in a series is made. - # Client can use this to flush any deferred queries rather than waiting for a timeout. + # Client can use this to flush any deferred queries rather than waiting + # for a timeout. 
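+ # Runners that defer work return any outstanding results from flush_queries() (see runners.py).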
def flush_queries(self): result = self.runner.flush_queries() logger.info(f"Queries flushed") diff --git a/script/app-loadgen-generic-python/src/loadgen/runners.py b/script/app-loadgen-generic-python/src/loadgen/runners.py index 1b78acba15..9c813a0278 100644 --- a/script/app-loadgen-generic-python/src/loadgen/runners.py +++ b/script/app-loadgen-generic-python/src/loadgen/runners.py @@ -10,7 +10,7 @@ logger = logging.getLogger(__name__) -######## Runner implementations +# Runner implementations class ModelRunnerInline(ModelRunner): @@ -172,7 +172,9 @@ def flush_queries(self) -> typing.Optional[QueryResult]: return result else: task_result = self.task.get() - result = {query_id: query_result for query_id, query_result in task_result} + result = { + query_id: query_result for query_id, + query_result in task_result} return result @staticmethod diff --git a/script/app-loadgen-generic-python/src/main.py b/script/app-loadgen-generic-python/src/main.py index 692293b78e..58f9291322 100644 --- a/script/app-loadgen-generic-python/src/main.py +++ b/script/app-loadgen-generic-python/src/main.py @@ -38,7 +38,7 @@ def main( loadgen_duration_sec: float ): - print ('=====================================================================') + print('=====================================================================') if backend == 'onnxruntime': from backend_onnxruntime import XModelFactory @@ -51,21 +51,21 @@ def main( # Load model cfg model_cfg_dict = {} - if model_cfg!='': + if model_cfg != '': import json with open(model_cfg) as mc: model_cfg_dict = json.load(mc) model_factory = XModelFactory( - model_path, - execution_provider, - execution_mode, - interop_threads, - intraop_threads, - model_code, - model_cfg_dict, - model_sample_pickle + model_path, + execution_provider, + execution_mode, + interop_threads, + intraop_threads, + model_code, + model_cfg_dict, + model_sample_pickle ) model_dataset = XModelInputSampler(model_factory) @@ -106,7 +106,10 @@ def main( # samples_per_query = Max(min_query_count, target_sample_count) output_path = "results" if not output_path else output_path - output_path = os.path.join(output_path, os.path.basename(model_path), runner_name) + output_path = os.path.join( + output_path, + os.path.basename(model_path), + runner_name) os.makedirs(output_path, exist_ok=True) output_settings = mlperf_loadgen.LogOutputSettings() @@ -135,7 +138,7 @@ def main( harness.issue_query, harness.flush_queries ) - print ('=====================================================================') + print('=====================================================================') logger.info("Test Started") mlperf_loadgen.StartTestWithLogSettings( @@ -143,27 +146,31 @@ def main( ) logger.info("Test Finished") - print ('=====================================================================') + print('=====================================================================') # Parse output file output_summary = {} - output_summary_path = os.path.join(output_path, "mlperf_log_summary.txt") + output_summary_path = os.path.join( + output_path, "mlperf_log_summary.txt") with open(output_summary_path, "r") as output_summary_file: for line in output_summary_file: - m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line) + m = re.match( + r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line) if m: output_summary[m.group(1).strip()] = m.group(2).strip() - logger.info("Observed QPS: " + output_summary.get("Samples per second")) + logger.info( + "Observed QPS: " + + output_summary.get("Samples per second")) 
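+ # "Samples per second" and "Result is" are key/value pairs parsed out of mlperf_log_summary.txt by the regex above.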
logger.info("Result: " + output_summary.get("Result is")) mlperf_loadgen.DestroySUT(system_under_test) mlperf_loadgen.DestroyQSL(query_sample_libary) logger.info("Test Completed") - print ('=====================================================================') + print('=====================================================================') if __name__ == "__main__": - print ('') + print('') logging.basicConfig( level=logging.DEBUG, @@ -174,7 +181,11 @@ def main( parser.add_argument( "model_path", help="path to input model", default="models/yolov5s.onnx" ) - parser.add_argument("-b", "--backend", help="backend", default="onnxruntime") + parser.add_argument( + "-b", + "--backend", + help="backend", + default="onnxruntime") parser.add_argument("-o", "--output", help="path to store loadgen results") parser.add_argument( "-r", @@ -198,8 +209,16 @@ def main( parser.add_argument( "--ep", help="Execution Provider", default="CPUExecutionProvider" ) - parser.add_argument("--intraop", help="IntraOp threads", default=0, type=int) - parser.add_argument("--interop", help="InterOp threads", default=0, type=int) + parser.add_argument( + "--intraop", + help="IntraOp threads", + default=0, + type=int) + parser.add_argument( + "--interop", + help="InterOp threads", + default=0, + type=int) parser.add_argument( "--execmode", help="Execution Mode", @@ -212,11 +231,28 @@ def main( default=100, type=int, ) - parser.add_argument("--loadgen_expected_qps", help="Expected QPS", default=1, type=float) - parser.add_argument("--loadgen_duration_sec", help="Expected duration in sec.", default=1, type=float) - parser.add_argument("--model_code", help="(for PyTorch models) path to model code with cmc.py", default="") - parser.add_argument("--model_cfg", help="(for PyTorch models) path to model's configuration in JSON file", default="") - parser.add_argument("--model_sample_pickle", help="(for PyTorch models) path to a model sample in pickle format", default="") + parser.add_argument( + "--loadgen_expected_qps", + help="Expected QPS", + default=1, + type=float) + parser.add_argument( + "--loadgen_duration_sec", + help="Expected duration in sec.", + default=1, + type=float) + parser.add_argument( + "--model_code", + help="(for PyTorch models) path to model code with cmc.py", + default="") + parser.add_argument( + "--model_cfg", + help="(for PyTorch models) path to model's configuration in JSON file", + default="") + parser.add_argument( + "--model_sample_pickle", + help="(for PyTorch models) path to a model sample in pickle format", + default="") args = parser.parse_args() main( diff --git a/script/app-loadgen-generic-python/src/utils.py b/script/app-loadgen-generic-python/src/utils.py index 1fc04b0cfa..f7b0bfd7da 100644 --- a/script/app-loadgen-generic-python/src/utils.py +++ b/script/app-loadgen-generic-python/src/utils.py @@ -3,14 +3,16 @@ import os import psutil + def print_host_memory_use(text=''): pid = os.getpid() python_process = psutil.Process(pid) memoryUse = python_process.memory_info()[0] - if text == '': text = 'host memory use' + if text == '': + text = 'host memory use' - print('{}: {} MB'.format(text, int(memoryUse/1000000))) + print('{}: {} MB'.format(text, int(memoryUse / 1000000))) return diff --git a/script/app-mlperf-inference-amd/customize.py b/script/app-mlperf-inference-amd/customize.py index 7e8b96587d..e1958e5a85 100644 --- a/script/app-mlperf-inference-amd/customize.py +++ b/script/app-mlperf-inference-amd/customize.py @@ -2,41 +2,47 @@ import os import shutil + def preprocess(i): os_info = 
i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH'] - env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD") + env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD") if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} if "llama2" in env['CM_MODEL']: env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join(env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8") + env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join( + env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8") env['CM_RUN_CMD'] = "bash run-llama2.sh " else: - return {'return':1, 'error':'Model {} not supported'.format(env['CM_MODEL'])} - + return {'return': 1, 'error': 'Model {} not supported'.format( + env['CM_MODEL'])} - return {'return':0} - #return {'return':1, 'error': 'Run command needs to be tested'} + return {'return': 0} + # return {'return':1, 'error': 'Run command needs to be tested'} def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py index 3445631dd3..f5c8e844c0 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py @@ -2,28 +2,33 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} source_files = [] script_path = i['run_script_input']['path'] - env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, env['CM_TMP_SRC_FOLDER']) + env['CM_SOURCE_FOLDER_PATH'] = os.path.join( + 
script_path, env['CM_TMP_SRC_FOLDER']) for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): if file.endswith(".c") or file.endswith(".cpp"): @@ -32,7 +37,7 @@ def preprocess(i): env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) if '+CPLUS_INCLUDE_PATH' not in env: - env['+CPLUS_INCLUDE_PATH'] = [] + env['+CPLUS_INCLUDE_PATH'] = [] env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) @@ -51,12 +56,14 @@ def preprocess(i): # add preprocessor flag like "#define CM_MODEL_RESNET50" env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + + env['CM_MLPERF_BACKEND'].upper()) # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + + env['CM_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: - env['+ LDCXXFLAGS'] = [ ] + env['+ LDCXXFLAGS'] = [] env['+ LDCXXFLAGS'] += [ "-lmlperf_loadgen", @@ -64,7 +71,8 @@ def preprocess(i): ] # e.g. -lonnxruntime if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['+ LDCXXFLAGS'].append('-l' + + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. -lcudart if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) @@ -72,25 +80,30 @@ def preprocess(i): if env.get('CM_TMP_LINK_LIBS', []): libs = env['CM_TMP_LINK_LIBS'].split(",") for lib in libs: - env['+ LDCXXFLAGS'].append(' -l'+lib) + env['+ LDCXXFLAGS'].append(' -l' + lib) env['CM_LINKER_LANG'] = 'CXX' env['CM_RUN_DIR'] = os.getcwd() if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - if env.get('CM_DATASET_COMPRESSED', "no").lower() in [ "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''): - env['CM_HOST_USE_ALL_CORES'] = "yes" #Use all cores for input preprocessing + if env.get('CM_DATASET_COMPRESSED', "no").lower() in [ + "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''): + # Use all cores for input preprocessing + env['CM_HOST_USE_ALL_CORES'] = "yes" env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing" - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-dummy/customize.py b/script/app-mlperf-inference-dummy/customize.py index 36f310babc..74ff72927e 100644 --- a/script/app-mlperf-inference-dummy/customize.py +++ b/script/app-mlperf-inference-dummy/customize.py @@ -2,32 +2,37 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in 
this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} r = get_run_cmd(env['CM_MODEL'], i) if r['return'] > 0: return r run_cmd = r['run_cmd'] - run_dir = r ['run_dir'] + run_dir = r['run_dir'] print(run_cmd) print(run_dir) - return {'return':1, 'error': 'Run command needs to be tested!'} + return {'return': 1, 'error': 'Run command needs to be tested!'} + def get_run_cmd(model, i): env = i['env'] @@ -49,12 +54,18 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "CTuning" - run_dir = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", "gptj-99") + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + "open", + submitter, + "code", + "gptj-99") return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py index ef02276990..57ab3887d0 100644 --- a/script/app-mlperf-inference-intel/customize.py +++ b/script/app-mlperf-inference-intel/customize.py @@ -2,27 +2,31 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} import json if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} ml_model = env['CM_MODEL'] - master_model = ml_model.replace("-99.9", "").replace("-99","") + master_model = ml_model.replace("-99.9", "").replace("-99", "") master_model = master_model.replace("gptj", "gpt-j") backend = env['CM_MLPERF_BACKEND'] @@ -34,7 +38,13 @@ def preprocess(i): if 'dlrm-v2' in ml_model: code_base_folder = "pytorch-cpu-int8" - harness_root = 
os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', ml_model, code_base_folder) + harness_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'code', + ml_model, + code_base_folder) env['CM_HARNESS_CODE_ROOT'] = harness_root @@ -52,18 +62,16 @@ def preprocess(i): if env['CM_MODEL'] == "retinanet": env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - - if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX'] - if env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration": if master_model == "resnet50": i['run_script_input']['script_name'] = "prepare_imagenet_calibration" @@ -72,39 +80,51 @@ def preprocess(i): elif "dlrm-v2" in master_model: i['run_script_input']['script_name'] = "calibrate_dlrm_v2_model" else: - calibration_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration', master_model, backend+"-"+device) + calibration_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'calibration', + master_model, + backend + "-" + device) if "gpt" in env['CM_MODEL']: i['run_script_input']['script_name'] = "calibrate_gptj_int4_model" calibration_path = os.path.join(calibration_root, "INT4") env['CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path - env['INT4_CALIBRATION_DIR'] = os.path.join(calibration_path, "data", "quantized-int4-model") + env['INT4_CALIBRATION_DIR'] = os.path.join( + calibration_path, "data", "quantized-int4-model") elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation": if master_model == "resnet50": i['run_script_input']['script_name'] = "compile_resnet50" elif master_model == "retinanet": i['run_script_input']['script_name'] = "compile_retinanet" - env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join(os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth') + env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join( + os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth') elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness": print(f"Harness Root: {harness_root}") if "bert" in env['CM_MODEL']: i['run_script_input']['script_name'] = "build_bert_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "bert_inference") + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "bert_inference") env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert") elif "stable-diffusion" in env['CM_MODEL']: i['run_script_input']['script_name'] = "build_sdxl_harness" elif "resnet50" in env['CM_MODEL']: i['run_script_input']['script_name'] = "build_resnet50_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "resnet50_inference") + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "resnet50_inference") env['DATA_PATH'] 
= os.path.join(os.getcwd(), "harness", "resnet50") elif "retinanet" in env['CM_MODEL']: i['run_script_input']['script_name'] = "build_retinanet_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "retinanet_inference") + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "retinanet_inference") elif "gpt" in env['CM_MODEL']: i['run_script_input']['script_name'] = "build_gptj_harness" - env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "gptj_inference") + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "gptj_inference") env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "gptj") env['MLPERF_INFERENCE_ROOT'] = env['CM_MLPERF_INFERENCE_SOURCE'] if env.get('INTEL_GPTJ_INT4', '') == 'yes': @@ -119,19 +139,27 @@ def preprocess(i): model_precision = "int8" env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh" if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": - final_model_path = os.path.join(harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt") + final_model_path = os.path.join( + harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt") else: - final_model_path = os.path.join(env['OUT_DIR'], "checkpoint-final-final-q4-j-int8-pc.bin") + final_model_path = os.path.join( + env['OUT_DIR'], "checkpoint-final-final-q4-j-int8-pc.bin") model_dir_name = f"{model_precision.upper()}_MODEL_DIR" env[model_dir_name] = os.path.dirname(final_model_path) if not os.path.exists(env[model_dir_name]): os.makedirs(env[model_dir_name]) env['CM_ML_MODEL_PATH'] = env[model_dir_name] env['CM_ML_MODEL_FILE_WITH_PATH'] = final_model_path - if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', '') != '' and env.get('INT8_MODEL_DIR', '') != '': - shutil.copy(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], env[model_dir_name]) - if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', '') != '' and env.get('INT4_MODEL_DIR', '') != '': - shutil.copy(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], env[model_dir_name]) + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', + '') != '' and env.get('INT8_MODEL_DIR', '') != '': + shutil.copy( + env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], + env[model_dir_name]) + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', + '') != '' and env.get('INT4_MODEL_DIR', '') != '': + shutil.copy( + env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], + env[model_dir_name]) elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": print(f"Harness Root: {harness_root}") @@ -145,14 +173,20 @@ def preprocess(i): env['LOADGEN_MODE'] = 'Performance' if 'bert' in env['CM_MODEL']: - env['MODEL_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) - env['DATASET_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MODEL_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['DATASET_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" else "") + env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + \ + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + == "accuracy" else "") elif 'resnet50' 
in env['CM_MODEL']: - env['MODEL_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) - env['DATASET_PATH'] = os.path.dirname(os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['MODEL_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['DATASET_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} " @@ -172,7 +206,9 @@ def preprocess(i): elif 'stable-diffusion' in env['CM_MODEL']: env['CM_RUN_DIR'] = i['run_script_input']['path'] - env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" else "") + env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + \ + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + == "accuracy" else "") elif "gptj" in env['CM_MODEL']: env['CM_RUN_DIR'] = i['run_script_input']['path'] @@ -181,11 +217,13 @@ def preprocess(i): if env.get('INTEL_GPTJ_INT4', '') == 'yes': model_precision = "int4" env['INT4_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] - env['QUANTIZED_MODEL'] = os.path.join(env['INT4_MODEL_DIR'], "best_int4_model.pt") + env['QUANTIZED_MODEL'] = os.path.join( + env['INT4_MODEL_DIR'], "best_int4_model.pt") env['PRECISION'] = "int4_bf16_mixed" else: env['INT8_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] - env['QUANTIZED_MODEL'] = os.path.join(env["INT8_MODEL_DIR"], "best_model.pt") + env['QUANTIZED_MODEL'] = os.path.join( + env["INT8_MODEL_DIR"], "best_model.pt") env['PRECISION'] = "int8" elif env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0": env['CM_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh " @@ -193,18 +231,20 @@ def preprocess(i): if env['CM_MLPERF_RUN_STYLE'] == "test": env['TOTAL_SAMPLE_COUNT'] = env['CM_TEST_QUERY_COUNT'] else: - env['TOTAL_SAMPLE_COUNT'] = env.get('CM_MLPERF_MAX_QUERY_COUNT', env['CM_TEST_QUERY_COUNT']) + env['TOTAL_SAMPLE_COUNT'] = env.get( + 'CM_MLPERF_MAX_QUERY_COUNT', env['CM_TEST_QUERY_COUNT']) if env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": env['WORKERS_PER_PROC'] = 4 else: env['WORKERS_PER_PROC'] = 1 - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py index 0f4d74cba0..89356a2703 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/customize.py +++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] @@ -11,9 +12,9 @@ def preprocess(i): meta = i['meta'] if os_info['platform'] == 'windows': - print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') - print ('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') - print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') + 
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') # # Currently support only LLVM on Windows # print ('# Forcing LLVM on Windows') # r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': {'adr':{'compiler':{'tags':'llvm'}}}}}) @@ -22,14 +23,17 @@ def preprocess(i): env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} source_files = [] script_path = i['run_script_input']['path'] @@ -44,7 +48,7 @@ def preprocess(i): env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) if '+CPLUS_INCLUDE_PATH' not in env: - env['+CPLUS_INCLUDE_PATH'] = [] + env['+CPLUS_INCLUDE_PATH'] = [] env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) @@ -62,12 +66,14 @@ def preprocess(i): # add preprocessor flag like "#define CM_MODEL_RESNET50" env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + + env['CM_MLPERF_BACKEND'].upper()) # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + + env['CM_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: - env['+ LDCXXFLAGS'] = [ ] + env['+ LDCXXFLAGS'] = [] env['+ LDCXXFLAGS'] += [ "-lmlperf_loadgen", @@ -75,7 +81,8 @@ def preprocess(i): ] # e.g. -lonnxruntime if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['+ LDCXXFLAGS'].append('-l' + + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. 
-lcudart if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) @@ -84,15 +91,18 @@ def preprocess(i): env['CM_RUN_DIR'] = os.getcwd() if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index fc16ba0ff3..477190c676 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -4,6 +4,7 @@ import shutil import subprocess + def preprocess(i): os_info = i['os_info'] @@ -12,17 +13,17 @@ def preprocess(i): script_path = i['run_script_input']['path'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": - return {'return':0} + return {'return': 0} - if env.get('CM_MLPERF_POWER','') == "yes": + if env.get('CM_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN","")!='' else False + rerun = True if env.get("CM_RERUN", "") != '' else False if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" @@ -31,63 +32,80 @@ def preprocess(i): env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" if 'CM_MODEL' not in env: - return {'return': 1, 'error': "Please select a variation specifying the model to run"} + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} - #if env['CM_MODEL'] == "resnet50": + # if env['CM_MODEL'] == "resnet50": # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'], # "val_map.txt") # ret = os.system(cmd) - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " if 'CM_MLPERF_LOADGEN_QPS' not in env: env['CM_MLPERF_LOADGEN_QPS_OPT'] = "" else: - env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + env['CM_MLPERF_LOADGEN_QPS'] + env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['CM_MLPERF_LOADGEN_QPS'] - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] if 'CM_NUM_THREADS' not in env: if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) else: env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE','') != '' and str(env.get('CM_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in [ "true", "1", "yes"] 
: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) + if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get( + 'CM_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) - if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE','') != '': - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) + if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ + str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) - if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT','') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and (env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL'] or 'llama2' in env['CM_MODEL'] or 'mixtral' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE','') != "valid": - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] + if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and ( + env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL'] or 'llama2' in env['CM_MODEL'] or 'mixtral' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid": + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") - x="" if os_info['platform'] == 'windows' else "'" + x = "" if os_info['platform'] == 'windows' else "'" inference_src_version = env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION', '') version_tuple = None if inference_src_version: version_tuple = tuple(map(int, inference_src_version.split('.'))) - if version_tuple and version_tuple >= (4,1,1): - pass # mlperf_conf is automatically loaded by the loadgen + if version_tuple and version_tuple >= (4, 1, 1): + pass # mlperf_conf is automatically loaded by the loadgen else: if "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + x+ env['CM_MLPERF_CONF'] + x + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ + x + env['CM_MLPERF_CONF'] + x else: - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf "+ x + env['CM_MLPERF_CONF'] + x + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ + x + env['CM_MLPERF_CONF'] + x - if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get('CM_MLPERF_INFERENCE_API_SERVER','')=='' and "llama2-70b" not in env['CM_MODEL']: + if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get( + 'CM_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['CM_MODEL']: env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH') if not env['MODEL_DIR']: - env['MODEL_DIR'] = os.path.dirname(env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH', ''))) + env['MODEL_DIR'] = os.path.dirname( + env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get( + 'CM_ML_MODEL_FILE_WITH_PATH', + ''))) RUN_CMD = "" state['RUN'] = {} @@ -98,33 
+116,37 @@ def preprocess(i): NUM_THREADS = env['CM_NUM_THREADS'] if int(NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu": - NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU + NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU - if env['CM_MODEL'] in [ 'resnet50', 'retinanet', 'stable-diffusion-xl' ] : - scenario_extra_options += " --threads " + NUM_THREADS + if env['CM_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: + scenario_extra_options += " --threads " + NUM_THREADS ml_model_name = env['CM_MODEL'] if 'CM_MLPERF_USER_CONF' in env: user_conf_path = env['CM_MLPERF_USER_CONF'] - x="" if os_info['platform'] == 'windows' else "'" + x = "" if os_info['platform'] == 'windows' else "'" if 'llama2-70b' in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]: - scenario_extra_options += " --user-conf " + x + user_conf_path + x + scenario_extra_options += " --user-conf " + x + user_conf_path + x else: - scenario_extra_options += " --user_conf " + x + user_conf_path + x + scenario_extra_options += " --user_conf " + x + user_conf_path + x mode = env['CM_MLPERF_LOADGEN_MODE'] mode_extra_options = "" - if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ 'resnet50', 'retinanet' ]: - #dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] - if env.get('CM_MLPERF_LAST_RELEASE') not in [ "v2.0", "v2.1" ]: - dataset_options = " --use_preprocessed_dataset --cache_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ + 'resnet50', 'retinanet']: + # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + if env.get('CM_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]: + dataset_options = " --use_preprocessed_dataset --cache_dir " + \ + env['CM_DATASET_PREPROCESSED_PATH'] else: dataset_options = "" if env['CM_MODEL'] == "retinanet": - dataset_options += " --dataset-list "+ env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + dataset_options += " --dataset-list " + \ + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] elif env['CM_MODEL'] == "resnet50": - dataset_options += " --dataset-list "+ os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + dataset_options += " --dataset-list " + \ + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') else: if 'CM_DATASET_PREPROCESSED_PATH' in env: @@ -137,7 +159,7 @@ def preprocess(i): dataset_options = '' - if env.get('CM_MLPERF_EXTRA_DATASET_ARGS','') != '': + if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '': dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS'] if mode == "accuracy": @@ -155,38 +177,47 @@ def preprocess(i): env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') - cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) + cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, mlperf_implementation) if env.get('CM_NETWORK_LOADGEN', '') == "lon": run_cmd = i['state']['mlperf_inference_run_cmd'] env['CM_SSH_RUN_COMMANDS'] = [] - env['CM_SSH_RUN_COMMANDS'].append(run_cmd.replace("--network=lon", "--network=sut") + " &") - + env['CM_SSH_RUN_COMMANDS'].append( + run_cmd.replace( + "--network=lon", + "--network=sut") + " &") env['CM_MLPERF_RUN_CMD'] = cmd env['CM_RUN_DIR'] = run_dir env['CM_RUN_CMD'] = cmd - 
env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') #for tvm + env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm - if env.get('CM_HOST_PLATFORM_FLAVOR','') == "arm64": + if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64": env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64" - return {'return':0} + return {'return': 0} + -def get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, implementation="reference"): +def get_run_cmd(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, implementation="reference"): if implementation == "reference": - return get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + return get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options) if implementation == "nvidia": - return get_run_cmd_nvidia(os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + return get_run_cmd_nvidia( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options) return "", os.getcwd() -def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_options, dataset_options): +def get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options): - if env['CM_MODEL'] in [ "gptj-99", "gptj-99.9" ]: + if env['CM_MODEL'] in ["gptj-99", "gptj-99.9"]: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") if env.get('CM_NETWORK_LOADGEN', '') != "lon": cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ @@ -206,62 +237,78 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio cmd = cmd + gpu_options env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] - if env['CM_MODEL'] in [ "resnet50", "retinanet" ]: + if env['CM_MODEL'] in ["resnet50", "retinanet"]: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "vision", "classification_and_detection") - env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - if env.get('CM_MLPERF_VISION_DATASET_OPTION','') == '' and env.get('CM_MLPERF_DEVICE') != "tpu": + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "vision", + "classification_and_detection") + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + if env.get('CM_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get( + 'CM_MLPERF_DEVICE') != "tpu": if os_info['platform'] == 'windows': - cmd = "python python/main.py --profile "+env['CM_MODEL']+"-"+env['CM_MLPERF_BACKEND'] + \ - " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ - " --output " + env['OUTPUT_DIR'] + " " + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + dataset_options + cmd = "python python/main.py --profile " + env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \ + " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --output " + env['OUTPUT_DIR'] + " " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + 
mode_extra_options + dataset_options else: cmd = "./run_local.sh " + env['CM_MLPERF_BACKEND'] + ' ' + \ - env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + dataset_options + env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options return cmd, env['RUN_DIR'] if env['CM_MLPERF_BACKEND'] == "ncnn": - env['MODEL_FILE'] = os.path.join(os.path.dirname(env.get('CM_ML_MODEL_FILE_WITH_PATH')), "resnet50_v1") + env['MODEL_FILE'] = os.path.join( + os.path.dirname( + env.get('CM_ML_MODEL_FILE_WITH_PATH')), + "resnet50_v1") else: - env['MODEL_FILE'] = env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')) + env['MODEL_FILE'] = env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get('CM_ML_MODEL_FILE_WITH_PATH')) if not env['MODEL_FILE']: return {'return': 1, 'error': 'No valid model file found!'} - env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] - extra_options = " --output "+ env['CM_MLPERF_OUTPUT_DIR'] +" --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ - " --dataset-path "+env['CM_DATASET_PREPROCESSED_PATH']+" --model "+env['MODEL_FILE'] + \ - " --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + extra_options = " --output " + env['CM_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \ + " --preprocessed_dir " + env['CM_DATASET_PREPROCESSED_PATH'] if env.get('CM_MLPERF_DEVICE') == "tpu": - cmd = "cd '" + os.path.join(env['RUN_DIR'],"python") + "' && "+env.get('CM_SUDO', "")+" "+env['CM_PYTHON_BIN_WITH_PATH']+ " main.py "+\ - "--backend "+env['CM_MLPERF_BACKEND']+ " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] +" --device tpu "+ \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + extra_options + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('CM_SUDO', "") + " " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + extra_options else: - cmd = "cd '" + os.path.join(env['RUN_DIR'],"python") + "' && "+env['CM_PYTHON_BIN_WITH_PATH']+ " main.py "+\ - "--backend "+env['CM_MLPERF_BACKEND']+ " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + extra_options + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + extra_options env['SKIP_VERIFY_ACCURACY'] = True elif "bert" in env['CM_MODEL']: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert") - 
env['MODEL_FILE'] = env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')) + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert") + env['MODEL_FILE'] = env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get('CM_ML_MODEL_FILE_WITH_PATH')) if not env['MODEL_FILE']: return {'return': 1, 'error': 'No valid model file found!'} - if env.get('CM_MLPERF_QUANTIZATION') in [ "on", True, "1", "True" ]: + if env.get('CM_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]: quantization_options = " --quantized" else: quantization_options = "" - cmd = env['CM_PYTHON_BIN_WITH_PATH']+ " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + quantization_options + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + quantization_options if env['CM_MLPERF_BACKEND'] == "deepsparse": - cmd += " --batch_size=" + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + " --model_path=" + env['MODEL_FILE'] + cmd += " --batch_size=" + \ + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --model_path=" + env['MODEL_FILE'] if env.get('CM_MLPERF_CUSTOM_MODEL_PATH', '') != '': env['CM_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE'] @@ -276,57 +323,69 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_RNNT_PATH'] cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['CM_MLPERF_BACKEND'] + \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \ - " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \ - " --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \ - " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ - " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + mode_extra_options + dataset_options + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \ + " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \ + " --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \ + " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options env['SKIP_VERIFY_ACCURACY'] = True elif "stable-diffusion-xl" in env['CM_MODEL']: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image") + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image") if env.get('+PYTHONPATH', '') == '': env['+PYTHONPATH'] = [] - env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "fid")) + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")) backend = env['CM_MLPERF_BACKEND'] - device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] not in [ "gpu", "rocm" ] else "cuda" + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] not in [ + "gpu", "rocm"] 
else "cuda" max_batchsize = env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --profile " + 'stable-diffusion-xl-pytorch ' + \ - " --dataset " + 'coco-1024' + \ - " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ - " --device " + device + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + \ - " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ - " --model-path " + env['CM_ML_MODEL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --profile " + 'stable-diffusion-xl-pytorch ' + \ + " --dataset " + 'coco-1024' + \ + " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ + " --device " + device + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ + " --model-path " + env['CM_ML_MODEL_PATH'] if "--max-batchsize" not in cmd: cmd += " --max-batchsize " + max_batchsize - if env.get('CM_COCO2014_SAMPLE_ID_PATH','') != '': + if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': cmd += " --ids-path " + env['CM_COCO2014_SAMPLE_ID_PATH'] elif "llama2-70b" in env['CM_MODEL']: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b") + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "language", + "llama2-70b") backend = env['CM_MLPERF_BACKEND'] device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \ - " --device " + device.replace("cuda", "cuda:0") + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + \ - " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] if env.get('CM_MLPERF_INFERENCE_API_SERVER', '') != '': - env['CM_VLLM_SERVER_MODEL_NAME'] = env.get("CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct" - #env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000" + env['CM_VLLM_SERVER_MODEL_NAME'] = env.get( + "CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct" + # env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000" cmd += f" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm " else: cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}" @@ -338,18 +397,21 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio cmd = cmd.replace("--max-batchsize", "--batch-size") elif "mixtral-8x7b" in env['CM_MODEL']: - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b") + env['RUN_DIR'] = os.path.join( + 
env['CM_MLPERF_INFERENCE_SOURCE'], + "language", + "mixtral-8x7b") backend = env['CM_MLPERF_BACKEND'] device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ - " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ - " --dataset-path " + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \ - " --device " + device.replace("cuda", "cuda:0") + \ - env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - scenario_extra_options + mode_extra_options + \ - " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ - ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ - " --model-path " + env['MIXTRAL_CHECKPOINT_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + " --model-path " + env['MIXTRAL_CHECKPOINT_PATH'] cmd = cmd.replace("--count", "--total-sample-count") cmd = cmd.replace("--max-batchsize", "--batch-size") @@ -357,18 +419,19 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_3DUNET_PATH'] backend = env['CM_MLPERF_BACKEND'] if env['CM_MLPERF_BACKEND'] != 'tf' else 'tensorflow' - cmd = env['CM_PYTHON_BIN_WITH_PATH']+ " run.py --backend=" + backend + " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ - " --model="+env['CM_ML_MODEL_FILE_WITH_PATH'] + \ - " --preprocessed_data_dir="+env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \ + " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --preprocessed_data_dir=" + env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \ scenario_extra_options + mode_extra_options + dataset_options env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] env['SKIP_VERIFY_ACCURACY'] = True - elif "dlrm" in env['CM_MODEL']: # DLRM is in draft stage + elif "dlrm" in env['CM_MODEL']: # DLRM is in draft stage - env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch") + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch") if 'multihot-criteo-sample' in env['CM_ML_MODEL_DATASET_TYPE']: dataset = "multihot-criteo-sample" elif 'multihot-criteo' in env['CM_ML_MODEL_DATASET_TYPE']: @@ -380,7 +443,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio mlperf_bin_loader_string = " --mlperf-bin-loader" else: mlperf_bin_loader_string = "" - if env.get('CM_ML_MODEL_DEBUG','') == 'yes': + if env.get('CM_ML_MODEL_DEBUG', '') == 'yes': config = " --max-ind-range=10000000 --data-sub-sample-rate=0.875 " else: config = " --max-ind-range=40000000 " @@ -395,23 +458,24 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" and env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": mode_extra_options += " --samples-per-query-offline=1" - cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \ + cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \ ' dlrm ' + dataset + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ config + 
mlperf_bin_loader_string + \ ' --samples-to-aggregate-quantile-file=./tools/dist_quantile.txt ' + \ scenario_extra_options + mode_extra_options + dataset_options + gpu_options cmd = cmd.replace("--count", "--count-queries") - env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] - if env.get('CM_NETWORK_LOADGEN', '') in [ "lon", "sut" ]: + if env.get('CM_NETWORK_LOADGEN', '') in ["lon", "sut"]: cmd = cmd + " " + "--network " + env['CM_NETWORK_LOADGEN'] if env.get('CM_NETWORK_LOADGEN_SUT_SERVERS', []): sut_servers = env['CM_NETWORK_LOADGEN_SUT_SERVERS'] - cmd += " --sut_server '"+"','".join(sut_servers)+"' " + cmd += " --sut_server '" + "','".join(sut_servers) + "' " return cmd, env['RUN_DIR'] + def postprocess(i): env = i['env'] @@ -419,4 +483,4 @@ def postprocess(i): inp = i['input'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py index 705f1e3539..090d1b072a 100644 --- a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py +++ b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py @@ -30,7 +30,7 @@ from typing import Dict, Tuple, List, Optional from code.common.fix_sys_path import ScopedRestrictedImport -#with ScopedRestrictedImport(): +# with ScopedRestrictedImport(): import numpy as np import torch # Retinanet model source requires GPU installation of PyTorch 1.10 from torchvision.transforms import functional as F @@ -45,16 +45,27 @@ from code.common.runner import EngineRunner, get_input_format from code.common.systems.system_list import SystemClassifications from code.plugin import load_trt_plugin -RetinanetEntropyCalibrator = import_module("code.retinanet.tensorrt.calibrator").RetinanetEntropyCalibrator +RetinanetEntropyCalibrator = import_module( + "code.retinanet.tensorrt.calibrator").RetinanetEntropyCalibrator G_RETINANET_NUM_CLASSES = 264 G_RETINANET_IMG_SIZE = (800, 800) G_RETINANET_INPUT_SHAPE = (3, 800, 800) G_OPENIMAGE_CALSET_PATH = "build/data/open-images-v6-mlperf/calibration/train/data" G_OPENIMAGE_CALMAP_PATH = "data_maps/open-images-v6-mlperf/cal_map.txt" -G_OPENIMAGE_VALSET_PATH = os.path.join(os.environ.get("CM_DATASET_PATH", "build/data/open-images-v6-mlperf"), "validation", "data") +G_OPENIMAGE_VALSET_PATH = os.path.join( + os.environ.get( + "CM_DATASET_PATH", + "build/data/open-images-v6-mlperf"), + "validation", + "data") G_OPENIMAGE_VALMAP_PATH = "data_maps/open-images-v6-mlperf/val_map.txt" -G_OPENIMAGE_ANNO_PATH = os.path.join(os.environ.get("CM_DATASET_PATH","build/data/open-images-v6-mlperf"), "annotations", "openimages-mlperf.json") +G_OPENIMAGE_ANNO_PATH = os.path.join( + os.environ.get( + "CM_DATASET_PATH", + "build/data/open-images-v6-mlperf"), + "annotations", + "openimages-mlperf.json") G_OPENIMAGE_PREPROCESSED_INT8_PATH = "build/preprocessed_data/open-images-v6-mlperf/validation/Retinanet/int8_linear" # Using EfficientNMS now G_RETINANET_CALIBRATION_CACHE_PATH = "code/retinanet/tensorrt/calibrator.cache" @@ -91,7 +102,8 @@ class FirstLayerConvActPoolTacticSelector(trt.IAlgorithmSelector): def select_algorithms(self, ctx, choices): if "Conv_0 + 1783 + Mul_1 + 1785 + Add_2 + Relu_3 + MaxPool_4" in ctx.name: # Apply to the first layer # MLPINF-1833: Disabled CaskConvActPool for TRT 8.5.0.4 - # TRT 8.5.0.4 has a bug with CaskConvActPool which has been fixed since 8.5.0.5 + # TRT 8.5.0.4 has a bug with CaskConvActPool which has been fixed + # since 
8.5.0.5 forbidden_set = { -3689373275198309793, # 0xccccb68da7fc3a5f -4219016963003938541, # 0xc5730a6ceacd8913 @@ -116,7 +128,8 @@ def select_algorithms(self, ctx, choices): -7700711094551245800, # 0xf126325c0aa4aa02 -1070112490556970494, # 0x97d50e90c139753e } - filtered_idxs = [idx for idx, choice in enumerate(choices) if choice.algorithm_variant.tactic not in forbidden_set] + filtered_idxs = [idx for idx, choice in enumerate( + choices) if choice.algorithm_variant.tactic not in forbidden_set] to_ret = filtered_idxs else: # By default, say that all tactics are acceptable: @@ -149,7 +162,8 @@ def __init__(self, engine_file, batch_size, precision, onnx_path, self.dla_core = None # Initiate the plugin and logger - self.logger = TRT_LOGGER # Use the global singleton, which is required by TRT. + # Use the global singleton, which is required by TRT. + self.logger = TRT_LOGGER self.logger.min_severity = trt.Logger.VERBOSE if self.verbose else trt.Logger.INFO load_trt_plugin("retinanet") trt.init_libnvinfer_plugins(self.logger, "") @@ -159,7 +173,8 @@ def __init__(self, engine_file, batch_size, precision, onnx_path, self.create_trt_engine() else: if not os.path.exists(engine_file): - raise RuntimeError(f"Cannot find engine file {engine_file}. Please supply the onnx file or engine file.") + raise RuntimeError( + f"Cannot find engine file {engine_file}. Please supply the onnx file or engine file.") self.runner = EngineRunner(self.engine_file, verbose=verbose) @@ -170,11 +185,15 @@ def __init__(self, engine_file, batch_size, precision, onnx_path, def apply_flag(self, flag): """Apply a TRT builder flag.""" - self.builder_config.flags = (self.builder_config.flags) | (1 << int(flag)) + self.builder_config.flags = ( + self.builder_config.flags) | ( + 1 << int(flag)) def clear_flag(self, flag): """Clear a TRT builder flag.""" - self.builder_config.flags = (self.builder_config.flags) & ~(1 << int(flag)) + self.builder_config.flags = ( + self.builder_config.flags) & ~( + 1 << int(flag)) # Helper function to build a TRT engine from ONNX file def create_trt_engine(self): @@ -195,7 +214,9 @@ def create_trt_engine(self): # Calibrator for int8 preprocessed_data_dir = "build/preprocessed_data" - calib_image_dir = os.path.join(preprocessed_data_dir, "open-images-v6-mlperf/calibration/Retinanet/fp32") + calib_image_dir = os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf/calibration/Retinanet/fp32") self.calibrator = RetinanetEntropyCalibrator(data_dir=calib_image_dir, cache_file=self.cache_file, batch_size=10, max_batches=50, force_calibration=False, calib_data_map=G_OPENIMAGE_CALMAP_PATH) @@ -208,13 +229,15 @@ def create_trt_engine(self): else: raise Exception(f"{self.precision} not supported yet.") - self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + self.network = self.builder.create_network( + 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) model = onnx.load(self.onnx_path) parser = trt.OnnxParser(self.network, self.logger) success = parser.parse(onnx._serialize(model)) if not success: err_desc = parser.get_error(0).desc() - raise RuntimeError(f"Retinanet onnx model processing failed! Error: {err_desc}") + raise RuntimeError( + f"Retinanet onnx model processing failed! 
Error: {err_desc}") # Set the network input type if self.precision == "int8": @@ -236,7 +259,8 @@ def create_trt_engine(self): min_shape[0] = 1 max_shape = trt.Dims(input_shape) max_shape[0] = self.batch_size - profile.set_shape(input_name, min_shape, max_shape, max_shape) + profile.set_shape( + input_name, min_shape, max_shape, max_shape) if not profile: raise RuntimeError("Invalid optimization profile!") self.builder_config.add_optimization_profile(profile) @@ -250,7 +274,8 @@ def create_trt_engine(self): engine = self.builder.build_engine(self.network, self.builder_config) engine_inspector = engine.create_engine_inspector() - layer_info = engine_inspector.get_engine_information(trt.LayerInformationFormat.ONELINE) + layer_info = engine_inspector.get_engine_information( + trt.LayerInformationFormat.ONELINE) logging.info("========= TensorRT Engine Layer Information =========") logging.info(layer_info) @@ -264,14 +289,16 @@ def run_openimage(self, num_samples=8): image_ids = cocoGt.getImgIds() cat_ids = cocoGt.getCatIds() num_images = min(num_samples, len(image_ids)) - print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + print( + f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") detections = [] batch_idx = 0 for image_idx in range(0, num_images, self.batch_size): # Print Progress if batch_idx % 20 == 0: - print(f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") + print( + f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") end_idx = min(image_idx + self.batch_size, num_images) imgs = [] @@ -279,14 +306,24 @@ def run_openimage(self, num_samples=8): for idx in range(image_idx, end_idx): image_id = image_ids[idx] if self.precision == "fp32": - # Load the image using pytorch routine, but perform extra resize+normalize steps - img = load_img_pytorch(os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]), do_transform=True).numpy() + # Load the image using pytorch routine, but perform extra + # resize+normalize steps + img = load_img_pytorch( + os.path.join( + self.image_dir, + cocoGt.imgs[image_id]["file_name"]), + do_transform=True).numpy() elif self.precision == "int8": - img = np.load(os.path.join(G_OPENIMAGE_PREPROCESSED_INT8_PATH, cocoGt.imgs[image_id]["file_name"] + '.npy')) + img = np.load( + os.path.join( + G_OPENIMAGE_PREPROCESSED_INT8_PATH, + cocoGt.imgs[image_id]["file_name"] + + '.npy')) else: raise Exception(f"Unsupported precision {self.precision}") imgs.append(img) - img_original_sizes.append([cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]]) + img_original_sizes.append( + [cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]]) if self.precision == "fp32": imgs = np.ascontiguousarray(np.stack(imgs), dtype=np.float32) @@ -298,7 +335,8 @@ def run_openimage(self, num_samples=8): if self.verbose: duration = time.time() - start_time - logging.info(f"Batch {batch_idx} >>> Inference time: {duration}") + logging.info( + f"Batch {batch_idx} >>> Inference time: {duration}") # Concatted outputs is in the shape of [BS, 7001] # image_ids (duplicate of score for loadgen): [BS, 1000, 1] @@ -318,7 +356,14 @@ def run_openimage(self, num_samples=8): for prediction_idx in range(0, keep_count): # Each detection is in the order of [dummy_image_idx, xmin, ymin, xmax, ymax, score, label] # This is pre-callback (otherwise x and y are swapped). 
- single_detection = concat_output[idx * 7001 + prediction_idx * 7: idx * 7001 + prediction_idx * 7 + 7] + single_detection = concat_output[idx * + 7001 + + prediction_idx * + 7: idx * + 7001 + + prediction_idx * + 7 + + 7] loc = single_detection[1:5] label = single_detection[6] score = single_detection[5] @@ -371,8 +416,10 @@ class PytorchTester: To run this tester, you would need to clone the repo, and mount it to the container. """ - def __init__(self, pyt_ckpt_path, training_repo_path, batch_size=8, output_file="build/retinanet_pytorch.out"): - ssd_model_path = os.path.join(training_repo_path, "single_stage_detector", "ssd") + def __init__(self, pyt_ckpt_path, training_repo_path, + batch_size=8, output_file="build/retinanet_pytorch.out"): + ssd_model_path = os.path.join( + training_repo_path, "single_stage_detector", "ssd") with ScopedRestrictedImport([ssd_model_path] + sys.path): from model.retinanet import retinanet_from_backbone pyt_model = retinanet_from_backbone( @@ -409,7 +456,8 @@ def run_openimage(self, num_samples=8): image_ids = cocoGt.getImgIds() cat_ids = cocoGt.getCatIds() num_images = min(num_samples, len(image_ids)) - print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + print( + f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") coco_detections = [] for image_idx in range(0, num_images, self.batch_size): @@ -418,7 +466,8 @@ def run_openimage(self, num_samples=8): imgs = [] for idx in range(image_idx, end_idx): image_id = image_ids[idx] - image_path = os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]) + image_path = os.path.join( + self.image_dir, cocoGt.imgs[image_id]["file_name"]) img = load_img_pytorch(image_path).to(self.device) imgs.append(img) # print(cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]) @@ -426,7 +475,11 @@ def run_openimage(self, num_samples=8): img = [] for idx in range(image_idx, end_idx): image_id = image_ids[idx] - tensor = load_img_pytorch(os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]), do_transform=True).numpy() + tensor = load_img_pytorch( + os.path.join( + self.image_dir, + cocoGt.imgs[image_id]["file_name"]), + do_transform=True).numpy() print(tensor.shape) img.append(tensor) img = np.ascontiguousarray(np.stack(img), dtype=np.float32) @@ -445,7 +498,8 @@ def run_openimage(self, num_samples=8): # Convert from lrtb to [xmin, ymin, w, h] for cocoeval box_pred = boxes[pred_idx][:] xmin, ymin, xmax, ymax = box_pred - box_pred = np.array([xmin, ymin, xmax - xmin, ymax - ymin], dtype=np.float32) + box_pred = np.array( + [xmin, ymin, xmax - xmin, ymax - ymin], dtype=np.float32) score_pred = float(scores[pred_idx]) label_pred = int(labels[pred_idx]) coco_detection = { @@ -526,24 +580,40 @@ def main(): # Pytorch Tester if args.pytorch: # TODO: Check existence of training repo. 
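# A minimal sketch of the check this TODO asks for, assuming the layout
# PytorchTester expects (single_stage_detector/ssd inside the training
# repo); the helper name is hypothetical and not part of this patch:
import os

def training_repo_is_usable(training_repo_path):
    # PytorchTester imports the model code from <repo>/single_stage_detector/ssd
    return training_repo_path is not None and os.path.isdir(
        os.path.join(training_repo_path, "single_stage_detector", "ssd"))
# e.g. the None/exists guard below could then become:
#   if not training_repo_is_usable(args.training_repo_path): ...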
-        logging.info(f"Running Accuracy test for Pytorch reference implementation.")
-        if args.training_repo_path is None or not os.path.exists(args.training_repo_path):
-            raise RuntimeError("Please pull mlcommon training repo from https://github.com/mlcommons/training, and specify with --training_repo_path")
-        pt_tester = PytorchTester(args.pyt_ckpt_path, args.training_repo_path, args.batch_size)
+        logging.info(
+            "Running accuracy test for the PyTorch reference implementation.")
+        if args.training_repo_path is None or not os.path.exists(
+                args.training_repo_path):
+            raise RuntimeError(
+                "Please pull the mlcommons training repo from https://github.com/mlcommons/training and specify it with --training_repo_path")
+        pt_tester = PytorchTester(
+            args.pyt_ckpt_path,
+            args.training_repo_path,
+            args.batch_size)
         pt_acc = pt_tester.run_openimage(args.num_samples)
-        logging.info(f"Pytorch mAP Score: {pt_acc}, Reference: 0.375, % of ref: {pt_acc / 0.375}")
+        logging.info(
+            f"PyTorch mAP Score: {pt_acc}, Reference: 0.375, % of ref: {pt_acc / 0.375}")
     else:
         # TRT Tester
-        logging.info(f"Running accuracy test for retinanet using {args.engine_file} ...")
-        tester = TRTTester(args.engine_file, args.batch_size, args.trt_precision, args.onnx_path, args.skip_engine_build, args.verbose)
+        logging.info(
+            f"Running accuracy test for retinanet using {args.engine_file} ...")
+        tester = TRTTester(
+            args.engine_file,
+            args.batch_size,
+            args.trt_precision,
+            args.onnx_path,
+            args.skip_engine_build,
+            args.verbose)
         # acc = tester.run_openimage(args.num_samples)
         acc = tester.run_openimage(args.num_samples)
-        logging.info(f"mAP Score: {acc}, Reference: 0.375, % of ref: {acc / 0.375}")
+        logging.info(
+            f"mAP Score: {acc}, Reference: 0.375, % of ref: {acc / 0.375}")

     # To run the TRT tester:
     # python3 -m code.retinanet.tensorrt.infer --engine_file /tmp/retina.b8.int8.engine --num_samples=1200 --batch_size=8 --trt_precision int8
     # To run the pytorch tester:
-    # python3 -m code.retinanet.tensorrt.infer --pytorch --num_samples=1200 --batch_size=8
+    # python3 -m code.retinanet.tensorrt.infer --pytorch --num_samples=1200
+    # --batch_size=8


 if __name__ == "__main__":
diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
index 354fb4afdb..a09fd9715e 100644
--- a/script/app-mlperf-inference-nvidia/customize.py
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -2,26 +2,30 @@
 import os
 import shutil
+

 def preprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     env = i['env']

     if str(env.get('CM_RUN_STATE_DOCKER', '')).lower() in ['1', 'true', 'yes']:
         return {'return': 0}

     if env.get('CM_MODEL', '') == '':
-        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the model to run'}

     make_command = env['MLPERF_NVIDIA_RUN_COMMAND']

     if env.get('CM_MLPERF_DEVICE', '') == '':
-        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the device to run on'}

-    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes" and make_command == "run_harness":
+    if env.get('CM_MLPERF_SKIP_RUN',
+               '') == "yes" and make_command == "run_harness":
         return {'return': 0}

     env['MLPERF_SCRATCH_PATH'] =
env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] @@ -36,42 +40,66 @@ def preprocess(i): cmds.append(f"make prebuild NETWORK_NODE=SUT") if env['CM_MODEL'] == "resnet50": - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'imagenet') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'imagenet') if not os.path.exists(target_data_path): - cmds.append(f"ln -sf {env['CM_DATASET_IMAGENET_PATH']} {target_data_path}") + cmds.append( + f"ln -sf {env['CM_DATASET_IMAGENET_PATH']} {target_data_path}") - model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'ResNet50', 'resnet50_v1.onnx') + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'ResNet50', + 'resnet50_v1.onnx') if not os.path.exists(os.path.dirname(model_path)): cmds.append(f"mkdir -p {os.path.dirname(model_path)}") if not os.path.exists(model_path): - cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + cmds.append( + f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") model_name = "resnet50" elif "bert" in env['CM_MODEL']: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'squad') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'squad') if not os.path.exists(target_data_path): cmds.append("make download_data BENCHMARKS='bert'") - fp32_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'bert_large_v1_1.onnx') - int8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'bert_large_v1_1_fake_quant.onnx') - vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') + fp32_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'bert_large_v1_1.onnx') + int8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'bert_large_v1_1_fake_quant.onnx') + vocab_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'vocab.txt') if not os.path.exists(os.path.dirname(fp32_model_path)): cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") if not os.path.exists(fp32_model_path): - cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}") + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}") if not os.path.exists(int8_model_path): - cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}") + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}") if not os.path.exists(vocab_path): - cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}") + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}") model_name = "bert" model_path = fp32_model_path elif "stable-diffusion" in env["CM_MODEL"]: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'coco', 'SDXL') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'coco', 'SDXL') tsv_file = os.path.join(target_data_path, "captions_5k_final.tsv") if os.path.exists(tsv_file): with open(tsv_file, "r") as file: @@ -81,11 +109,19 @@ def preprocess(i): shutil.rmtree(target_data_path) if not os.path.exists(tsv_file): os.makedirs(target_data_path, exist_ok=True) - #cmds.append("make download_data BENCHMARKS='stable-diffusion-xl'") + # cmds.append("make download_data BENCHMARKS='stable-diffusion-xl'") env['CM_REQUIRE_COCO2014_DOWNLOAD'] = 'yes' - cmds.append(f"cp -r \$CM_DATASET_PATH_ROOT/captions/captions.tsv 
{target_data_path}/captions_5k_final.tsv" ) - cmds.append(f"cp -r \$CM_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt" ) - fp16_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL', 'official_pytorch', 'fp16', 'stable_diffusion_fp16') + cmds.append( + f"cp -r \\$CM_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv") + cmds.append( + f"cp -r \\$CM_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt") + fp16_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'official_pytorch', + 'fp16', + 'stable_diffusion_fp16') if not os.path.exists(os.path.dirname(fp16_model_path)): cmds.append(f"mkdir -p {os.path.dirname(fp16_model_path)}") @@ -94,102 +130,165 @@ def preprocess(i): if os.path.islink(fp16_model_path): cmds.append(f"rm -f {fp16_model_path}") env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' - cmds.append(f"cp -r \$SDXL_CHECKPOINT_PATH {fp16_model_path}") + cmds.append(f"cp -r \\$SDXL_CHECKPOINT_PATH {fp16_model_path}") model_name = "stable-diffusion-xl" model_path = fp16_model_path elif "3d-unet" in env['CM_MODEL']: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'kits19', 'data') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'KiTS19', + 'kits19', + 'data') target_data_path_base_dir = os.path.dirname(target_data_path) if not os.path.exists(target_data_path_base_dir): cmds.append(f"mkdir -p {target_data_path_base_dir}") - inference_cases_json_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'inference_cases.json') - calibration_cases_json_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'calibration_cases.json') + inference_cases_json_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'inference_cases.json') + calibration_cases_json_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'calibration_cases.json') - if not os.path.exists(target_data_path) or not os.path.exists(inference_cases_json_path) or not os.path.exists(calibration_cases_json_path): - #cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}") + if not os.path.exists(target_data_path) or not os.path.exists( + inference_cases_json_path) or not os.path.exists(calibration_cases_json_path): + # cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}") cmds.append("make download_data BENCHMARKS='3d-unet'") - model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', '3d-unet-kits19', '3dUNetKiTS19.onnx') + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + '3d-unet-kits19', + '3dUNetKiTS19.onnx') model_name = "3d-unet" elif "rnnt" in env['CM_MODEL']: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'LibriSpeech', 'dev-clean') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'LibriSpeech', + 'dev-clean') target_data_path_base_dir = os.path.dirname(target_data_path) if not os.path.exists(target_data_path_base_dir): cmds.append(f"mkdir -p {target_data_path_base_dir}") if not os.path.exists(target_data_path): - #cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}") + # cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}") cmds.append("make download_data BENCHMARKS='rnnt'") - model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'rnn-t', 'DistributedDataParallel_1576581068.9962234-epoch-100.pt') + model_path = os.path.join( + 
env['MLPERF_SCRATCH_PATH'], + 'models', + 'rnn-t', + 'DistributedDataParallel_1576581068.9962234-epoch-100.pt') model_name = "rnnt" elif "pdlrm" in env['CM_MODEL']: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'criteo') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'criteo') if not os.path.exists(target_data_path): - cmds.append(f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}") - - model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'dlrm', 'tb00_40M.pt') + cmds.append( + f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}") + + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'dlrm', + 'tb00_40M.pt') if not os.path.exists(os.path.dirname(model_path)): cmds.append(f"mkdir -p {os.path.dirname(model_path)}") if not os.path.exists(model_path): - cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + cmds.append( + f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") model_name = "dlrm" elif "dlrm-v2" in env['CM_MODEL']: model_name = "dlrm-v2" elif env['CM_MODEL'] == "retinanet": - #print(env) + # print(env) dataset_path = env['CM_DATASET_OPENIMAGES_PATH'] - #return {'return': 1, 'error': 'error'} + # return {'return': 1, 'error': 'error'} annotations_path = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] - target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf') + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf') if not os.path.exists(target_data_path_dir): cmds.append(f"mkdir -p {target_data_path_dir}") target_data_path = os.path.join(target_data_path_dir, 'annotations') if not os.path.exists(target_data_path): cmds.append(f"ln -sf {annotations_path} {target_data_path}") - target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf', 'validation') + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'open-images-v6-mlperf', + 'validation') if not os.path.exists(target_data_path_dir): cmds.append(f"mkdir -p {target_data_path_dir}") target_data_path = os.path.join(target_data_path_dir, 'data') if not os.path.exists(target_data_path): cmds.append(f"ln -sf {dataset_path} {target_data_path}") - calibration_dataset_path=env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] - target_data_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf','calibration', 'train') + calibration_dataset_path = env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'open-images-v6-mlperf', + 'calibration', + 'train') if not os.path.exists(target_data_path_dir): cmds.append(f"mkdir -p {target_data_path_dir}") target_data_path = os.path.join(target_data_path_dir, 'data') if not os.path.exists(target_data_path): - cmds.append(f"ln -sf {calibration_dataset_path} {target_data_path}") - - preprocessed_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data') - target_model_path_dir = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'retinanet-resnext50-32x4d') + cmds.append( + f"ln -sf {calibration_dataset_path} {target_data_path}") + + preprocessed_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'preprocessed_data') + target_model_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'retinanet-resnext50-32x4d') if not os.path.exists(target_model_path_dir): cmds.append(f"mkdir -p 
{target_model_path_dir}") - model_path = os.path.join(target_model_path_dir, 'retinanet-fpn-torch2.1-postprocessed.onnx') - alt_model_path = os.path.join(target_model_path_dir, 'retinanet-fpn-torch2.2-postprocessed.onnx') + model_path = os.path.join( + target_model_path_dir, + 'retinanet-fpn-torch2.1-postprocessed.onnx') + alt_model_path = os.path.join( + target_model_path_dir, + 'retinanet-fpn-torch2.2-postprocessed.onnx') if not os.path.exists(model_path) and os.path.exists(alt_model_path): cmds.append(f"ln -s {alt_model_path} {model_path}") model_name = "retinanet" elif "gptj" in env['CM_MODEL']: - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'cnn-daily-mail', 'cnn_eval.json') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'cnn-daily-mail', + 'cnn_eval.json') if not os.path.exists(target_data_path): cmds.append("make download_data BENCHMARKS='gptj'") - fp32_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'checkpoint-final') - fp8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'fp8-quantized-ammo', env['CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX']) - vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') + fp32_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'checkpoint-final') + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'fp8-quantized-ammo', + env['CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX']) + vocab_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'vocab.txt') if not os.path.exists(os.path.dirname(fp32_model_path)): cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") @@ -197,74 +296,114 @@ def preprocess(i): cmds.append(f"mkdir -p {os.path.dirname(fp8_model_path)}") if not os.path.exists(fp32_model_path): - env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' # download via prehook_deps + # download via prehook_deps + env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' if make_command == "build_engine": - cmds.append(f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") + cmds.append( + f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") model_name = "gptj" model_path = fp8_model_path elif "llama2" in env["CM_MODEL"]: # path to which the data file is present - target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca') + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca') # path to the dataset file - target_data_file_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca','open_orca_gpt4_tokenized_llama.sampled_24576.pkl') + target_data_file_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca', + 'open_orca_gpt4_tokenized_llama.sampled_24576.pkl') tmp_tp_size = env['CM_NVIDIA_TP_SIZE'] if tmp_tp_size == "1": - fp8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'],'models','Llama2','fp8-quantized-ammo',f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8-02072024') + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'Llama2', + 'fp8-quantized-ammo', + f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8-02072024') else: - fp8_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'],'models','Llama2','fp8-quantized-ammo',f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8') + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'Llama2', + 'fp8-quantized-ammo', + 
f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8') if not os.path.exists(target_data_file_path): if env.get('CM_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '': - return {'return': 1, 'error': 'Please specify the path to LLAMA2 dataset (pickle file)'} + return { + 'return': 1, 'error': 'Please specify the path to LLAMA2 dataset (pickle file)'} if not os.path.exists(target_data_path): cmds.append(f"mkdir {target_data_path}") - cmds.append(f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") - - + cmds.append( + f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") model_name = "llama2-70b" model_path = fp8_model_path - #cmds.append(f"make prebuild") + # cmds.append(f"make prebuild") if make_command == "download_model": if not os.path.exists(model_path): if "llama2" in env['CM_MODEL']: if not os.path.exists(os.path.join(model_path, 'config.json')): - return {'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} + return { + 'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} else: cmds.append(f"make download_model BENCHMARKS='{model_name}'") elif "stable-diffusion" in env['CM_MODEL']: folders = ["clip1", "clip2", "unetxl", "vae"] for folder in folders: - onnx_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL', 'onnx_models', folder, 'model.onnx') + onnx_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'onnx_models', + folder, + 'model.onnx') if not os.path.exists(onnx_model_path): env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' - cmds.append(f"make download_model BENCHMARKS='{model_name}'") + cmds.append( + f"make download_model BENCHMARKS='{model_name}'") break if scenario.lower() == "singlestream": - ammo_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL', 'ammo_models', 'unetxl.int8', 'unet.onnx') + ammo_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'ammo_models', + 'unetxl.int8', + 'unet.onnx') if not os.path.exists(ammo_model_path): env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' - cmds.append(f"make download_model BENCHMARKS='{model_name}'") + cmds.append( + f"make download_model BENCHMARKS='{model_name}'") else: - return {'return':0} + return {'return': 0} elif make_command == "preprocess_data": if env['CM_MODEL'] == "rnnt": - cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") - cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") + cmds.append( + f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") + cmds.append( + f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") if "llama2" in env["CM_MODEL"]: # Preprocessing script in the inference results repo is not checking whether the preprocessed # file is already there, so we are handling it here. 
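A note on the convention used across these customize.py scripts: preprocess() accumulates shell steps as strings in the `cmds` list and reports status through CM's dictionary protocol, {'return': 0} on success and {'return': 1, 'error': ...} on failure. A minimal sketch of how such a command list could be executed under that protocol (the run_cmds helper below is hypothetical, not part of this diff):

import subprocess

def run_cmds(cmds):
    # Execute each accumulated shell command, stopping at the first failure.
    for cmd in cmds:
        ret = subprocess.call(cmd, shell=True)
        if ret != 0:
            # CM protocol: non-zero 'return' plus a human-readable 'error'.
            return {'return': 1, 'error': f'command failed: {cmd}'}
    return {'return': 0}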
- target_preprocessed_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca', 'input_ids_padded.npy') + target_preprocessed_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca', + 'input_ids_padded.npy') if not os.path.exists(target_preprocessed_data_path): cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") else: cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") else: - scenario=scenario.lower() + scenario = scenario.lower() if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": test_mode = "AccuracyOnly" @@ -272,11 +411,15 @@ def preprocess(i): test_mode = "PerformanceOnly" elif env['CM_MLPERF_LOADGEN_MODE'] == "compliance": test_mode = "" - test_name = env.get('CM_MLPERF_LOADGEN_COMPLIANCE_TEST', 'test01').lower() - env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format(test_name) + test_name = env.get( + 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST', + 'test01').lower() + env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format( + test_name) make_command = "run_audit_{}_once".format(test_name) else: - return {'return': 1, 'error': 'Unsupported mode: {}'.format(env['CM_MLPERF_LOADGEN_MODE'])} + return {'return': 1, 'error': 'Unsupported mode: {}'.format( + env['CM_MLPERF_LOADGEN_MODE'])} run_config = '' @@ -298,8 +441,10 @@ def preprocess(i): run_config += f" --server_target_qps={server_target_qps}" target_latency = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') - singlestream_target_latency = env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') - multistream_target_latency = env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') + singlestream_target_latency = env.get( + 'CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') + multistream_target_latency = env.get( + 'CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') if target_latency: target_latency_ns = int(float(target_latency) * 1000000) if scenario == "singlestream" and not singlestream_target_latency: @@ -308,10 +453,12 @@ def preprocess(i): run_config += f" --multi_stream_expected_latency_ns={target_latency_ns}" if singlestream_target_latency: - singlestream_target_latency_ns = int(float(singlestream_target_latency) * 1000000) + singlestream_target_latency_ns = int( + float(singlestream_target_latency) * 1000000) run_config += f" --single_stream_expected_latency_ns={singlestream_target_latency_ns}" if multistream_target_latency: - multistream_target_latency_ns = int(float(multistream_target_latency) * 1000000) + multistream_target_latency_ns = int( + float(multistream_target_latency) * 1000000) run_config += f" --multi_stream_expected_latency_ns={multistream_target_latency_ns}" high_accuracy = "99.9" in env['CM_MODEL'] @@ -320,20 +467,20 @@ def preprocess(i): use_lon = env.get('CM_MLPERF_NVIDIA_HARNESS_LON') if use_lon: - config_ver_list.append( "lon_node") - #run_config += " --lon_node" + config_ver_list.append("lon_node") + # run_config += " --lon_node" maxq = env.get('CM_MLPERF_NVIDIA_HARNESS_MAXQ') if maxq: - config_ver_list.append( "maxq") + config_ver_list.append("maxq") if high_accuracy: - config_ver_list.append( "high_accuracy") + config_ver_list.append("high_accuracy") use_triton = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_TRITON') if use_triton: run_config += " --use_triton " - config_ver_list.append( "triton") + config_ver_list.append("triton") if config_ver_list: run_config += f" --config_ver={'_'.join(config_ver_list)}" @@ -354,7 +501,8 @@ def preprocess(i): if gpu_copy_streams: run_config += f" --gpu_copy_streams={gpu_copy_streams}" 
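Nearly all of the option handling that follows (gpu_copy_streams above, gpu_inference_streams, dla_copy_streams and the rest below) repeats one pattern: read an optional CM_MLPERF_NVIDIA_HARNESS_* variable from `env` and, when it is set, append the matching harness flag to `run_config`. A hedged sketch of that pattern (the append_option helper is illustrative only; the script keeps these checks inline):

def append_option(run_config, env, env_key, option):
    # Append " --option=value" only when the variable is present and non-empty.
    value = env.get(env_key, '')
    if value != '':
        run_config += f" --{option}={value}"
    return run_config

# Example: with env = {'CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS': '4'},
# append_option('', env, 'CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS',
#               'gpu_copy_streams') returns " --gpu_copy_streams=4";
# an unset variable leaves run_config unchanged.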
- gpu_inference_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') + gpu_inference_streams = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') if gpu_inference_streams: run_config += f" --gpu_inference_streams={gpu_inference_streams}" @@ -362,7 +510,8 @@ def preprocess(i): if dla_copy_streams: run_config += f" --dla_copy_streams={dla_copy_streams}" - dla_inference_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') + dla_inference_streams = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') if dla_inference_streams: run_config += f" --dla_inference_streams={dla_inference_streams}" @@ -378,7 +527,8 @@ def preprocess(i): if input_format: run_config += f" --input_format={input_format}" - performance_sample_count = env.get('CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT') + performance_sample_count = env.get( + 'CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT') if performance_sample_count: run_config += f" --performance_sample_count={performance_sample_count}" @@ -390,12 +540,16 @@ def preprocess(i): if audio_batch_size: run_config += f" --audio_batch_size={audio_batch_size}" - disable_encoder_plugin = str(env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', '')) - if disable_encoder_plugin and disable_encoder_plugin.lower() not in [ "no", "false", "0", "" ]: + disable_encoder_plugin = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', '')) + if disable_encoder_plugin and disable_encoder_plugin.lower() not in [ + "no", "false", "0", ""]: run_config += " --disable_encoder_plugin" - disable_beta1_smallk = str(env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', '')) - if disable_beta1_smallk and disable_beta1_smallk.lower() in [ "yes", "true", "1" ]: + disable_beta1_smallk = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', '')) + if disable_beta1_smallk and disable_beta1_smallk.lower() in [ + "yes", "true", "1"]: run_config += " --disable_beta1_smallk" workspace_size = env.get('CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE') @@ -410,42 +564,59 @@ def preprocess(i): run_config += f" --log_dir={log_dir}" use_graphs = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', '')) - if use_graphs and use_graphs.lower() not in [ "no", "false", "0", "" ]: + if use_graphs and use_graphs.lower() not in ["no", "false", "0", ""]: run_config += " --use_graphs" - use_deque_limit = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT')) - if use_deque_limit and use_deque_limit.lower() not in [ "no", "false", "0" ]: + use_deque_limit = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT')) + if use_deque_limit and use_deque_limit.lower() not in [ + "no", "false", "0"]: run_config += " --use_deque_limit" - deque_timeout_usec = env.get('CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') + deque_timeout_usec = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') if deque_timeout_usec: run_config += f" --deque_timeout_usec={deque_timeout_usec}" - use_cuda_thread_per_device = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', '')) - if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() not in [ "no", "false", "0", "" ]: + use_cuda_thread_per_device = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', '')) + if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() not in [ + "no", "false", "0", ""]: run_config += " --use_cuda_thread_per_device" - run_infer_on_copy_streams = str(env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) - if run_infer_on_copy_streams and 
run_infer_on_copy_streams.lower() not in [ "no", "false", "0", "" ]: + run_infer_on_copy_streams = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) + if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [ + "no", "false", "0", ""]: run_config += " --run_infer_on_copy_streams" - start_from_device = str(env.get('CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE', '')) - if start_from_device and start_from_device.lower() not in [ "no", "false", "0", "" ]: + start_from_device = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE', + '')) + if start_from_device and start_from_device.lower() not in [ + "no", "false", "0", ""]: run_config += " --start_from_device" - end_on_device = str(env.get('CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE', '')) - if end_on_device and end_on_device.lower() not in [ "no", "false", "0", "" ]: + end_on_device = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE', + '')) + if end_on_device and end_on_device.lower() not in [ + "no", "false", "0", ""]: run_config += " --end_on_device" max_dlas = env.get('CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS') if max_dlas: run_config += f" --max_dlas={max_dlas}" - graphs_max_seqlen = env.get('CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') + graphs_max_seqlen = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') if graphs_max_seqlen: run_config += f" --graphs_max_seqlen={graphs_max_seqlen}" - num_issue_query_threads = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') + num_issue_query_threads = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') if num_issue_query_threads: run_config += f" --num_issue_query_threads={num_issue_query_threads}" @@ -453,16 +624,19 @@ def preprocess(i): if soft_drop: run_config += f" --soft_drop={soft_drop}" - use_small_tile_gemm_plugin = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', '')) - if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [ "no", "false", "0", "" ]: + use_small_tile_gemm_plugin = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', '')) + if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [ + "no", "false", "0", ""]: run_config += f" --use_small_tile_gemm_plugin" - audio_buffer_num_lines = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') + audio_buffer_num_lines = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') if audio_buffer_num_lines: run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}" use_fp8 = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_FP8', '')) - if use_fp8 and use_fp8.lower() not in [ "no", "false", "0", "" ]: + if use_fp8 and use_fp8.lower() not in ["no", "false", "0", ""]: run_config += f" --use_fp8" if "llama2" in env["CM_MODEL"]: @@ -470,18 +644,21 @@ def preprocess(i): run_config += f" --tensor_parallelism={tmp_tp_size}" enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') - if enable_sort and enable_sort.lower() not in [ "no", "false", "0" ]: + if enable_sort and enable_sort.lower() not in ["no", "false", "0"]: run_config += f" --enable_sort" - sdxl_server_batcher_time_limit = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') + sdxl_server_batcher_time_limit = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') if sdxl_server_batcher_time_limit: run_config += f" --sdxl_batcher_time_limit {sdxl_server_batcher_time_limit}" - num_sort_segments = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') + num_sort_segments = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') if 
num_sort_segments: run_config += f" --num_sort_segments={num_sort_segments}" - embedding_weights_on_gpu_part = env.get('CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') + embedding_weights_on_gpu_part = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') if embedding_weights_on_gpu_part != '': run_config += f" --embedding_weights_on_gpu_part={embedding_weights_on_gpu_part}" @@ -489,8 +666,12 @@ def preprocess(i): if num_warmups != '': run_config += f" --num_warmups={num_warmups}" - skip_postprocess = str(env.get('CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS', '')) - if skip_postprocess and skip_postprocess.lower() not in [ "no", "false", "0", "" ]: + skip_postprocess = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS', + '')) + if skip_postprocess and skip_postprocess.lower() not in [ + "no", "false", "0", ""]: run_config += f" --skip_postprocess" if test_mode: @@ -498,9 +679,12 @@ def preprocess(i): else: test_mode_string = "" - extra_build_engine_options_string = env.get('CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') + extra_build_engine_options_string = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') - extra_run_options_string = env.get('CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', '') #will be ignored during build engine + extra_run_options_string = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', + '') # will be ignored during build engine if "stable-diffusion" in env["CM_MODEL"]: extra_build_engine_options_string += f" --model_path {os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL/')}" @@ -516,11 +700,12 @@ def preprocess(i): # print(env) - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py index e99e538dd0..53d0a44be7 100644 --- a/script/app-mlperf-inference-qualcomm/customize.py +++ b/script/app-mlperf-inference-qualcomm/customize.py @@ -2,23 +2,27 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} kilt_root = env['CM_KILT_CHECKOUT_PATH'] @@ -39,10 +43,11 @@ def preprocess(i): env['+ CXXFLAGS'] = [] if '+CPLUS_INCLUDE_PATH' not in env: - env['+CPLUS_INCLUDE_PATH'] = [] + env['+CPLUS_INCLUDE_PATH'] = [] if env['CM_MLPERF_DEVICE'] == "qaic": - env['kilt_model_root'] = os.path.dirname(env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) + env['kilt_model_root'] = os.path.dirname( + env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) if env.get('CM_MODEL') == 
"resnet50": env['dataset_imagenet_preprocessed_subset_fof'] = env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] @@ -50,14 +55,19 @@ def preprocess(i): elif "bert" in env.get('CM_MODEL'): env['dataset_squad_tokenized_max_seq_length'] = env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] - env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] - env['dataset_squad_tokenized_input_ids'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) - env['dataset_squad_tokenized_input_mask'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) - env['dataset_squad_tokenized_segment_ids'] = os.path.basename(env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) + env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] + env['dataset_squad_tokenized_input_ids'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) + env['dataset_squad_tokenized_input_mask'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) + env['dataset_squad_tokenized_segment_ids'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) elif "retinanet" in env.get('CM_MODEL'): - env['kilt_prior_bin_path'] = os.path.join(kilt_root, "plugins", "nms-abp", "data") - env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename(env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']) + env['kilt_prior_bin_path'] = os.path.join( + kilt_root, "plugins", "nms-abp", "data") + env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename( + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']) env['kilt_object_detection_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] env['+ CXXFLAGS'].append("-DMODEL_RX50") env['+ CXXFLAGS'].append("-DSDK_1_11_X") @@ -66,46 +76,70 @@ def preprocess(i): if loc_offset: env['+ CXXFLAGS'].append("-DMODEL_RX50") - keys = [ 'LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE' ] + keys = ['LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE'] if env.get('CM_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes': env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1") - for j in range(0,4): + for j in range(0, 4): keys.append(f'LOC_OFFSET{j}') keys.append(f'LOC_SCALE{j}') keys.append(f'CONF_OFFSET{j}') keys.append(f'CONF_SCALE{j}') for key in keys: - value = env.get('CM_QAIC_MODEL_RETINANET_'+key, '') + value = env.get('CM_QAIC_MODEL_RETINANET_' + key, '') if value != '': env['+ CXXFLAGS'].append(f" -D{key}_={value} ") if env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_SERVER': - source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "server", "pack.cpp")) - source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "server", "server.cpp")) + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "network", + "bert", + "server", + "pack.cpp")) + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "network", + "bert", + "server", + "server.cpp")) env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1") elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': - #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp")) - #env['+CPLUS_INCLUDE_PATH'].append(kilt_root) - #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp")) + # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp")) + # env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", 
"client.cpp")) env['+ CXXFLAGS'].append("-DNETWORK_DIVISION") elif env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT': - source_files.append(os.path.join(kilt_root, "benchmarks", "standalone", "bert", "pack.cpp")) + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "standalone", + "bert", + "pack.cpp")) script_path = i['run_script_input']['path'] if env['CM_MODEL'] == "retinanet": env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): if file.endswith(".c") or file.endswith(".cpp"): source_files.append(file) if 'SERVER' not in env.get('CM_BENCHMARK', ''): - source_files.append(os.path.join(kilt_root, "benchmarks", "harness", "harness.cpp")) + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "harness", + "harness.cpp")) - #source_files.append(env['CM_QAIC_API_SRC_FILE']) + # source_files.append(env['CM_QAIC_API_SRC_FILE']) env['+CPLUS_INCLUDE_PATH'].append(kilt_root) env['+C_INCLUDE_PATH'].append(kilt_root) @@ -117,7 +151,14 @@ def preprocess(i): env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) elif env['CM_MLPERF_DEVICE'] == 'qaic': - source_files.append(os.path.join(kilt_root, "devices", "qaic", "api", "master", "QAicInfApi.cpp")) + source_files.append( + os.path.join( + kilt_root, + "devices", + "qaic", + "api", + "master", + "QAicInfApi.cpp")) print(f"Compiling the source files: {source_files}") env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) @@ -131,14 +172,16 @@ def preprocess(i): env['+ CXXFLAGS'].append("-DKILT_DEVICE_" + env['device'].upper()) # add preprocessor flag like "#define CM_MODEL_RESNET50" - #env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) + # env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" - env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + env['CM_MLPERF_BACKEND'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + + env['CM_MLPERF_BACKEND'].upper()) # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" - env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + env['CM_MLPERF_DEVICE'].upper()) + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + + env['CM_MLPERF_DEVICE'].upper()) if '+ LDCXXFLAGS' not in env: - env['+ LDCXXFLAGS'] = [ ] + env['+ LDCXXFLAGS'] = [] env['+ LDCXXFLAGS'] += [ "-lmlperf_loadgen", @@ -147,7 +190,8 @@ def preprocess(i): ] # e.g. -lonnxruntime if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: - env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + env['+ LDCXXFLAGS'].append('-l' + + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) # e.g. 
-lcudart if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) @@ -159,13 +203,16 @@ def preprocess(i): env['CM_RUN_DIR'] = env.get('CM_MLPERF_OUTPUT_DIR', os.getcwd()) if 'CM_MLPERF_CONF' not in env: - env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") if 'CM_MLPERF_USER_CONF' not in env: - env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF']# to LOADGEN_MLPERF_CONF - env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF']# to LOADGEN_USER_CONF + # to LOADGEN_MLPERF_CONF + env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF'] + # to LOADGEN_USER_CONF + env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF'] env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO'] loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] @@ -176,14 +223,14 @@ def preprocess(i): elif loadgen_mode == 'compliance': kilt_loadgen_mode = 'PerformanceOnly' else: - return {'return':1, 'error': 'Unknown loadgen mode'} + return {'return': 1, 'error': 'Unknown loadgen mode'} env['loadgen_mode'] = kilt_loadgen_mode + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference-redhat/customize.py b/script/app-mlperf-inference-redhat/customize.py index 522bafcb3a..add2a916f4 100644 --- a/script/app-mlperf-inference-redhat/customize.py +++ b/script/app-mlperf-inference-redhat/customize.py @@ -2,37 +2,42 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if 'CM_MODEL' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the model to run'} + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} if 'CM_MLPERF_BACKEND' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the backend'} + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} if 'CM_MLPERF_DEVICE' not in env: - return {'return': 1, 'error': 'Please select a variation specifying the device to run on'} + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} r = get_run_cmd(env['CM_MODEL'], i) if r['return'] > 0: return r run_cmd = r['run_cmd'] - run_dir = r ['run_dir'] + run_dir = r['run_dir'] print(run_cmd) print(run_dir) env['CM_MLPERF_RUN_CMD'] = run_cmd env['CM_RUN_DIR'] = run_dir env['CM_RUN_CMD'] = run_cmd - return {'return':0} - #return {'return':1, 'error': 'Run command needs to be tested'} + return {'return': 0} + # return {'return':1, 'error': 'Run command needs to be tested'} + def get_run_cmd(model, i): env = i['env'] @@ -54,7 +59,12 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm 
--user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "CTuning" - run_dir = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", "gptj-99") + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + "open", + submitter, + "code", + "gptj-99") return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} @@ -65,7 +75,9 @@ def get_run_cmd(model, i): outdir = env['CM_MLPERF_OUTPUT_DIR'] mlperf_conf_path = env['CM_MLPERF_CONF'] user_conf_path = env['CM_MLPERF_USER_CONF'] - api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost:8000/v1') + api_server = env.get( + 'CM_MLPERF_INFERENCE_API_SERVER', + 'localhost:8000/v1') api_model_name = env['CM_VLLM_SERVER_MODEL_NAME'] dataset_path = env['CM_DATASET_OPENORCA_PATH'] precision = env['CM_MLPERF_MODEL_PRECISION'] @@ -76,12 +88,18 @@ def get_run_cmd(model, i): run_cmd = f"python3 -u 'main.py' --scenario {scenario} --model-path {api_model_name} --api-model-name {api_model_name} --api-server {api_server} --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " submitter = "RedHat-Supermicro" - run_dir = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", model) + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + "open", + submitter, + "code", + model) return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-inference/build_dockerfiles.py b/script/app-mlperf-inference/build_dockerfiles.py index 4f5e7603ad..72aeaf766d 100644 --- a/script/app-mlperf-inference/build_dockerfiles.py +++ b/script/app-mlperf-inference/build_dockerfiles.py @@ -3,48 +3,48 @@ import pathlib current_file_path = pathlib.Path(__file__).parent.resolve() docker_os = { - "ubuntu": ["18.04","20.04","22.04"], - "rhel": ["9"] - } + "ubuntu": ["18.04", "20.04", "22.04"], + "rhel": ["9"] +} dataset = { - "resnet50": "imagenet", - "retinanet": "openimages", - "bert-99.9": "squad" + "resnet50": "imagenet", + "retinanet": "openimages", + "bert-99.9": "squad" +} +variations = { + "resnet50": { + "tensorflow": { + "cpu": ["python"] + }, + "onnxruntime": { + "cpu": ["python", "cpp"] + }, + "pytorch": { + "cpu": [] + } + }, + "retinanet": { + "tensorflow": { + }, + "onnxruntime": { + "cpu": ["python", "cpp"] + }, + "pytorch": { + "cpu": ["python"] } -variations = { - "resnet50": { - "tensorflow": { - "cpu": [ "python" ] - }, - "onnxruntime": { - "cpu": [ "python", "cpp" ] - }, - "pytorch": { - "cpu": [ ] - } + }, + "bert-99.9": { + "tensorflow": { + "cpu": ["python"] }, - "retinanet": { - "tensorflow": { - }, - "onnxruntime": { - "cpu": [ "python", "cpp" ] - }, - "pytorch": { - "cpu": [ "python" ] - } + "onnxruntime": { + "cpu": ["python"] }, - "bert-99.9": { - "tensorflow": { - "cpu": [ "python" ] - }, - "onnxruntime": { - "cpu": [ "python" ] - }, - "pytorch": { - "cpu": [] - } + "pytorch": { + "cpu": [] } } +} for _os in docker_os: for version in docker_os[_os]: @@ -52,46 +52,56 @@ for backend in variations[model]: for device in variations[model][backend]: for implementation in variations[model][backend][device]: - variation_string=",_"+model+",_"+backend+",_"+device+",_"+implementation - file_name_ext = "_" + implementation + "_" + backend+"_"+device - 
dockerfile_path = os.path.join(current_file_path,'dockerfiles', model, _os +'_'+version+ file_name_ext +'.Dockerfile') + variation_string = ",_" + model + ",_" + \ + backend + ",_" + device + ",_" + implementation + file_name_ext = "_" + implementation + "_" + backend + "_" + device + dockerfile_path = os.path.join( + current_file_path, + 'dockerfiles', + model, + _os + + '_' + + version + + file_name_ext + + '.Dockerfile') cm_input = {'action': 'run', - 'automation': 'script', - 'tags': 'app,mlperf,inference,generic'+variation_string, - 'adr': {'compiler': - {'tags': 'gcc'}, - 'inference-src': - {'tags': '_octoml'}, - 'openimages-preprocessed': - {'tags': '_50'} - }, - 'print_deps': True, - 'quiet': True, - 'silent': True, - 'fake_run': True - } + 'automation': 'script', + 'tags': 'app,mlperf,inference,generic' + variation_string, + 'adr': {'compiler': + {'tags': 'gcc'}, + 'inference-src': + {'tags': '_octoml'}, + 'openimages-preprocessed': + {'tags': '_50'} + }, + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True + } r = cmind.access(cm_input) print_deps = r['new_state']['print_deps'] - comments = [ "#RUN " + dep for dep in print_deps ] + comments = ["#RUN " + dep for dep in print_deps] comments.append("") - comments.append("# Run CM workflow for MLPerf inference") + comments.append( + "# Run CM workflow for MLPerf inference") cm_docker_input = {'action': 'run', - 'automation': 'script', - 'tags': 'build,dockerfile', - 'docker_os': _os, - 'docker_os_version': version, - 'file_path': dockerfile_path, - 'comments': comments, - 'run_cmd': 'cm run script --tags=app,mlperf,inference,generic'+variation_string+' --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml', - 'script_tags': 'app,mlperf,inference,generic', - 'quiet': True, - 'print_deps': True, - 'real_run': True - } + 'automation': 'script', + 'tags': 'build,dockerfile', + 'docker_os': _os, + 'docker_os_version': version, + 'file_path': dockerfile_path, + 'comments': comments, + 'run_cmd': 'cm run script --tags=app,mlperf,inference,generic' + variation_string + ' --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml', + 'script_tags': 'app,mlperf,inference,generic', + 'quiet': True, + 'print_deps': True, + 'real_run': True + } r = cmind.access(cm_docker_input) if r['return'] > 0: print(r) exit(1) - print ('') - print ("Dockerfile generated at " + dockerfile_path) + print('') + print("Dockerfile generated at " + dockerfile_path) diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 4178a1c506..41fd8570b9 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -10,7 +10,8 @@ import sys import mlperf_utils import re -from datetime import datetime,timezone +from datetime import datetime, timezone + def preprocess(i): @@ -18,27 +19,36 @@ def preprocess(i): state = i['state'] if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'nvidia': - if env.get('CM_NVIDIA_GPU_NAME', '') in [ "rtx_4090", "a100", "t4", "l4", "orin", "custom" ]: - env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + env['CM_NVIDIA_GPU_NAME'] + if env.get('CM_NVIDIA_GPU_NAME', '') in [ + "rtx_4090", "a100", "t4", "l4", "orin", "custom"]: + env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \ + env['CM_NVIDIA_GPU_NAME'] env['CM_NVIDIA_GPU_MEMORY'] = '' else: - gpu_memory = i['state'].get('cm_cuda_device_prop','').get('Global memory') - gpu_memory_size = str(int((float(gpu_memory)/(1024*1024*1024) +7)/8) * 8) + gpu_memory = i['state'].get( + 'cm_cuda_device_prop', 
'').get('Global memory') + gpu_memory_size = str( + int((float(gpu_memory) / (1024 * 1024 * 1024) + 7) / 8) * 8) env['CM_NVIDIA_GPU_MEMORY'] = gpu_memory_size env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = '' if 'cmd' in i['input']: - state['mlperf_inference_run_cmd'] = "cm run script " + " ".join(i['input']['cmd']) + state['mlperf_inference_run_cmd'] = "cm run script " + \ + " ".join(i['input']['cmd']) state['mlperf-inference-implementation'] = {} run_state = i['run_script_input']['run_state'] - state['mlperf-inference-implementation']['script_id'] = run_state['script_id']+":"+",".join(run_state['script_variation_tags']) + state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ + ":" + ",".join(run_state['script_variation_tags']) + + if env.get('CM_VLLM_SERVER_MODEL_NAME', '') != '' and env.get( + 'CM_ML_MODEL_FULL_NAME', '') == '': + env['CM_ML_MODEL_FULL_NAME'] = env['CM_VLLM_SERVER_MODEL_NAME'].replace( + "/", "_") - if env.get('CM_VLLM_SERVER_MODEL_NAME', '') != '' and env.get('CM_ML_MODEL_FULL_NAME', '') == '': - env['CM_ML_MODEL_FULL_NAME'] = env['CM_VLLM_SERVER_MODEL_NAME'].replace("/", "_") + return {'return': 0} - return {'return':0} def postprocess(i): @@ -51,7 +61,7 @@ def postprocess(i): env['CMD'] = '' state = i['state'] - #if env.get('CM_MLPERF_USER_CONF', '') == '': + # if env.get('CM_MLPERF_USER_CONF', '') == '': # return {'return': 0} output_dir = env['CM_MLPERF_OUTPUT_DIR'] @@ -60,100 +70,133 @@ def postprocess(i): mode = env['CM_MLPERF_LOADGEN_MODE'] - if not os.path.exists(output_dir) or not os.path.exists(os.path.join(output_dir, "mlperf_log_summary.txt")): + if not os.path.exists(output_dir) or not os.path.exists( + os.path.join(output_dir, "mlperf_log_summary.txt")): # No output, fake_run? return {'return': 0} - #in power mode copy the log files from tmp_power directory + # in power mode copy the log files from tmp_power directory if env.get('CM_MLPERF_POWER', '') == "yes" and mode == "performance": - mlperf_power_logs_dir = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "..", "power") - mlperf_ranging_logs_dir = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging") + mlperf_power_logs_dir = os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], "..", "power") + mlperf_ranging_logs_dir = os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging") - if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "power")): + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "power")): if os.path.exists(mlperf_power_logs_dir): shutil.rmtree(mlperf_power_logs_dir) - shutil.copytree(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "power"), mlperf_power_logs_dir) - - if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "ranging")): + shutil.copytree( + os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "power"), + mlperf_power_logs_dir) + + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "ranging")): if os.path.exists(mlperf_ranging_logs_dir): shutil.rmtree(mlperf_ranging_logs_dir) - shutil.copytree(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "ranging"), mlperf_ranging_logs_dir) - - if os.path.exists(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")): - shutil.copyfile(os.path.join(env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt"), os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "spl.txt")) + shutil.copytree( + os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "ranging"), + mlperf_ranging_logs_dir) + + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")): + shutil.copyfile( 
+ os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "run_1", + "spl.txt"), + os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], + "spl.txt")) model = env['CM_MODEL'] model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model) - if mode == "accuracy" or mode== "compliance" and env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": + if mode == "accuracy" or mode == "compliance" and env[ + 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": if model == "resnet50": accuracy_filename = "accuracy-imagenet.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ - accuracy_filename) + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filename) dataset_args = " --imagenet-val-file " + \ - os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") accuracy_log_file_option_name = " --mlperf-accuracy-file " - datatype_option = " --dtype "+env['CM_IMAGENET_ACCURACY_DTYPE'] + datatype_option = " --dtype " + env['CM_IMAGENET_ACCURACY_DTYPE'] elif model == "retinanet": accuracy_filename = "accuracy-openimages.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ - accuracy_filename) - dataset_args = " --openimages-dir " + os.getcwd() #just to make the script happy + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filename) + dataset_args = " --openimages-dir " + \ + os.getcwd() # just to make the script happy accuracy_log_file_option_name = " --mlperf-accuracy-file " datatype_option = "" elif 'bert' in model: accuracy_filename = "accuracy-squad.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) - dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + env['CM_DATASET_SQUAD_VOCAB_PATH'] + "' --out_file predictions.json " + accuracy_filepath = os.path.join( + env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) + dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \ + env['CM_DATASET_SQUAD_VOCAB_PATH'] + \ + "' --out_file predictions.json " accuracy_log_file_option_name = " --log_file " - datatype_option = " --output_dtype "+env['CM_SQUAD_ACCURACY_DTYPE'] + datatype_option = " --output_dtype " + \ + env['CM_SQUAD_ACCURACY_DTYPE'] elif 'stable-diffusion-xl' in model: - pass #No compliance check for now + pass # No compliance check for now elif 'gpt' in model: - pass #No compliance check for now + pass # No compliance check for now elif 'llama2-70b' in model: - pass #No compliance check for now + pass # No compliance check for now elif 'mixtral-8x7b' in model: - pass #No compliance check for now + pass # No compliance check for now else: - pass # Not giving an error now. But accuracy paths need to be done for other benchmarks which may need the non-determinism test - #return {'return': 1, 'error': f'Accuracy paths not done for model {model}'} + pass # Not giving an error now. 
But accuracy paths need to be done for other benchmarks which may need the non-determinism test + # return {'return': 1, 'error': f'Accuracy paths not done for model + # {model}'} scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] if not state.get('cm-mlperf-inference-results'): state['cm-mlperf-inference-results'] = {} if not state.get('cm-mlperf-inference-results-last'): state['cm-mlperf-inference-results-last'] = {} - if not state['cm-mlperf-inference-results'].get(state['CM_SUT_CONFIG_NAME']): + if not state['cm-mlperf-inference-results'].get( + state['CM_SUT_CONFIG_NAME']): state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {} - if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']].get(model): + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ].get(model): state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {} - if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model].get(scenario): - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario] = {} - + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model].get(scenario): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario] = {} - #if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == "performance" and scenario != "Server": + # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == + # "performance" and scenario != "Server": if mode == "performance" and scenario != "Server": os.chdir(output_dir) if not os.path.exists("mlperf_log_summary.txt"): return {'return': 0} - if scenario in [ "Offline", "Server" ]: + if scenario in ["Offline", "Server"]: metric = "target_qps" elif scenario.endswith("Stream"): metric = "target_latency" else: - return {'return': 1, 'error': 'Unsupported scenario: {}'.format(scenario)} + return {'return': 1, + 'error': 'Unsupported scenario: {}'.format(scenario)} import re import yaml pattern = {} pattern["Offline"] = "Samples per second: (.*)\n" - pattern["SingleStream"] = "Mean latency \(ns\)\s*:(.*)" - pattern["MultiStream"] = "Mean latency \(ns\)\s*:(.*)" + pattern["SingleStream"] = "Mean latency \\(ns\\)\\s*:(.*)" + pattern["MultiStream"] = "Mean latency \\(ns\\)\\s*:(.*)" print("\n") with open("mlperf_log_summary.txt", "r") as fp: summary = fp.read() @@ -161,11 +204,12 @@ def postprocess(i): result = re.findall(pattern[scenario], summary) if not result: - return {'return': 1, 'error': f'No {metric} found in performance summary. Pattern checked "{pattern[metric]}"'} + return { + 'return': 1, 'error': f'No {metric} found in performance summary. 
Pattern checked "{pattern[metric]}"'} value = result[0].strip() - if "\(ns\)" in pattern[scenario]: - value = str(float(value)/1000000) #convert to milliseconds + if "\\(ns\\)" in pattern[scenario]: + value = str(float(value) / 1000000) # convert to milliseconds sut_name = state['CM_SUT_CONFIG_NAME'] sut_config = state['CM_SUT_CONFIG'][sut_name] @@ -174,24 +218,35 @@ def postprocess(i): sut_config[model_full_name][scenario] = {} sut_config[model_full_name][scenario][metric] = value - print(f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}") + print( + f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}") print(f"New config stored in {sut_config_path}") with open(sut_config_path, "w") as f: yaml.dump(sut_config, f) - - if mode in [ "performance", "accuracy" ]: - #if measurements file exist read it + if mode in ["performance", "accuracy"]: + # if measurements file exist read it if os.path.exists("measurements.json"): with open("measurements.json", "r") as file: measurements = json.load(file) # Load JSON data from the file else: measurements = {} - measurements['starting_weights_filename'] = env.get('CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get('CM_ML_MODEL_FILE', measurements.get('starting_weights_filename', ''))) - measurements['retraining'] = env.get('CM_ML_MODEL_RETRAINING', measurements.get('retraining', 'no')) - measurements['input_data_types'] = env.get('CM_ML_MODEL_INPUTS_DATA_TYPE', measurements.get('input_data_types', 'fp32')) - measurements['weight_data_types'] = env.get('CM_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get('weight_data_types', 'fp32')) - measurements['weight_transformations'] = env.get('CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get('weight_transformations', 'none')) + measurements['starting_weights_filename'] = env.get( + 'CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( + 'CM_ML_MODEL_FILE', measurements.get( + 'starting_weights_filename', ''))) + measurements['retraining'] = env.get( + 'CM_ML_MODEL_RETRAINING', measurements.get( + 'retraining', 'no')) + measurements['input_data_types'] = env.get( + 'CM_ML_MODEL_INPUTS_DATA_TYPE', measurements.get( + 'input_data_types', 'fp32')) + measurements['weight_data_types'] = env.get( + 'CM_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get( + 'weight_data_types', 'fp32')) + measurements['weight_transformations'] = env.get( + 'CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get( + 'weight_transformations', 'none')) os.chdir(output_dir) @@ -201,20 +256,22 @@ def postprocess(i): mlperf_log_summary = '' if os.path.isfile("mlperf_log_summary.txt"): with open("mlperf_log_summary.txt", "r") as fp: - mlperf_log_summary=fp.read() + mlperf_log_summary = fp.read() - if mlperf_log_summary!='': - state['app_mlperf_inference_log_summary']={} + if mlperf_log_summary != '': + state['app_mlperf_inference_log_summary'] = {} for x in mlperf_log_summary.split('\n'): y = x.split(': ') - if len(y)==2: - state['app_mlperf_inference_log_summary'][y[0].strip().lower()]=y[1].strip() + if len(y) == 2: + state['app_mlperf_inference_log_summary'][y[0].strip().lower() + ] = y[1].strip() - if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ "no", "0", "false"]: + if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ + "no", "0", "false"]: print("\n") print(mlperf_log_summary) - with open ("measurements.json", "w") as fp: + with open("measurements.json", "w") as fp: json.dump(measurements, fp, indent=2) cm_sut_info = {} @@ -223,7 +280,7 @@ def 
postprocess(i): cm_sut_info['device'] = env['CM_MLPERF_DEVICE'] cm_sut_info['framework'] = state['CM_SUT_META']['framework'] cm_sut_info['run_config'] = env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG'] - with open(os.path.join(result_sut_folder_path,"cm-sut-info.json"), "w") as fp: + with open(os.path.join(result_sut_folder_path, "cm-sut-info.json"), "w") as fp: json.dump(cm_sut_info, fp, indent=2) system_meta = state['CM_SUT_META'] @@ -231,15 +288,16 @@ def postprocess(i): json.dump(system_meta, fp, indent=2) # map the custom model for inference result to the official model - # if custom model name is not set, the official model name will be mapped to itself + # if custom model name is not set, the official model name will be + # mapped to itself official_model_name = model model_mapping = {model_full_name: official_model_name} with open("model_mapping.json", "w") as fp: json.dump(model_mapping, fp, indent=2) - # Add to the state - state['app_mlperf_inference_measurements'] = copy.deepcopy(measurements) + state['app_mlperf_inference_measurements'] = copy.deepcopy( + measurements) if os.path.exists(env['CM_MLPERF_CONF']): shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf') @@ -247,94 +305,110 @@ def postprocess(i): if os.path.exists(env['CM_MLPERF_USER_CONF']): shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf') - result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) + result, valid, power_result = mlperf_utils.get_result_from_log( + env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) power = None power_efficiency = None if power_result: power_result_split = power_result.split(",") - if len(power_result_split) == 2: #power and power efficiency + if len(power_result_split) == 2: # power and power efficiency power = power_result_split[0] power_efficiency = power_result_split[1] - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][mode] = result - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][mode+'_valid'] = valid.get(mode, False) + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) state['cm-mlperf-inference-results-last'][mode] = result - state['cm-mlperf-inference-results-last'][mode+'_valid'] = valid.get(mode, False) + state['cm-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) if power: - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power'] = power - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power_valid'] = valid['power'] + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power'] = power + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power_valid'] = valid['power'] state['cm-mlperf-inference-results-last']['power'] = power state['cm-mlperf-inference-results-last']['power_valid'] = valid['power'] if power_efficiency: - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario]['power_efficiency'] = power_efficiency + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power_efficiency'] = power_efficiency 
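For reference, the nested assignments above build a structure of the following shape, keyed by SUT config name, model, scenario and loadgen mode. All values below are illustrative, assuming a hypothetical ResNet50 Offline power run:

state = {
    'cm-mlperf-inference-results': {
        'example_sut': {                    # state['CM_SUT_CONFIG_NAME']
            'resnet50': {                   # model
                'Offline': {                # scenario
                    'performance': '12345.67',   # result parsed from the logs
                    'performance_valid': True,
                    'power': '250.0',            # present only for power runs
                    'power_valid': True,
                    'power_efficiency': '49.4',
                },
            },
        },
    },
    'cm-mlperf-inference-results-last': {
        'performance': '12345.67',
        'performance_valid': True,
    },
}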
state['cm-mlperf-inference-results-last']['power_efficiency'] = power_efficiency # Record basic host info host_info = { - "os_version":platform.platform(), - "cpu_version":platform.processor(), - "python_version":sys.version, - "cm_version":cm.__version__ + "os_version": platform.platform(), + "cpu_version": platform.processor(), + "python_version": sys.version, + "cm_version": cm.__version__ } x = '' - if env.get('CM_HOST_OS_FLAVOR','')!='': x+=env['CM_HOST_OS_FLAVOR'] - if env.get('CM_HOST_OS_VERSION','')!='': x+=' '+env['CM_HOST_OS_VERSION'] - if x!='': host_info['os_version_sys'] = x + if env.get('CM_HOST_OS_FLAVOR', '') != '': + x += env['CM_HOST_OS_FLAVOR'] + if env.get('CM_HOST_OS_VERSION', '') != '': + x += ' ' + env['CM_HOST_OS_VERSION'] + if x != '': + host_info['os_version_sys'] = x - if env.get('CM_HOST_SYSTEM_NAME','')!='': host_info['system_name']=env['CM_HOST_SYSTEM_NAME'] + if env.get('CM_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['CM_HOST_SYSTEM_NAME'] # Check CM automation repository repo_name = 'mlcommons@cm4mlops' repo_hash = '' - r = cm.access({'action':'find', 'automation':'repo', 'artifact':'mlcommons@cm4mlops,9e97bb72b0474657'}) - if r['return']==0 and len(r['list'])==1: + r = cm.access({'action': 'find', 'automation': 'repo', + 'artifact': 'mlcommons@cm4mlops,9e97bb72b0474657'}) + if r['return'] == 0 and len(r['list']) == 1: repo_path = r['list'][0].path if os.path.isdir(repo_path): repo_name = os.path.basename(repo_path) # Check dev - #if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' + # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' - r = cm.access({'action':'system', - 'automation':'utils', - 'path':repo_path, - 'cmd':'git rev-parse HEAD'}) + r = cm.access({'action': 'system', + 'automation': 'utils', + 'path': repo_path, + 'cmd': 'git rev-parse HEAD'}) if r['return'] == 0 and r['ret'] == 0: repo_hash = r['stdout'] host_info['cm_repo_name'] = repo_name host_info['cm_repo_git_hash'] = repo_hash - with open ("cm-host-info.json", "w") as fp: - fp.write(json.dumps(host_info, indent=2)+'\n') + with open("cm-host-info.json", "w") as fp: + fp.write(json.dumps(host_info, indent=2) + '\n') # Prepare README if "cmd" in inp: - cmd = "cm run script \\\n\t"+" \\\n\t".join(inp['cmd']) - xcmd = "cm run script "+xsep+"\n\t" + (" "+xsep+"\n\t").join(inp['cmd']) + cmd = "cm run script \\\n\t" + " \\\n\t".join(inp['cmd']) + xcmd = "cm run script " + xsep + "\n\t" + \ + (" " + xsep + "\n\t").join(inp['cmd']) else: cmd = "" xcmd = "" readme_init = "This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/cm4mlops).\n\n" - readme_init+= "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" + readme_init += "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLCommons CM version: {}\n\n".format(platform.platform(), - platform.processor(), sys.version, cm.__version__) + platform.processor(), sys.version, cm.__version__) x = repo_name - if repo_hash!='': x+=' --checkout='+str(repo_hash) + if repo_hash != '': + x += ' --checkout=' + str(repo_hash) - readme_body += "## CM Run Command\n\nSee [CM installation guide](https://docs.mlcommons.org/inference/install/).\n\n"+ \ - "```bash\npip install -U cmind\n\ncm rm cache -f\n\ncm pull repo {}\n\n{}\n```".format(x, xcmd) + readme_body += "## CM Run Command\n\nSee [CM 
installation guide](https://docs.mlcommons.org/inference/install/).\n\n" + \ + "```bash\npip install -U cmind\n\ncm rm cache -f\n\ncm pull repo {}\n\n{}\n```".format( + x, xcmd) - readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),\n"+ \ + readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),\n" + \ " you should simply reload {} without checkout and clean CM cache as follows:*\n\n".format(repo_name) + \ - "```bash\ncm rm repo {}\ncm pull repo {}\ncm rm cache -f\n\n```".format(repo_name, repo_name) + "```bash\ncm rm repo {}\ncm pull repo {}\ncm rm cache -f\n\n```".format( + repo_name, repo_name) extra_readme_init = '' extra_readme_body = '' @@ -345,15 +419,15 @@ def postprocess(i): script_adr = inp.get('adr', {}) cm_input = {'action': 'run', - 'automation': 'script', - 'tags': script_tags, - 'adr': script_adr, - 'print_deps': True, - 'env': env, - 'quiet': True, - 'silent': True, - 'fake_run': True - } + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'print_deps': True, + 'env': env, + 'quiet': True, + 'silent': True, + 'fake_run': True + } r = cm.access(cm_input) if r['return'] > 0: return r @@ -361,26 +435,28 @@ def postprocess(i): print_deps = r['new_state']['print_deps'] count = 1 for dep in print_deps: - extra_readme_body += "\n\n" + str(count) +". `" +dep+ "`\n" - count = count+1 + extra_readme_body += "\n\n" + str(count) + ". `" + dep + "`\n" + count = count + 1 - if state.get('mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): extra_readme_body += "\n## Dependent CM scripts for the MLPerf Inference Implementation\n" print_deps = state['mlperf-inference-implementation']['print_deps'] count = 1 for dep in print_deps: - extra_readme_body += "\n\n" + str(count) +". `" +dep+"`\n" - count = count+1 + extra_readme_body += "\n\n" + \ + str(count) + ". 
`" + dep + "`\n" + count = count + 1 readme = readme_init + readme_body extra_readme = extra_readme_init + extra_readme_body - with open ("README.md", "w") as fp: + with open("README.md", "w") as fp: fp.write(readme) if extra_readme: - with open ("README-extra.md", "w") as fp: + with open("README-extra.md", "w") as fp: fp.write(extra_readme) elif mode == "compliance": @@ -391,7 +467,12 @@ def postprocess(i): COMPLIANCE_DIR = output_dir OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR) - SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") + SCRIPT_PATH = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "compliance", + "nvidia", + test, + "run_verification.py") if test == "TEST06": cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" else: @@ -406,7 +487,7 @@ def postprocess(i): automation = i['automation'] SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, - "create_accuracy_baseline.sh") + "create_accuracy_baseline.sh") TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01") OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy") if not os.path.exists(OUTPUT_DIR): @@ -415,16 +496,19 @@ def postprocess(i): ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy") if not os.path.exists(ACCURACY_DIR): print("Accuracy run not yet completed") - return {'return':1, 'error': 'TEST01 needs accuracy run to be completed first'} + return { + 'return': 1, 'error': 'TEST01 needs accuracy run to be completed first'} cmd = "cd " + TEST01_DIR + " && bash " + SCRIPT_PATH + " " + os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json") + " " + \ - os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json") + os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json") env['CMD'] = cmd - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'}) - if r['return']>0: + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: return r - verify_accuracy_file = os.path.join(TEST01_DIR, "verify_accuracy.txt") + verify_accuracy_file = os.path.join( + TEST01_DIR, "verify_accuracy.txt") with open(verify_accuracy_file, 'r') as file: data = file.read().replace('\n', '\t') @@ -432,76 +516,124 @@ def postprocess(i): print("\nDeterministic TEST01 failed... 
Trying with non-determinism.\n") # Normal test failed, trying the check with non-determinism - CMD = "cd "+ ACCURACY_DIR+" && "+ env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ - os.path.join(TEST01_DIR, "mlperf_log_accuracy_baseline.json") + dataset_args + datatype_option + " > " + \ - os.path.join(OUTPUT_DIR, "baseline_accuracy.txt") + CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + os.path.join(TEST01_DIR, "mlperf_log_accuracy_baseline.json") + dataset_args + datatype_option + " > " + \ + os.path.join(OUTPUT_DIR, "baseline_accuracy.txt") env['CMD'] = CMD - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'}) - if r['return']>0: return r + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: + return r - CMD = "cd " + ACCURACY_DIR + " && "+env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ - os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + dataset_args + datatype_option + " > " + \ - os.path.join(OUTPUT_DIR, "compliance_accuracy.txt") + CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + dataset_args + datatype_option + " > " + \ + os.path.join(OUTPUT_DIR, "compliance_accuracy.txt") env['CMD'] = CMD - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'verify_accuracy'}) - if r['return']>0: return r + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: + return r import submission_checker as checker - is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR) if test != "TEST06" else True - state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][test] = "passed" if is_valid else "failed" + is_valid = checker.check_compliance_perf_dir( + COMPLIANCE_DIR) if test != "TEST06" else True + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME' + ][model][scenario][test] = "passed" if is_valid else "failed" # portion of the code where the avg utilisation and system information are extracted - # NOTE: The section is under development and print statements are added for further debugging + # NOTE: The section is under development and print statements are added + # for further debugging if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": import pandas as pd system_utilisation_info_dump = {} logs_dir = output_dir # logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) - sys_utilisation_log = pd.read_csv(os.path.join(logs_dir, 'sys_utilisation_info.txt'), dtype={'cpu_utilisation': float, 'used_memory_gb': float}) + sys_utilisation_log = pd.read_csv( + os.path.join( + logs_dir, + 'sys_utilisation_info.txt'), + dtype={ + 'cpu_utilisation': float, + 'used_memory_gb': float}) with open(os.path.join(logs_dir, 'mlperf_log_detail.txt'), 'r') as file: log_txt = file.read() - #patterns for matching the power_begin and power_end in mlperf log + # patterns for matching the power_begin and power_end in mlperf log pattern_begin = r'\"key\"\:\s\"power_begin\"\,\s\"value\"\:\s\"(.*?)\"' pattern_end = r'\"key\"\:\s\"power_end\"\,\s\"value\"\:\s\"(.*?)\"'
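Stripped of the CM plumbing, the log-mining logic that the hunks above and below implement is roughly the following (a sketch only: the file names, regex patterns and timestamp format are taken from the code itself, and, as in that code, it assumes the utilisation log's timestamps carry timezone information so they compare cleanly against the UTC window):

    # Sketch: average system utilisation between power_begin and power_end
    import re
    import pandas as pd
    from datetime import datetime, timezone

    log_txt = open('mlperf_log_detail.txt').read()
    ts_fmt = '%m-%d-%Y %H:%M:%S.%f'
    begin = re.findall(r'\"key\"\:\s\"power_begin\"\,\s\"value\"\:\s\"(.*?)\"', log_txt)[0]
    end = re.findall(r'\"key\"\:\s\"power_end\"\,\s\"value\"\:\s\"(.*?)\"', log_txt)[0]
    t0 = pd.Timestamp(datetime.strptime(begin, ts_fmt)).replace(tzinfo=timezone.utc)
    t1 = pd.Timestamp(datetime.strptime(end, ts_fmt)).replace(tzinfo=timezone.utc)

    log = pd.read_csv('sys_utilisation_info.txt',
                      dtype={'cpu_utilisation': float, 'used_memory_gb': float})
    log['timestamp'] = pd.to_datetime(log['timestamp'])
    # keep only the samples that fall inside the measured power window
    window = log[(log['timestamp'] >= t0) & (log['timestamp'] <= t1)]
    print(window['cpu_utilisation'].mean(), window['used_memory_gb'].mean())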
# match the patterns with the text present in the log details file match_begin = re.findall(pattern_begin, log_txt)[0] match_end = re.findall(pattern_end, log_txt)[0] - power_begin_time = pd.Timestamp(datetime.strptime(match_begin, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) - power_end_time = pd.Timestamp(datetime.strptime(match_end, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) - #converts timestamp key value to datetime objects - sys_utilisation_log['timestamp'] = pd.to_datetime(sys_utilisation_log['timestamp']) + power_begin_time = pd.Timestamp(datetime.strptime( + match_begin, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) + power_end_time = pd.Timestamp(datetime.strptime( + match_end, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) + # convert the timestamp values to datetime objects + sys_utilisation_log['timestamp'] = pd.to_datetime( + sys_utilisation_log['timestamp']) ''' for i in range(len(sys_utilisation_log['timestamp'])): print(f"{sys_utilisation_log['timestamp'][i]} {power_begin_time}") print(sys_utilisation_log['timestamp'][i]>=power_begin_time) ''' - #print(f"{sys_utilisation_log['timestamp'][0]} {power_begin_time}") - #print(sys_utilisation_log['timestamp'][0]>=power_begin_time) + # print(f"{sys_utilisation_log['timestamp'][0]} {power_begin_time}") + # print(sys_utilisation_log['timestamp'][0]>=power_begin_time) filtered_log = sys_utilisation_log[(sys_utilisation_log['timestamp'] >= power_begin_time) & - (sys_utilisation_log['timestamp'] <= power_end_time)] - #print(filtered_log) + (sys_utilisation_log['timestamp'] <= power_end_time)] + # print(filtered_log) # Calculate average of cpu_utilisation and used_memory_gb - system_utilisation_info_dump["avg_cpu_utilisation"] = filtered_log['cpu_utilisation'].mean() - system_utilisation_info_dump["avg_used_memory_gb"] = filtered_log['used_memory_gb'].mean() + system_utilisation_info_dump["avg_cpu_utilisation"] = filtered_log['cpu_utilisation'].mean( + ) + system_utilisation_info_dump["avg_used_memory_gb"] = filtered_log['used_memory_gb'].mean( + ) print("\nSystem utilisation info for the current run:") print(system_utilisation_info_dump) print("\n") - if state.get('mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): - env['CM_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(output_dir, "cm-version-info.json") - env['CM_MLPERF_RUN_DEPS_GRAPH'] = os.path.join(output_dir, "cm-deps.png") - env['CM_MLPERF_RUN_DEPS_MERMAID'] = os.path.join(output_dir, "cm-deps.mmd") + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): + env['CM_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join( + output_dir, "cm-version-info.json") + env['CM_MLPERF_RUN_DEPS_GRAPH'] = os.path.join( + output_dir, "cm-deps.png") + env['CM_MLPERF_RUN_DEPS_MERMAID'] = os.path.join( + output_dir, "cm-deps.mmd") with open(os.path.join(output_dir, "cm-version-info.json"), "w") as f: - f.write(json.dumps(state['mlperf-inference-implementation']['version_info'], indent=2)) + f.write( + json.dumps( + state['mlperf-inference-implementation']['version_info'], + indent=2)) if env.get('CM_DUMP_SYSTEM_INFO', True): - dump_script_output("detect,os", env, state, 'new_env', os.path.join(output_dir, "os_info.json")) - dump_script_output("detect,cpu", env, state, 'new_env', os.path.join(output_dir, "cpu_info.json")) - env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") - dump_script_output("dump,pip,freeze", env, state,
'new_state', os.path.join(output_dir, "pip_freeze.json")) + dump_script_output( + "detect,os", + env, + state, + 'new_env', + os.path.join( + output_dir, + "os_info.json")) + dump_script_output( + "detect,cpu", + env, + state, + 'new_env', + os.path.join( + output_dir, + "cpu_info.json")) + env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw") + dump_script_output( + "dump,pip,freeze", + env, + state, + 'new_state', + os.path.join( + output_dir, + "pip_freeze.json")) + + return {'return': 0} - return {'return':0} def dump_script_output(script_tags, env, state, output_key, dump_file): diff --git a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py index 7163c8d04f..0d5c53c44d 100644 --- a/script/app-mlperf-training-nvidia/customize.py +++ b/script/app-mlperf-training-nvidia/customize.py @@ -4,6 +4,7 @@ import shutil import subprocess + def preprocess(i): os_info = i['os_info'] @@ -12,46 +13,59 @@ def preprocess(i): script_path = i['run_script_input']['path'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": - return {'return':0} + return {'return': 0} - if env.get('CM_MLPERF_POWER','') == "yes": + if env.get('CM_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN","")!='' else False + rerun = True if env.get("CM_RERUN", "") != '' else False if 'CM_MLPERF_MODEL' not in env: - return {'return': 1, 'error': "Please select a variation specifying the model to run"} - + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} if 'CM_NUM_THREADS' not in env: if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) else: env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") - + print("Using MLCommons Training source from '" + + env['CM_MLPERF_TRAINING_SOURCE'] + "'") NUM_THREADS = env['CM_NUM_THREADS'] if "bert" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "bert", "implementations", "pytorch-22.09") + env['CM_RUN_DIR'] = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "NVIDIA", + "benchmarks", + "bert", + "implementations", + "pytorch-22.09") if "resnet" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "NVIDIA", "benchmarks", "resnet", "implementations", "mxnet-22.04") + env['CM_RUN_DIR'] = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "NVIDIA", + "benchmarks", + "resnet", + "implementations", + "mxnet-22.04") env['CM_RESULTS_DIR'] = os.getcwd() - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py index 55f12fb47c..4469394700 100644 --- a/script/app-mlperf-training-reference/customize.py +++ b/script/app-mlperf-training-reference/customize.py @@ -4,6 +4,7 @@ import shutil import subprocess + def preprocess(i): os_info = 
i['os_info'] @@ -12,41 +13,46 @@ def preprocess(i): script_path = i['run_script_input']['path'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": - return {'return':0} + return {'return': 0} if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": - return {'return':0} + return {'return': 0} - if env.get('CM_MLPERF_POWER','') == "yes": + if env.get('CM_MLPERF_POWER', '') == "yes": power = "yes" else: power = "no" - rerun = True if env.get("CM_RERUN","")!='' else False + rerun = True if env.get("CM_RERUN", "") != '' else False if 'CM_MLPERF_MODEL' not in env: - return {'return': 1, 'error': "Please select a variation specifying the model to run"} - + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} if 'CM_NUM_THREADS' not in env: if 'CM_MINIMIZE_THREADS' in env: - env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \ - (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) else: env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") - + print("Using MLCommons Training source from '" + + env['CM_MLPERF_TRAINING_SOURCE'] + "'") NUM_THREADS = env['CM_NUM_THREADS'] if "bert" in env['CM_MLPERF_MODEL']: - env['CM_RUN_DIR'] = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "language_model", "tensorflow", "bert") + env['CM_RUN_DIR'] = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + "language_model", + "tensorflow", + "bert") + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py index 0f759089b7..86bbd3c3b6 100644 --- a/script/app-stable-diffusion-onnx-py/process.py +++ b/script/app-stable-diffusion-onnx-py/process.py @@ -4,31 +4,33 @@ from optimum.onnxruntime import ORTStableDiffusionPipeline -output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT','') +output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '') f = os.path.join(output, 'output.png') if os.path.isfile(f): os.remove(f) -cm_model_path = os.environ.get('CM_ML_MODEL_PATH','') +cm_model_path = os.environ.get('CM_ML_MODEL_PATH', '') if cm_model_path == '': - print ('Error: CM_ML_MODEL_PATH env is not defined') + print('Error: CM_ML_MODEL_PATH env is not defined') exit(1) -device = os.environ.get('CM_DEVICE','') +device = os.environ.get('CM_DEVICE', '') -pipeline = ORTStableDiffusionPipeline.from_pretrained(cm_model_path, local_files_only=True).to(device) +pipeline = ORTStableDiffusionPipeline.from_pretrained( + cm_model_path, local_files_only=True).to(device) -text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT','') -if text == '': text = "a photo of an astronaut riding a horse on mars" +text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '') +if text == '': + text = "a photo of an astronaut riding a horse on mars" -print ('') -print ('Generating imaged based on "{}"'.format(text)) +print('') +print('Generating image based on "{}"'.format(text)) image = pipeline(text).images[0] image.save(f) -print ('Image recorded to "{}"'.format(f)) +print('Image recorded to "{}"'.format(f)) diff --git a/script/authenticate-github-cli/customize.py b/script/authenticate-github-cli/customize.py
index a873791f43..f4adae9931 100644 --- a/script/authenticate-github-cli/customize.py +++ b/script/authenticate-github-cli/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -23,10 +24,11 @@ def preprocess(i): env['CM_RUN_CMD'] = cmd quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py index ae034f55d5..e7f7acc795 100644 --- a/script/benchmark-any-mlperf-inference-implementation/customize.py +++ b/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -28,23 +29,31 @@ def preprocess(i): power = env.get('POWER', '') - if str(power).lower() in [ "yes", "true" ]: - POWER_STRING = " --power=yes --adr.mlperf-power-client.power_server=" + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + str(env.get('POWER_SERVER_PORT', '4950')) + " " + if str(power).lower() in ["yes", "true"]: + POWER_STRING = " --power=yes --adr.mlperf-power-client.power_server=" + env.get( + 'POWER_SERVER', + '192.168.0.15') + " --adr.mlperf-power-client.port=" + str( + env.get( + 'POWER_SERVER_PORT', + '4950')) + " " else: POWER_STRING = "" if not devices: - return {'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + return { + 'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} cmds = [] run_script_content = '#!/bin/bash\n\n' - run_script_content += "POWER_STRING=\"" +POWER_STRING +"\"\n" - run_script_content += "DIVISION=\"" + env['DIVISION'] +"\"\n" - run_script_content += "CATEGORY=\"" + env['CATEGORY'] +"\"\n" - run_script_content += "EXTRA_ARGS=\"" + env.get('EXTRA_ARGS', '') +"\"\n" - run_script_content += 'source '+ os.path.join(script_path, "run-template.sh") + "\nPOWER_STRING=\"" +POWER_STRING +"\"\n\n" + run_script_content += "POWER_STRING=\"" + POWER_STRING + "\"\n" + run_script_content += "DIVISION=\"" + env['DIVISION'] + "\"\n" + run_script_content += "CATEGORY=\"" + env['CATEGORY'] + "\"\n" + run_script_content += "EXTRA_ARGS=\"" + env.get('EXTRA_ARGS', '') + "\"\n" + run_script_content += 'source ' + \ + os.path.join(script_path, "run-template.sh") + \ + "\nPOWER_STRING=\"" + POWER_STRING + "\"\n\n" - run_file_name = 'tmp-'+implementation+'-run' + run_file_name = 'tmp-' + implementation + '-run' for model in models: env['MODEL'] = model @@ -60,7 +69,7 @@ def preprocess(i): assemble_tflite_cmds(cmds) if env.get('CM_HOST_CPU_ARCHITECTURE', '') == "aarch64": - extra_tags=",_armnn,_use-neon" + extra_tags = ",_armnn,_use-neon" cmd = f'export extra_tags="{extra_tags}"' cmds.append(cmd) assemble_tflite_cmds(cmds) @@ -90,7 +99,8 @@ def preprocess(i): elif "llama2-70b" in model: backends = "pytorch" if not backends: - return {'return': 1, 'error': f'No backend specified for the model: {model}.'} + return { + 'return': 1, 'error': f'No backend specified for the model: {model}.'} backends = backends.split(",") else: @@ -100,14 +110,28 @@ def preprocess(i): for device in devices: add_to_run_cmd = '' - offline_target_qps = (((state.get(model, {})).get(device, {})).get(backend, 
{})).get('offline_target_qps') + offline_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('offline_target_qps') if offline_target_qps: add_to_run_cmd += f" --offline_target_qps={offline_target_qps}" - server_target_qps = (((state.get(model, {})).get(device, {})).get(backend, {})).get('server_target_qps') + server_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('server_target_qps') if server_target_qps: add_to_run_cmd += f" --server_target_qps={server_target_qps}" - else: #try to do a test run with reasonable number of samples to get and record the actual system performance + else: # try to do a test run with reasonable number of samples to get and record the actual system performance if device == "cpu": if model == "resnet50": test_query_count = 1000 @@ -120,27 +144,37 @@ def preprocess(i): test_query_count = 2000 cmd = f'run_test "{model}" "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' cmds.append(cmd) - #second argument is unused for submission_cmd + # second argument is unused for submission_cmd cmd = f'run_test "{model}" "{backend}" "100" "{implementation}" "{device}" "$submission_cmd" "{add_to_run_cmd}"' - singlestream_target_latency = (((state.get(model, {})).get(device, {})).get(backend, {})).get('singlestream_target_latency') + singlestream_target_latency = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('singlestream_target_latency') if singlestream_target_latency: cmd += f" --singlestream_target_latency={singlestream_target_latency}" cmds.append(cmd) - run_script_content += "\n\n" +"\n\n".join(cmds) + run_script_content += "\n\n" + "\n\n".join(cmds) - with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: + with open(os.path.join(script_path, run_file_name + ".sh"), 'w') as f: f.write(run_script_content) print(run_script_content) run_script_input = i['run_script_input'] - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':run_file_name}) + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': run_file_name}) + + if r['return'] > 0: + return r - if r['return']>0: return r + return {'return': 0} - return {'return':0} def assemble_tflite_cmds(cmds): cmd = 'run "$tflite_accuracy_cmd"' @@ -151,8 +185,9 @@ def assemble_tflite_cmds(cmds): cmds.append(cmd) return + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py index c3236fb325..4ac5b9e213 100644 --- a/script/benchmark-program-mlperf/customize.py +++ b/script/benchmark-program-mlperf/customize.py @@ -1,11 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -14,7 +16,6 @@ def postprocess(i): env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD') - if env.get('CM_MLPERF_POWER', '') == "yes": if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no': @@ -57,4 +58,4 @@ def postprocess(i): # Just use the existing CM_RUN_CMD if no ranging run is needed env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD', '').strip() - return {'return':0} + return {'return': 0} diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 2e051607e6..2dd6ffc1c4 100644 --- a/script/benchmark-program/customize.py +++ 
b/script/benchmark-program/customize.py @@ -1,21 +1,23 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] q = '"' if os_info['platform'] == 'windows' else "'" - if env.get('CM_RUN_CMD','') == '': - if env.get('CM_BIN_NAME','') == '': + if env.get('CM_RUN_CMD', '') == '': + if env.get('CM_BIN_NAME', '') == '': x = 'run.exe' if os_info['platform'] == 'windows' else 'run.out' env['CM_BIN_NAME'] = x if os_info['platform'] == 'windows': - env['CM_RUN_CMD'] = env.get('CM_RUN_PREFIX','') + env['CM_BIN_NAME'] - if env.get('CM_RUN_SUFFIX','')!='': - env['CM_RUN_CMD'] += ' '+env['CM_RUN_SUFFIX'] + env['CM_RUN_CMD'] = env.get( + 'CM_RUN_PREFIX', '') + env['CM_BIN_NAME'] + if env.get('CM_RUN_SUFFIX', '') != '': + env['CM_RUN_CMD'] += ' ' + env['CM_RUN_SUFFIX'] else: if env['CM_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: @@ -28,26 +30,33 @@ def preprocess(i): env['CM_RUN_PREFIX'] = CM_RUN_PREFIX - CM_RUN_SUFFIX = (env['CM_REDIRECT_OUT'] + ' ') if 'CM_REDIRECT_OUT' in env else '' - CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] + ' ') if 'CM_REDIRECT_ERR' in env else '' + CM_RUN_SUFFIX = ( + env['CM_REDIRECT_OUT'] + + ' ') if 'CM_REDIRECT_OUT' in env else '' + CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] + + ' ') if 'CM_REDIRECT_ERR' in env else '' - env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX + env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + \ + CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX - if env.get('CM_RUN_DIR','') == '': + if env.get('CM_RUN_DIR', '') == '': env['CM_RUN_DIR'] = os.getcwd() + env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join( + env['CM_RUN_DIR'], env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX'] - env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join(env['CM_RUN_DIR'],env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX'] - - x = env.get('CM_RUN_PREFIX0','') - if x!='': - env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD','') + x = env.get('CM_RUN_PREFIX0', '') + if x != '': + env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD', '') - if os_info['platform'] != 'windows' and str(env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in [ "no", "false", "0"]: + if os_info['platform'] != 'windows' and str( + env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]: logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) - env['CM_RUN_CMD'] += r" 2>&1 | tee " + q+ os.path.join(logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus" + env['CM_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join( + logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus" - # additional arguments and tags for measuring system informations(only if 'CM_PROFILE_NVIDIA_POWER' is 'on') + # additional arguments and tags for measuring system information (only if + # 'CM_PROFILE_NVIDIA_POWER' is 'on') if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": env['CM_SYS_UTILISATION_SCRIPT_TAGS'] = '' # this section is for selecting the variation @@ -56,9 +65,13 @@ def preprocess(i): elif env.get('CM_MLPERF_DEVICE', '') == "cpu": env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu' # this section is for supplying the input arguments/tags - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + logs_dir + '\'' # specify the logs directory - if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': # specifying the interval in which the system information should be measured - env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"' + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \ + logs_dir + '\'' # specify the logs directory + # specifying the interval in which the system information should be + # measured + if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \ + env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"'
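To make the string assembly above concrete, here is roughly what the composed tag string and the resulting monitoring command look like for a CUDA device (the directory and interval below are illustrative placeholders, not values from a real run; the \$ escapes in the real code, which survive an extra shell layer, are simplified here):

    # Sketch: composing the background-monitoring command (example values only)
    tags = ',_cuda'                  # or ',_cpu', chosen from CM_MLPERF_DEVICE
    logs_dir = '/tmp/mlperf-logs'    # hypothetical CM_LOGS_DIR
    interval = '2'                   # hypothetical CM_SYSTEM_INFO_MEASUREMENT_INTERVAL

    script_tags = tags + " --log_dir='" + logs_dir + "'" + ' --interval="' + interval + '"'
    pre_run_cmd = ('cm run script --tags=runtime,system,utilisation' + script_tags +
                   ' --quiet & cmd_pid=$! && echo CMD_PID=$cmd_pid')
    print(pre_run_cmd)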
# generate the pre run cmd - recording runtime system infos pre_run_cmd = "" if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": @@ -71,36 +84,41 @@ def preprocess(i): pre_run_cmd += ' && ' # running the script as a process in background - pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' + pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + \ + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the process id of the background process pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" - print(f"Pre run command for recording the runtime system information: {pre_run_cmd}") + print( + f"Pre run command for recording the runtime system information: {pre_run_cmd}") env['CM_PRE_RUN_CMD'] = pre_run_cmd - # generate the post run cmd - for killing the process that records runtime system infos + # generate the post run cmd - for killing the process that records runtime + # system infos post_run_cmd = "" if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": post_run_cmd += r"echo killing process \$cmd_pid && kill -TERM \${cmd_pid}" - print(f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}") + print( + f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}") env['CM_POST_RUN_CMD'] = post_run_cmd # Print info - print ('***************************************************************************') - print ('CM script::benchmark-program/run.sh') - print ('') - print ('Run Directory: {}'.format(env.get('CM_RUN_DIR',''))) + print('***************************************************************************') + print('CM script::benchmark-program/run.sh') + print('') + print('Run Directory: {}'.format(env.get('CM_RUN_DIR', ''))) + + print('') + print('CMD: {}'.format(env.get('CM_RUN_CMD', ''))) - print ('') - print ('CMD: {}'.format(env.get('CM_RUN_CMD',''))) + print('') - print ('') + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py index a533deaab9..d3e62ed081 100644 --- a/script/build-docker-image/customize.py +++ b/script/build-docker-image/customize.py @@ -2,13 +2,14 @@ import os from os.path import exists + def preprocess(i): os_info = i['os_info'] env = i['env'] dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') - if dockerfile_path!='' and os.path.exists(dockerfile_path): + if dockerfile_path != '' and os.path.exists(dockerfile_path): build_dockerfile = False env['CM_BUILD_DOCKERFILE'] = "no" os.chdir(os.path.dirname(dockerfile_path)) @@ -17,14 +18,14 @@ def preprocess(i): env['CM_BUILD_DOCKERFILE'] = "yes" env['CM_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes" - CM_DOCKER_BUILD_ARGS = env.get('+ CM_DOCKER_BUILD_ARGS', []) if env.get('CM_GH_TOKEN', '') != '': - CM_DOCKER_BUILD_ARGS.append( "CM_GH_TOKEN="+env['CM_GH_TOKEN'] ) + CM_DOCKER_BUILD_ARGS.append("CM_GH_TOKEN=" + env['CM_GH_TOKEN']) if CM_DOCKER_BUILD_ARGS: - build_args = "--build-arg
"+ " --build-arg ".join(CM_DOCKER_BUILD_ARGS) + build_args = "--build-arg " + \ + " --build-arg ".join(CM_DOCKER_BUILD_ARGS) else: build_args = "" @@ -40,7 +41,9 @@ def preprocess(i): docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', '') if docker_image_name == '': - docker_image_name = "cm-script-" +env.get('CM_DOCKER_RUN_SCRIPT_TAGS','').replace(',', '-').replace('_','-') + docker_image_name = "cm-script-" + \ + env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '').replace( + ',', '-').replace('_', '-') env['CM_DOCKER_IMAGE_NAME'] = docker_image_name if env.get("CM_DOCKER_IMAGE_TAG", "") == '': @@ -62,12 +65,12 @@ def preprocess(i): # Prepare CMD to build image XCMD = [ - 'docker build ' + env.get('CM_DOCKER_CACHE_ARG',''), - ' ' + build_args, - ' -f "' + dockerfile_path + '"', - ' -t "' + image_name, - ' .' - ] + 'docker build ' + env.get('CM_DOCKER_CACHE_ARG', ''), + ' ' + build_args, + ' -f "' + dockerfile_path + '"', + ' -t "' + image_name, + ' .' + ] with open(dockerfile_path + '.build.sh', 'w') as f: f.write(' \\\n'.join(XCMD) + '\n') @@ -77,25 +80,27 @@ def preprocess(i): CMD = ''.join(XCMD) - print ('================================================') - print ('CM generated the following Docker build command:') - print ('') - print (CMD) + print('================================================') + print('CM generated the following Docker build command:') + print('') + print(CMD) - print ('') + print('') env['CM_DOCKER_BUILD_CMD'] = CMD - return {'return':0} + return {'return': 0} + def get_image_name(env): image_name = env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \ - env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \ - env.get('CM_DOCKER_IMAGE_TAG', '') + '"' + env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \ + env.get('CM_DOCKER_IMAGE_TAG', '') + '"' return image_name + def postprocess(i): env = i['env'] @@ -108,24 +113,24 @@ def postprocess(i): PCMD = 'docker image push ' + image_name dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') - if dockerfile_path!='' and os.path.isfile(dockerfile_path): + if dockerfile_path != '' and os.path.isfile(dockerfile_path): with open(dockerfile_path + '.push.sh', 'w') as f: f.write(PCMD + '\n') with open(dockerfile_path + '.build.bat', 'w') as f: f.write(PCMD + '\n') - print ('================================================') - print ('CM generated the following Docker push command:') - print ('') - print (PCMD) + print('================================================') + print('CM generated the following Docker push command:') + print('') + print(PCMD) - print ('') + print('') r = os.system(PCMD) - print ('') + print('') - if r>0: - return {'return':1, 'error':'pushing to Docker Hub failed'} + if r > 0: + return {'return': 1, 'error': 'pushing to Docker Hub failed'} - return {'return':0} + return {'return': 0} diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index 4bbfe0e572..f5cd062042 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -5,13 +5,15 @@ import re import shutil + def preprocess(i): os_info = i['os_info'] env = i['env'] - if env["CM_DOCKER_OS"] not in [ "ubuntu", "rhel", "arch" ]: - return {'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"} + if env["CM_DOCKER_OS"] not in ["ubuntu", "rhel", "arch"]: + return { + 'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. 
Currently only ubuntu, rhel and arch are supported in CM docker"} path = i['run_script_input']['path'] @@ -24,35 +26,39 @@ def preprocess(i): copy_files = [] if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '': - script_tags=env['CM_DOCKER_RUN_SCRIPT_TAGS'] - found_scripts = cm.access({'action': 'search', 'automation': 'script', 'tags': script_tags}) + script_tags = env['CM_DOCKER_RUN_SCRIPT_TAGS'] + found_scripts = cm.access( + {'action': 'search', 'automation': 'script', 'tags': script_tags}) scripts_list = found_scripts['list'] if not scripts_list: - return {'return': 1, 'error': 'No CM script found for tags ' + script_tags} + return {'return': 1, + 'error': 'No CM script found for tags ' + script_tags} if len(scripts_list) > 1: - return {'return': 1, 'error': 'More than one scripts found for tags '+ script_tags} + return { + 'return': 1, 'error': 'More than one script found for tags ' + script_tags} script = scripts_list[0] input_mapping = script.meta.get('input_mapping', {}) default_env = script.meta.get('default_env', {}) - for input_,env_ in input_mapping.items(): + for input_, env_ in input_mapping.items(): if input_ == "docker": continue - arg=env_ - if env_ in default_env: #other inputs to be done later - arg=arg+"="+str(default_env[env_]) - #build_args.append(arg) - #input_args.append("--"+input_+"="+"$"+env_) + arg = env_ + if env_ in default_env: # other inputs to be done later + arg = arg + "=" + str(default_env[env_]) + # build_args.append(arg) + # input_args.append("--"+input_+"="+"$"+env_) if "CM_DOCKER_OS_VERSION" not in env: env["CM_DOCKER_OS_VERSION"] = "20.04" docker_image_base = get_value(env, config, 'FROM', 'CM_DOCKER_IMAGE_BASE') if not docker_image_base: - return {'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "} + return { + 'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "} # Handle cm_mlops Repository if env.get("CM_REPO_PATH", "") != "": @@ -60,14 +66,21 @@ def preprocess(i): cm_repo_path = os.path.abspath(env["CM_REPO_PATH"]) if not os.path.exists(cm_repo_path): - return {'return': 1, 'error': f"Specified CM_REPO_PATH does not exist: {cm_repo_path}"} + return { + 'return': 1, 'error': f"Specified CM_REPO_PATH does not exist: {cm_repo_path}"} cmr_yml_path = os.path.join(cm_repo_path, "cmr.yaml") if not os.path.isfile(cmr_yml_path): - return {'return': 1, 'error': f"cmr.yaml not found in CM_REPO_PATH: {cm_repo_path}"} + return { + 'return': 1, 'error': f"cmr.yaml not found in CM_REPO_PATH: {cm_repo_path}"} # Define the build context directory (where the Dockerfile will be) - build_context_dir = os.path.dirname(env.get('CM_DOCKERFILE_WITH_PATH', os.path.join(os.getcwd(), "Dockerfile"))) + build_context_dir = os.path.dirname( + env.get( + 'CM_DOCKERFILE_WITH_PATH', + os.path.join( + os.getcwd(), + "Dockerfile"))) os.makedirs(build_context_dir, exist_ok=True) # Create cm_repo directory relative to the build context @@ -78,21 +91,27 @@ def preprocess(i): shutil.rmtree(repo_build_context_path) try: - print(f"Copying repository from {cm_repo_path} to {repo_build_context_path}") + print( + f"Copying repository from {cm_repo_path} to {repo_build_context_path}") shutil.copytree(cm_repo_path, repo_build_context_path) except Exception as e: - return {'return': 1, 'error': f"Failed to copy repository to build context: {str(e)}"} + return { + 'return': 1, 'error': f"Failed to copy repository to build context: {str(e)}"} if not
os.path.isdir(repo_build_context_path): - return {'return': 1, 'error': f"Repository was not successfully copied to {repo_build_context_path}"} + return { + 'return': 1, 'error': f"Repository was not successfully copied to {repo_build_context_path}"} # (Optional) Verify the copy if not os.path.isdir(repo_build_context_path): - return {'return': 1, 'error': f"cm_repo was not successfully copied to the build context at {repo_build_context_path}"} + return { + 'return': 1, 'error': f"cm_repo was not successfully copied to the build context at {repo_build_context_path}"} else: - print(f"cm_repo is present in the build context at {repo_build_context_path}") + print( + f"cm_repo is present in the build context at {repo_build_context_path}") - relative_repo_path = os.path.relpath(repo_build_context_path, build_context_dir) + relative_repo_path = os.path.relpath( + repo_build_context_path, build_context_dir) else: # CM_REPO_PATH is not set; use cm pull repo as before use_copy_repo = False @@ -109,21 +128,23 @@ def preprocess(i): repo_owner = match.group(4) repo_name = match.group(5) cm_mlops_repo = f"{repo_owner}@{repo_name}" - print(f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}") + print( + f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}") else: cm_mlops_repo = "mlcommons@cm4mlops" cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}" if env.get('CM_DOCKERFILE_WITH_PATH', '') == '': - env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(os.getcwd(), "Dockerfile") + env['CM_DOCKERFILE_WITH_PATH'] = os.path.join( + os.getcwd(), "Dockerfile") dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH'] dockerfile_dir = os.path.dirname(dockerfile_with_path) extra_dir = os.path.dirname(dockerfile_with_path) - if extra_dir!='': + if extra_dir != '': os.makedirs(extra_dir, exist_ok=True) f = open(dockerfile_with_path, "w") @@ -132,7 +153,9 @@ def preprocess(i): # Maintainers f.write(EOL) - f.write('# Automatically generated by the CM workflow automation meta-framework' + EOL) + f.write( + '# Automatically generated by the CM workflow automation meta-framework' + + EOL) f.write('# https://github.com/mlcommons/ck' + EOL) f.write(EOL) @@ -154,17 +177,17 @@ def preprocess(i): for arg in config['ARGS_DEFAULT']: arg_value = config['ARGS_DEFAULT'][arg] - f.write('ARG '+ f"{arg}={arg_value}" + EOL) + f.write('ARG ' + f"{arg}={arg_value}" + EOL) for arg in config['ARGS']: - f.write('ARG '+ arg + EOL) + f.write('ARG ' + arg + EOL) for build_arg in build_args: - f.write('ARG '+ build_arg + EOL) + f.write('ARG ' + build_arg + EOL) for build_arg in sorted(build_args_default): v = build_args_default[build_arg] - f.write('ARG '+ build_arg + '="' + str(v) + '"' + EOL) + f.write('ARG ' + build_arg + '="' + str(v) + '"' + EOL) f.write(EOL) copy_cmds = [] @@ -172,39 +195,58 @@ def preprocess(i): for copy_file in env['CM_DOCKER_COPY_FILES']: copy_split = copy_file.split(":") if len(copy_split) != 2: - return {'return': 1, 'error': 'Invalid docker copy input {} given'.format(copy_file)} + return { + 'return': 1, 'error': 'Invalid docker copy input {} given'.format(copy_file)} filename = os.path.basename(copy_split[0]) if not os.path.exists(os.path.join(dockerfile_dir, filename)): - shutil.copytree(copy_split[0], os.path.join(dockerfile_dir, filename)) - f.write('COPY '+ filename+" "+copy_split[1] + EOL) - - f.write(EOL+'# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes'+EOL+'# Install system dependencies' + EOL) - f.write('RUN ' + get_value(env, config, 
'package-manager-update-cmd', 'CM_PACKAGE_MANAGER_UPDATE_CMD') + EOL) - f.write('RUN '+ get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config, - 'packages')) + EOL) - - if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '')!='': + shutil.copytree( + copy_split[0], os.path.join( + dockerfile_dir, filename)) + f.write('COPY ' + filename + " " + copy_split[1] + EOL) + + f.write( + EOL + + '# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes' + + EOL + + '# Install system dependencies' + + EOL) + f.write( + 'RUN ' + + get_value( + env, + config, + 'package-manager-update-cmd', + 'CM_PACKAGE_MANAGER_UPDATE_CMD') + + EOL) + f.write('RUN ' + get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config, + 'packages')) + EOL) + + if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '') != '': f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL) if env['CM_DOCKER_OS'] == "ubuntu": if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23: - if "--break-system-packages" not in env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): + if "--break-system-packages" not in env.get( + 'CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') + f.write(EOL + '# Setup docker environment' + EOL) - f.write(EOL+'# Setup docker environment' + EOL) - - entry_point = get_value(env, config, 'ENTRYPOINT', 'CM_DOCKER_IMAGE_ENTRYPOINT') + entry_point = get_value( + env, + config, + 'ENTRYPOINT', + 'CM_DOCKER_IMAGE_ENTRYPOINT') if entry_point: f.write('ENTRYPOINT ' + entry_point + EOL) - for key,value in config['ENV'].items(): - f.write('ENV '+ key + "=\"" + value + "\""+ EOL) + for key, value in config['ENV'].items(): + f.write('ENV ' + key + "=\"" + value + "\"" + EOL) for cmd in config['RUN_CMDS']: - f.write('RUN '+ cmd + EOL) + f.write('RUN ' + cmd + EOL) - f.write(EOL+'# Setup docker user' + EOL) + f.write(EOL + '# Setup docker user' + EOL) docker_user = get_value(env, config, 'USER', 'CM_DOCKER_USER') docker_group = get_value(env, config, 'GROUP', 'CM_DOCKER_GROUP') @@ -216,119 +258,150 @@ def preprocess(i): DOCKER_GROUP = "-g $GID -o" user_shell = json.loads(shell) - f.write('RUN useradd ' + DOCKER_USER_ID + DOCKER_GROUP + ' --create-home --shell '+ user_shell[0] + ' ' + f.write('RUN useradd ' + DOCKER_USER_ID + DOCKER_GROUP + ' --create-home --shell ' + user_shell[0] + ' ' + docker_user + EOL) - f.write('RUN echo "' + docker_user + ' ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers' + EOL) + f.write( + 'RUN echo "' + + docker_user + + ' ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers' + + EOL) f.write('USER ' + docker_user + ":" + docker_group + EOL) dockerfile_env = env.get('CM_DOCKERFILE_ENV', {}) dockerfile_env_input_string = "" for docker_env_key in dockerfile_env: - dockerfile_env_input_string = dockerfile_env_input_string + " --env."+docker_env_key+"="+str(dockerfile_env[docker_env_key]) + dockerfile_env_input_string = dockerfile_env_input_string + " --env." 
+ \ + docker_env_key + "=" + str(dockerfile_env[docker_env_key]) workdir = get_value(env, config, 'WORKDIR', 'CM_DOCKER_WORKDIR') if workdir: f.write('WORKDIR ' + workdir + EOL) - f.write(EOL+'# Install python packages' + EOL) + f.write(EOL + '# Install python packages' + EOL) python = get_value(env, config, 'PYTHON', 'CM_DOCKERFILE_PYTHON') docker_use_virtual_python = env.get('CM_DOCKER_USE_VIRTUAL_PYTHON', "yes") - if str(docker_use_virtual_python).lower() not in [ "no", "0", "false"]: + if str(docker_use_virtual_python).lower() not in ["no", "0", "false"]: f.write('RUN {} -m venv /home/cmuser/venv/cm'.format(python) + " " + EOL) f.write('ENV PATH="/home/cmuser/venv/cm/bin:$PATH"' + EOL) - #f.write('RUN . /opt/venv/cm/bin/activate' + EOL) - f.write('RUN {} -m pip install '.format(python) + " ".join(get_value(env, config, 'python-packages')) + ' ' + pip_extra_flags + ' ' + EOL) - - f.write(EOL+'# Download CM repo for scripts' + EOL) + # f.write('RUN . /opt/venv/cm/bin/activate' + EOL) + f.write( + 'RUN {} -m pip install '.format(python) + + " ".join( + get_value( + env, + config, + 'python-packages')) + + ' ' + + pip_extra_flags + + ' ' + + EOL) + + f.write(EOL + '# Download CM repo for scripts' + EOL) if use_copy_repo: docker_repo_dest = "/home/cmuser/CM/repos/mlcommons@cm4mlops" - f.write(f'COPY --chown=cmuser:cm {relative_repo_path} {docker_repo_dest}' + EOL) + f.write( + f'COPY --chown=cmuser:cm {relative_repo_path} {docker_repo_dest}' + + EOL) f.write(EOL + '# Register CM repository' + EOL) f.write('RUN cm pull repo --url={} --quiet'.format(docker_repo_dest) + EOL) f.write(EOL) - else: # Use cm pull repo as before - x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO','') - if x!='': x=' '+x + x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO', '') + if x != '': + x = ' ' + x - f.write('RUN cm pull repo ' + cm_mlops_repo + cm_mlops_repo_branch_string + x + EOL) + f.write( + 'RUN cm pull repo ' + + cm_mlops_repo + + cm_mlops_repo_branch_string + + x + + EOL) # Check extra repositories - x = env.get('CM_DOCKER_EXTRA_CM_REPOS','') - if x!='': + x = env.get('CM_DOCKER_EXTRA_CM_REPOS', '') + if x != '': for y in x.split(','): - f.write('RUN '+ y + EOL) + f.write('RUN ' + y + EOL) - if str(env.get('CM_DOCKER_SKIP_CM_SYS_UPGRADE', False)).lower() not in ["true", "1", "yes"]: - f.write(EOL+'# Install all system dependencies' + EOL) + if str(env.get('CM_DOCKER_SKIP_CM_SYS_UPGRADE', False) + ).lower() not in ["true", "1", "yes"]: + f.write(EOL + '# Install all system dependencies' + EOL) f.write('RUN cm run script --tags=get,sys-utils-cm --quiet' + EOL) if 'CM_DOCKER_PRE_RUN_COMMANDS' in env: for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: - f.write('RUN '+ pre_run_cmd + EOL) + f.write('RUN ' + pre_run_cmd + EOL) - run_cmd_extra=" "+env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":","=") + run_cmd_extra = " " + \ + env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") gh_token = get_value(env, config, "GH_TOKEN", "CM_GH_TOKEN") if gh_token: run_cmd_extra = " --env.CM_GH_TOKEN=$CM_GH_TOKEN" - f.write(EOL+'# Run commands' + EOL) + f.write(EOL + '# Run commands' + EOL) for comment in env.get('CM_DOCKER_RUN_COMMENTS', []): f.write(comment + EOL) skip_extra = False if 'CM_DOCKER_RUN_CMD' not in env: - env['CM_DOCKER_RUN_CMD']="" + env['CM_DOCKER_RUN_CMD'] = "" if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: - env['CM_DOCKER_RUN_CMD']+="cm version" + env['CM_DOCKER_RUN_CMD'] += "cm version" skip_extra = True else: - if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", 
"true"]: + if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + ).lower() not in ["yes", "1", "true"]: env['CM_DOCKER_RUN_CMD'] += "cm pull repo && " - env['CM_DOCKER_RUN_CMD'] += "cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS']+ ' --quiet' + env['CM_DOCKER_RUN_CMD'] += "cm run script --tags=" + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' else: - if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", "true"]: - env['CM_DOCKER_RUN_CMD']="cm pull repo && " + env['CM_DOCKER_RUN_CMD'] + if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + ).lower() not in ["yes", "1", "true"]: + env['CM_DOCKER_RUN_CMD'] = "cm pull repo && " + \ + env['CM_DOCKER_RUN_CMD'] print(env['CM_DOCKER_RUN_CMD']) - fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION"," --fake_run") + dockerfile_env_input_string - fake_run = fake_run + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run + fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION", + " --fake_run") + dockerfile_env_input_string + fake_run = fake_run + \ + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] if not skip_extra: x += fake_run if '--quiet' not in x: - x+=' --quiet' - if run_cmd_extra!='': - x+=' '+run_cmd_extra + x += ' --quiet' + if run_cmd_extra != '': + x += ' ' + run_cmd_extra f.write(x + EOL) - #fake_run to install the dependent scripts and caching them - if not "run" in env['CM_DOCKER_RUN_CMD'] and str(env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]: + # fake_run to install the dependent scripts and caching them + if not "run" in env['CM_DOCKER_RUN_CMD'] and str( + env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]: fake_run = dockerfile_env_input_string x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra if '--quiet' not in x: - x+=' --quiet ' - x+=EOL + x += ' --quiet ' + x += EOL f.write(x) - if 'CM_DOCKER_POST_RUN_COMMANDS' in env: for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: - f.write('RUN '+ post_run_cmd + EOL) + f.write('RUN ' + post_run_cmd + EOL) - post_file = env.get('DOCKER_IMAGE_POST_FILE','') - if post_file!='': + post_file = env.get('DOCKER_IMAGE_POST_FILE', '') + if post_file != '': r = utils.load_txt(post_file) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] f.write(s + EOL) @@ -337,22 +410,24 @@ def preprocess(i): f.close() - #f = open(env['CM_DOCKERFILE_WITH_PATH'], "r") - #print(f.read()) + # f = open(env['CM_DOCKERFILE_WITH_PATH'], "r") + # print(f.read()) + + return {'return': 0} - return {'return':0} -def get_value(env, config, key, env_key = None): +def get_value(env, config, key, env_key=None): if not env_key: env_key = key - if env.get(env_key, None) != None: + if env.get(env_key, None) is not None: return env[env_key] docker_os = env['CM_DOCKER_OS'] docker_os_version = env['CM_DOCKER_OS_VERSION'] - version_meta = config['distros'][docker_os]['versions'].get(docker_os_version, '') + version_meta = config['distros'][docker_os]['versions'].get( + docker_os_version, '') if key in version_meta: return version_meta[key] diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py index 47338aed90..8d477fcc74 100644 --- a/script/build-mlperf-inference-server-nvidia/customize.py +++ b/script/build-mlperf-inference-server-nvidia/customize.py @@ -2,29 +2,34 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 
diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py
index 47338aed90..8d477fcc74 100644
--- a/script/build-mlperf-inference-server-nvidia/customize.py
+++ b/script/build-mlperf-inference-server-nvidia/customize.py
@@ -2,29 +2,34 @@
 import os
 import shutil
 
+
 def preprocess(i):
 
     os_info = i['os_info']
 
     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}
 
     env = i['env']
 
     if '+LIBRARY_PATH' not in env:
         env['+LIBRARY_PATH'] = []
 
     if 'CM_TENSORRT_INSTALL_PATH' in env:
-        env['+LIBRARY_PATH'].append(os.path.join(env['CM_TENSORRT_INSTALL_PATH'], "lib"))
+        env['+LIBRARY_PATH'].append(os.path.join(
+            env['CM_TENSORRT_INSTALL_PATH'], "lib"))
 
-    cxxflags = [ "-Wno-error=switch", "-DDALI_1_15=1", "-Wno-error=maybe-uninitialized" ]
+    cxxflags = [
+        "-Wno-error=switch",
+        "-DDALI_1_15=1",
+        "-Wno-error=maybe-uninitialized"]
 
     if env.get('CM_GCC_VERSION', '') != '':
         gcc_major_version = env['CM_GCC_VERSION'].split(".")[0]
         if int(gcc_major_version) > 10:
-            if env.get('CM_MLPERF_INFERENCE_VERSION','') != "4.1":
+            if env.get('CM_MLPERF_INFERENCE_VERSION', '') != "4.1":
                 cxxflags.append("-Wno-error=range-loop-construct")
 
-    if env.get('CM_MLPERF_DEVICE','') == "inferentia":
+    if env.get('CM_MLPERF_DEVICE', '') == "inferentia":
         env['USE_INFERENTIA'] = "1"
         env['USE_NIGHTLY'] = "0"
         env['CM_MAKE_BUILD_COMMAND'] = "build"
@@ -33,10 +38,11 @@ def preprocess(i):
         env['+ CXXFLAGS'] = []
 
     env['+ CXXFLAGS'] += cxxflags
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py
index b6e91c4335..a77967c49e 100644
--- a/script/calibrate-model-for.qaic/customize.py
+++ b/script/calibrate-model-for.qaic/customize.py
@@ -3,6 +3,7 @@
 import sys
 import yaml
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -25,11 +26,12 @@ def preprocess(i):
             return r
         cmd = r['cmd']
-        print("Profiling from "+ os.getcwd())
+        print("Profiling from " + os.getcwd())
 
         env['CM_RUN_CMD'] = cmd
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def create_batched_inputs(env):
 
     original_images_file = env['CM_DATASET_PREPROCESSED_IMAGES_LIST']
@@ -39,44 +41,47 @@ def create_batched_inputs(env):
     with open(original_images_file) as f:
         file_paths = f.read().splitlines()
 
-    i = 0;
+    i = 0
     outfile = None
     lastfile = None
     outfiles = []
-    os.makedirs(os.path.join(os.getcwd(),"raw"), exist_ok = True)
+    os.makedirs(os.path.join(os.getcwd(), "raw"), exist_ok=True)
     for file in file_paths:
-        if i%int(batchsize) == 0:
+        if i % int(batchsize) == 0:
             filename = os.path.basename(file).replace(".rgb32", ".raw")
-            outfile = os.path.join(os.getcwd(),"raw", filename)
+            outfile = os.path.join(os.getcwd(), "raw", filename)
             outfiles.append(outfile)
             with open(outfile, "wb") as f:
                 pass
         with open(outfile, "ab") as f:
             with open(file, "rb") as infile:
                 f.write(infile.read())
-        i = i+1
+        i = i + 1
         lastfile = file
 
-    while i%int(batchsize) != 0:
+    while i % int(batchsize) != 0:
         with open(outfile, "ab") as f:
             with open(lastfile, "rb") as infile:
                 f.write(infile.read())
-        i = i+1
+        i = i + 1
 
     with open("batched_input_files", "w") as f:
         f.write("\n".join(outfiles))
 
     return {'return': 0}
 
+
 def construct_calibration_cmd(env):
 
     compiler_params = env['CM_QAIC_COMPILER_PARAMS']
     batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE', "1")
-    cmd = env['CM_QAIC_EXEC_PATH'] + " "
+    cmd = env['CM_QAIC_EXEC_PATH'] + " "
     if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes':
-        cmd += " -input-list-file=batched_input_files -batchsize="+batchsize + " "
-    cmd += compiler_params + " -dump-profile=profile.yaml -model=" + env['CM_ML_MODEL_FILE_WITH_PATH']
+        cmd += " -input-list-file=batched_input_files -batchsize=" + batchsize + " "
+    cmd += compiler_params + " -dump-profile=profile.yaml -model=" + \
+        env['CM_ML_MODEL_FILE_WITH_PATH']
 
     return {'return': 0, 'cmd': cmd}
 
+
 def postprocess(i):
 
     env = i['env']
 
@@ -84,12 +89,12 @@ def postprocess(i):
     env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path
 
     if env.get('CM_ML_MODEL_INPUT_LAYER_NAME', '') != '':
-        input_layer_names = [ env.get('CM_ML_MODEL_INPUT_LAYER_NAME') ]
+        input_layer_names = [env.get('CM_ML_MODEL_INPUT_LAYER_NAME')]
     else:
-        input_layer_names = [ "images:0", "images/:0" ]
+        input_layer_names = ["images:0", "images/:0"]
 
-    output_layer_names_conf = [ [], [] ]
-    output_layer_names_loc = [ [], [] ]
+    output_layer_names_conf = [[], []]
+    output_layer_names_loc = [[], []]
 
     output_layer_names_loc[0] = [
         "/GatherElements/:0",
@@ -97,7 +102,7 @@ def postprocess(i):
         "/GatherElements_2/:0",
         "/GatherElements_3/:0",
         "/GatherElements_4/:0"
-        ]
+    ]
 
     output_layer_names_conf[0] = [
         "/TopK/:0",
@@ -105,23 +110,23 @@ def postprocess(i):
         "/TopK_2/:0",
         "/TopK_3/:0",
         "/TopK_4/:0"
-        ]
+    ]
 
     output_layer_names_loc[1] = [
-            "GatherElements_588/:0",
-            "GatherElements_598/:0",
-            "GatherElements_608/:0",
-            "GatherElements_618/:0",
-            "GatherElements_628/:0"
+        "GatherElements_588/:0",
+        "GatherElements_598/:0",
+        "GatherElements_608/:0",
+        "GatherElements_618/:0",
+        "GatherElements_628/:0"
     ]
 
     output_layer_names_conf[1] = [
-            "TopK_570/:0",
+        "TopK_570/:0",
         "TopK_572/:0",
         "TopK_574/:0",
         "TopK_576/:0",
         "TopK_578/:0"
-        ]
+    ]
 
     if env.get('CM_QAIC_MODEL_NAME', '') == "retinanet":
         with open(profile_file_path, "r") as stream:
@@ -132,9 +137,10 @@ def postprocess(i):
             output_max_val_conf = -sys.maxsize
             docs = yaml.load_all(stream, yaml.FullLoader)
             for doc in docs:
-                if type(doc) == list:
-                    node_names = [ k['NodeOutputName'] for k in doc]
+                if isinstance(doc, list):
+                    node_names = [k['NodeOutputName'] for k in doc]
 
                     oindex = None
 
                     for output in output_layer_names_loc:
@@ -143,13 +149,15 @@ def postprocess(i):
                             break
 
                     if oindex is None:
-                        return {'return': 1, 'error': 'Output node names not found for the given retinanet model'}
+                        return {
+                            'return': 1, 'error': 'Output node names not found for the given retinanet model'}
 
                     for k in doc:
                         if k["NodeOutputName"] in input_layer_names:
                             min_val = k['Min']
                             max_val = k['Max']
-                            scale, offset = get_scale_offset(min_val, max_val)
+                            scale, offset = get_scale_offset(
+                                min_val, max_val)
                             env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale
                             env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset
 
@@ -160,13 +168,16 @@ def postprocess(i):
                                 output_min_val_loc = min_val
                             if max_val > output_max_val_loc:
                                 output_max_val_loc = max_val
-                            loc_scale, loc_offset = get_scale_offset(min_val, max_val)
-                            index = output_layer_names_loc[oindex].index(k["NodeOutputName"])
+                            loc_scale, loc_offset = get_scale_offset(
+                                min_val, max_val)
+                            index = output_layer_names_loc[oindex].index(
+                                k["NodeOutputName"])
                             env[f'CM_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale
-                            env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128 # to uint8 is done in NMS code
+                            # to uint8 is done in NMS code
+                            env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128
                             total_range = max_val - min_val
-                            scale = total_range/256.0
+                            scale = total_range / 256.0
                             offset = round(-min_val / scale)
 
                         if k["NodeOutputName"] in output_layer_names_conf[oindex]:
@@ -176,28 +187,34 @@ def postprocess(i):
                                 output_min_val_conf = min_val
                             if max_val > output_max_val_conf:
                                 output_max_val_conf = max_val
-                            conf_scale, conf_offset = get_scale_offset(min_val, max_val)
-                            index = output_layer_names_conf[oindex].index(k["NodeOutputName"])
+                            conf_scale, conf_offset = get_scale_offset(
+                                min_val, max_val)
+                            index = output_layer_names_conf[oindex].index(
+                                k["NodeOutputName"])
                             env[f'CM_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale
-                            env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128 # to uint8 is done in NMS code
+                            # to uint8 is done in NMS code
+                            env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128
                             total_range = max_val - min_val
-                            scale = total_range/256.0
+                            scale = total_range / 256.0
                             offset = round(-min_val / scale)
 
-                    loc_scale, loc_offset = get_scale_offset(output_min_val_loc, output_max_val_loc)
-                    conf_scale, conf_offset = get_scale_offset(output_min_val_conf, output_max_val_conf)
+                    loc_scale, loc_offset = get_scale_offset(
+                        output_min_val_loc, output_max_val_loc)
+                    conf_scale, conf_offset = get_scale_offset(
+                        output_min_val_conf, output_max_val_conf)
 
                     env['CM_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale
-                    env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code
+                    env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128  # to uint8 is done in NMS code
                     env['CM_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale
-                    env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128 # to uint8 is done in NMS code
+                    env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset - 128  # to uint8 is done in NMS code
 
             except yaml.YAMLError as exc:
                 return {'return': 1, 'error': exc}
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def get_scale_offset(min_val, max_val):
     total_range = max_val - min_val
-    scale = total_range/256.0
+    scale = total_range / 256.0
     offset = round(-min_val / scale)
     return scale, offset
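Review note: `get_scale_offset()` maps an observed float range onto 256 quantization levels: `scale = (max - min) / 256` and `offset = round(-min / scale)`, so that `real ≈ (q - offset) * scale`. A quick worked example with made-up values:

```python
def get_scale_offset(min_val, max_val):
    total_range = max_val - min_val
    scale = total_range / 256.0        # size of one 8-bit step
    offset = round(-min_val / scale)   # level that represents 0.0
    return scale, offset

# A tensor observed in [-1.0, 3.0]:
scale, offset = get_scale_offset(-1.0, 3.0)
print(scale, offset)                   # 0.015625 64
# Dequantizing level 64 recovers 0.0: (64 - 64) * 0.015625 == 0.0
print((64 - offset) * scale)
```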
diff --git a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
index 977a9993bb..11d224c59c 100644
--- a/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
+++ b/script/clean-nvidia-mlperf-inference-scratch-space/customize.py
@@ -2,6 +2,7 @@
 import os
 import cmind as cm
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -21,32 +22,34 @@ def preprocess(i):
     if env.get('CM_MODEL', '') == 'sdxl':
         if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data':
             clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """
-            cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
+            cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
         if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data':
             clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """
-            cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
+            cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl"
         if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model':
             clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """
-            cache_rm_tags = "nvidia-harness,_download_model,_sdxl"
+            cache_rm_tags = "nvidia-harness,_download_model,_sdxl"
 
     cache_rm_tags = cache_rm_tags + extra_cache_rm_tags
 
     if cache_rm_tags:
-        r = cm.access({'action': 'rm', 'automation': 'cache', 'tags': cache_rm_tags, 'f': True})
+        r = cm.access({'action': 'rm', 'automation': 'cache',
+                       'tags': cache_rm_tags, 'f': True})
         print(r)
-        if r['return'] != 0 and r['return'] != 16: ## ignore missing ones
+        if r['return'] != 0 and r['return'] != 16:  # ignore missing ones
             return r
-        if r['return'] == 0: # cache entry found
+        if r['return'] == 0:  # cache entry found
             if clean_cmd != '':
                 env['CM_RUN_CMD'] = clean_cmd
     else:
         if clean_cmd != '':
             env['CM_RUN_CMD'] = clean_cmd
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py
index 7644821c5c..3acf5b5cc8 100644
--- a/script/compile-model-for.qaic/customize.py
+++ b/script/compile-model-for.qaic/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -20,54 +21,69 @@ def preprocess(i):
             return r
         cmd = r['cmd']
-        print("Compiling from "+ os.getcwd())
+        print("Compiling from " + os.getcwd())
 
         env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd
 
         env['CM_RUN_CMD'] = cmd
     else:
         import shutil
-        print("Creating cache entry from " + env['CM_REGISTER_CACHE'] + " to " + os.getcwd())
-        r = shutil.copytree(env['CM_REGISTER_CACHE'], os.path.join(os.getcwd(), "elfs"))
+        print(
+            "Creating cache entry from " +
+            env['CM_REGISTER_CACHE'] +
+            " to " +
+            os.getcwd())
+        r = shutil.copytree(
+            env['CM_REGISTER_CACHE'],
+            os.path.join(
+                os.getcwd(),
+                "elfs"))
         print(r)
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def construct_compilation_cmd(env):
     compiler_params_base = env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE']
-    compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '')
+    compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + \
+        ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '')
     batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE')
 
     if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes':
-        profile_string = " -load-profile=" + env['CM_QAIC_MODEL_PROFILE_WITH_PATH']
+        profile_string = " -load-profile=" + \
+            env['CM_QAIC_MODEL_PROFILE_WITH_PATH']
     else:
         profile_string = ''
 
     compiler_params = compiler_params_base + ' ' + compiler_args
 
     if batchsize:
-        compiler_params += " -batchsize="+batchsize
+        compiler_params += " -batchsize=" + batchsize
 
-    percentile_calibration_params = env.get('CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS')
+    percentile_calibration_params = env.get(
+        'CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS')
     if percentile_calibration_params:
         compiler_params += " " + percentile_calibration_params
 
     aic_binary_dir = os.path.join(os.getcwd(), "elfs")
 
-    cmd = env['CM_QAIC_EXEC_PATH'] + \
-        " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
-        profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \
-        + compiler_params
+    cmd = env['CM_QAIC_EXEC_PATH'] + \
+        " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \
+        profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \
+        + compiler_params
 
     return {'return': 0, 'cmd': cmd}
 
+
 def postprocess(i):
 
     env = i['env']
-    env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join(os.getcwd(), "elfs", "programqpc.bin")
+    env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join(
+        os.getcwd(), "elfs", "programqpc.bin")
     if not os.path.isdir(os.path.join(os.getcwd(), "elfs")):
-        return {'return': 1, 'error': 'elfs directory not found inside the compiled directory'}
+        return {
+            'return': 1, 'error': 'elfs directory not found inside the compiled directory'}
 
     env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']
 
-    return {'return':0}
+    return {'return': 0}
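Review note: `construct_compilation_cmd()` builds one long shell string by concatenation, which makes stray-space bugs easy. A hedged alternative sketch — collecting pieces in a list and joining once (flag names mirror the ones above; the exec path is a placeholder, not a real install location):

```python
def build_compile_cmd(env: dict) -> str:
    # Collect flags in a list; optional pieces are simply not appended.
    parts = [
        env['CM_QAIC_EXEC_PATH'],
        '-model=' + env['CM_ML_MODEL_FILE_WITH_PATH'],
    ]
    if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes':
        parts.append('-load-profile=' + env['CM_QAIC_MODEL_PROFILE_WITH_PATH'])
    batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE')
    if batchsize:
        parts.append('-batchsize=' + str(batchsize))
    parts.append('-aic-binary-dir=elfs')
    return ' '.join(parts)

print(build_compile_cmd({
    'CM_QAIC_EXEC_PATH': '/opt/qaic/exec/qaic-exec',   # placeholder path
    'CM_ML_MODEL_FILE_WITH_PATH': 'model.onnx',
    'CM_QAIC_MODEL_BATCH_SIZE': '8',
}))
```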
diff --git a/script/compile-program/customize.py b/script/compile-program/customize.py
index 73a3eeb82b..c5e2adabb8 100644
--- a/script/compile-program/customize.py
+++ b/script/compile-program/customize.py
@@ -1,29 +1,38 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
     env = i['env']
     CPPFLAGS = env.get('+ CPPFLAGS', [])
     env['CM_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS)
-    env['CM_CXX_COMPILER_FLAGS'] = " ".join(env.get('+ CXXFLAGS', []) + CPPFLAGS)
+    env['CM_CXX_COMPILER_FLAGS'] = " ".join(
+        env.get('+ CXXFLAGS', []) + CPPFLAGS)
     env['CM_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', []))
 
-    CPATH = env.get('+CPATH', [ ])
-    env['CM_C_INCLUDE_PATH'] = " -I".join([" "] + env.get('+C_INCLUDE_PATH', []) + CPATH)
-    env['CM_CPLUS_INCLUDE_PATH'] = " -I".join([" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH)
-    env['CM_F_INCLUDE_PATH'] = " -I".join([" "] + env.get('+F_INCLUDE_PATH', []) + CPATH)
+    CPATH = env.get('+CPATH', [])
+    env['CM_C_INCLUDE_PATH'] = " -I".join([" "] +
+                                          env.get('+C_INCLUDE_PATH', []) +
+                                          CPATH)
+    env['CM_CPLUS_INCLUDE_PATH'] = " -I".join(
+        [" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH)
+    env['CM_F_INCLUDE_PATH'] = " -I".join([" "] +
+                                          env.get('+F_INCLUDE_PATH', []) +
+                                          CPATH)
 
     # If windows, need to extend it more ...
-    if os_info['platform'] == 'windows' and env.get('CM_COMPILER_FAMILY','')!='LLVM':
-        print ("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows")
-        return {'return':0}
+    if os_info['platform'] == 'windows' and env.get(
+            'CM_COMPILER_FAMILY', '') != 'LLVM':
+        print("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows")
+        return {'return': 0}
 
     LDFLAGS = env.get('+ LDFLAGS', [])
 
     env['CM_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS)
-    env['CM_CXX_LINKER_FLAGS'] = " ".join(env.get('+ LDCXXFLAGS', []) + LDFLAGS)
+    env['CM_CXX_LINKER_FLAGS'] = " ".join(
+        env.get('+ LDCXXFLAGS', []) + LDFLAGS)
     env['CM_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS)
 
     if env.get('CM_LINKER_LANG', 'C') == "C":
@@ -44,11 +53,14 @@ def preprocess(i):
         env['CM_LINKER_COMPILE_FLAGS'] = env['CM_F_COMPILER_FLAGS']
         env['CM_LINKER_FLAGS'] = env['CM_F_LINKER_FLAGS']
 
-    env['CM_LD_LIBRARY_PATH'] = " -L".join([" " ] + env.get('+LD_LIBRARY_PATH', []))
-    env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env['CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else ''
+    env['CM_LD_LIBRARY_PATH'] = " -L".join([" "] +
+                                           env.get('+LD_LIBRARY_PATH', []))
+    env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env[
+        'CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else ''
+
+    return {'return': 0}
 
-    return {'return':0}
 
 def postprocess(i):
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/convert-csv-to-md/customize.py b/script/convert-csv-to-md/customize.py
index cbcbdaf386..5e63bcd939 100644
--- a/script/convert-csv-to-md/customize.py
+++ b/script/convert-csv-to-md/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -17,12 +18,14 @@ def preprocess(i):
     md_file = env.get('CM_MD_FILE', '')
     process_file = os.path.join(i['run_script_input']['path'], "process.py")
 
-    env['CM_RUN_CMD'] = '{} {} {} {} '.format(env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file)
+    env['CM_RUN_CMD'] = '{} {} {} {} '.format(
+        env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file)
+
+    return {'return': 0}
 
-    return {'return':0}
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
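Review note: the whole convert-csv-to-md flow boils down to pandas' `DataFrame.to_markdown` (which in turn needs the `tabulate` package installed). Roughly, assuming a `summary.csv` exists in the working directory:

```python
import pandas as pd

df = pd.read_csv("summary.csv", engine="python")   # same engine as process.py below
with open("converted.md", "w") as md:
    df.to_markdown(buf=md)                          # requires `pip install tabulate`
```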
diff --git a/script/convert-csv-to-md/process.py b/script/convert-csv-to-md/process.py
index 1a563cdabb..df1637c67c 100644
--- a/script/convert-csv-to-md/process.py
+++ b/script/convert-csv-to-md/process.py
@@ -4,7 +4,7 @@
 csv_file = sys.argv[1] if len(sys.argv) > 1 else "summary.csv"
 md_file = sys.argv[2] if len(sys.argv) > 2 else "converted.md"
 
-df=pd.read_csv(csv_file, engine='python')
+df = pd.read_csv(csv_file, engine='python')
 
 with open(md_file, "w") as md:
     df.to_markdown(buf=md)
diff --git a/script/convert-ml-model-huggingface-to-onnx/customize.py b/script/convert-ml-model-huggingface-to-onnx/customize.py
index 49c588fc38..6ef39f0f84 100644
--- a/script/convert-ml-model-huggingface-to-onnx/customize.py
+++ b/script/convert-ml-model-huggingface-to-onnx/customize.py
@@ -1,13 +1,14 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
 
     env = i['env']
 
-    if env.get("CM_MODEL_HUGG_PATH","") == "":
+    if env.get("CM_MODEL_HUGG_PATH", "") == "":
         return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'}
 
     automation = i['automation']
@@ -16,11 +17,12 @@ def preprocess(i):
 
     path = os.getcwd()
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     os_info = i['os_info']
 
     env = i['env']
 
-    env['HUGGINGFACE_ONNX_FILE_PATH'] = os.path.join(os.getcwd(),"model.onnx")
-    return {'return':0}
+    env['HUGGINGFACE_ONNX_FILE_PATH'] = os.path.join(os.getcwd(), "model.onnx")
+    return {'return': 0}
diff --git a/script/copy-to-clipboard/code.py b/script/copy-to-clipboard/code.py
index 082813e9a0..0a1aa014a0 100644
--- a/script/copy-to-clipboard/code.py
+++ b/script/copy-to-clipboard/code.py
@@ -3,7 +3,9 @@
 
 text = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT', '')
 
-add_quotes = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [True,'True','yes']
+add_quotes = os.environ.get(
+    'CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [
+    True, 'True', 'yes']
 
 if add_quotes:
     text = '"' + text + '"'
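Review note: the `in [True, 'True', 'yes']` test in copy-to-clipboard only matches two exact spellings ('true', 'YES', '1' all fall through). A more forgiving sketch normalizes before comparing — offered as an alternative pattern, not a required change:

```python
import os

def env_flag(name: str, default: bool = False) -> bool:
    """Read an environment variable as a boolean, case-insensitively."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "yes", "on")

os.environ['CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES'] = 'YES'
print(env_flag('CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES'))   # True
```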
diff --git a/script/create-conda-env/customize.py b/script/create-conda-env/customize.py
index 3d4b17e7fd..c9fd8083bf 100644
--- a/script/create-conda-env/customize.py
+++ b/script/create-conda-env/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -12,9 +13,10 @@ def preprocess(i):
     recursion_spaces = i['recursion_spaces']
 
     if env.get('CM_CONDA_ENV_NAME', '') == '':
-        return {'return':1, 'error': 'Please use "_name." variation'}
+        return {'return': 1, 'error': 'Please use "_name." variation'}
+
+    return {'return': 0}
 
-    return {'return':0}
 
 def postprocess(i):
 
     env = i['env']
@@ -25,7 +27,7 @@ def postprocess(i):
         env['CM_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin")
         env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib")
 
-        env['+PATH'] = [ env['CM_CONDA_BIN_PATH'] ]
-        env['+LD_LIBRARY_PATH'] = [ env['CM_CONDA_LIB_PATH'] ]
+        env['+PATH'] = [env['CM_CONDA_BIN_PATH']]
+        env['+LD_LIBRARY_PATH'] = [env['CM_CONDA_LIB_PATH']]
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/create-custom-cache-entry/customize.py b/script/create-custom-cache-entry/customize.py
index ddfbe05a5d..f15a1e7fdf 100644
--- a/script/create-custom-cache-entry/customize.py
+++ b/script/create-custom-cache-entry/customize.py
@@ -2,27 +2,30 @@
 import os
 import shutil
 
+
 def preprocess(i):
 
     # CM script internal variables
     env = i['env']
 
     extra_cache_tags = []
-    if env.get('CM_EXTRA_CACHE_TAGS','').strip() == '':
-        print ('')
-        extra_cache_tags_str = input('Enter extra tags for the custom CACHE entry separated by comma: ')
+    if env.get('CM_EXTRA_CACHE_TAGS', '').strip() == '':
+        print('')
+        extra_cache_tags_str = input(
+            'Enter extra tags for the custom CACHE entry separated by comma: ')
 
         extra_cache_tags = extra_cache_tags_str.strip().split(',')
 
-    return {'return':0, 'add_extra_cache_tags':extra_cache_tags}
+    return {'return': 0, 'add_extra_cache_tags': extra_cache_tags}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    path = env.get('CM_CUSTOM_CACHE_ENTRY_PATH','').strip()
+    path = env.get('CM_CUSTOM_CACHE_ENTRY_PATH', '').strip()
 
-    if path!='':
+    if path != '':
         if not os.path.isdir(path):
             os.makedirs(path)
     else:
@@ -30,7 +33,8 @@ def postprocess(i):
 
     x = ''
     env_key = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY', '')
-    if env_key != '': x = env_key+'_'
+    if env_key != '':
+        x = env_key + '_'
 
     env['CM_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path
     env['CM_CUSTOM_CACHE_ENTRY_PATH'] = path
diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py
index 8a70d706de..5c6a448bc0 100644
--- a/script/create-fpgaconvnet-app-tinyml/customize.py
+++ b/script/create-fpgaconvnet-app-tinyml/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -14,25 +15,29 @@ def preprocess(i):
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
     network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
-    run_dir = env['CM_TINY_FPGACONVNET_'+network_env_name+'_RUN_DIR']
+    run_dir = env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR']
 
     run_cmd = "cd " + run_dir + " && xsct create_boot_image.tcl"
 
     env['CM_RUN_CMD'] = run_cmd
     env['CM_RUN_DIR'] = run_dir
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
-    return {'return':1}
+    return {'return': 1}
 
     network = env['CM_TINY_NETWORK_NAME']
-    json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json")
+    json_location = os.path.join(
+        env['CM_RUN_DIR'],
+        env['CM_TINY_NETWORK_NAME'] + ".json")
     if os.path.exists(json_location):
-        print(f"JSON configuration file for {network} created at {json_location}")
+        print(
+            f"JSON configuration file for {network} created at {json_location}")
     else:
-        return {'return':1, 'error': "JSON configuration file generation failed"}
+        return {'return': 1, 'error': "JSON configuration file generation failed"}
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py
index 6489bb7f16..6175a2bdd2 100644
--- a/script/create-fpgaconvnet-config-tinyml/customize.py
+++ b/script/create-fpgaconvnet-config-tinyml/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,7 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    code_path = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "closed", "fpgaconvnet", "code")
+    code_path = os.path.join(
+        env['CM_GIT_REPO_CHECKOUT_PATH'],
+        "closed",
+        "fpgaconvnet",
+        "code")
     network_env_name = env['CM_TINY_NETWORK_NAME'].replace("-", "_").upper()
     env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name
     env['CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path
@@ -25,13 +30,15 @@ def preprocess(i):
     run_dir = os.path.join(code_path, board, benchmark)
     env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir
 
-    run_cmd = "cd " + run_dir + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py"
+    run_cmd = "cd " + run_dir + " && " + \
+        env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py"
 
     env['ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
     env['CM_RUN_CMD'] = run_cmd
     env['CM_RUN_DIR'] = run_dir
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
@@ -41,13 +48,17 @@ def postprocess(i):
     env['CM_TINY_FPGACONVNET_NETWORK_NAME'] = network
     network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
 
-    json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json")
+    json_location = os.path.join(
+        env['CM_RUN_DIR'],
+        env['CM_TINY_NETWORK_NAME'] + ".json")
     if os.path.exists(json_location):
-        print(f"JSON configuration file for {network} created at {json_location}")
+        print(
+            f"JSON configuration file for {network} created at {json_location}")
     else:
-        return {'return':1, 'error': "JSON configuration file generation failed"}
+        return {'return': 1, 'error': "JSON configuration file generation failed"}
 
-    env['CM_TINY_FPGACONVNET_CONFIG_FILE_' + network_env_name + '_PATH'] = json_location
+    env['CM_TINY_FPGACONVNET_CONFIG_FILE_' +
+        network_env_name + '_PATH'] = json_location
     env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location
 
-    return {'return':0}
+    return {'return': 0}
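Review note: both fpgaconvnet scripts build `run_cmd = "cd " + run_dir + " && ..."` strings for a shell wrapper to execute later. When the command is launched from Python itself, `subprocess` with `cwd=` expresses the same thing without string splicing — a sketch of the pattern, not how CM dispatches `CM_RUN_CMD`:

```python
import subprocess

def run_in_dir(run_dir: str, args: list) -> int:
    # cwd= replaces the "cd <dir> &&" prefix; no shell quoting to get wrong.
    result = subprocess.run(args, cwd=run_dir)
    return result.returncode

# e.g. the create_config.py step above, under an assumed run directory:
# run_in_dir("closed/fpgaconvnet/code/<board>/<benchmark>",
#            ["python3", "create_config.py"])
```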
diff --git a/script/create-patch/customize.py b/script/create-patch/customize.py
index 2990d29ff0..0ebd63b99f 100644
--- a/script/create-patch/customize.py
+++ b/script/create-patch/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -15,39 +16,42 @@ def preprocess(i):
 
     new_dir = env.get('CM_CREATE_PATCH_NEW', '')
     if new_dir == '':
-        return {'return':1, 'error':'specify NEW directory using --new'}
+        return {'return': 1, 'error': 'specify NEW directory using --new'}
     if not os.path.isdir(new_dir):
-        return {'return':1, 'error':'NEW directory doesn\'t exist {}'.format(new_dir)}
+        return {'return': 1,
+                'error': 'NEW directory doesn\'t exist {}'.format(new_dir)}
 
     old_dir = env.get('CM_CREATE_PATCH_OLD', '')
     if old_dir == '':
-        return {'return':1, 'error':'specify OLD directory using --old'}
+        return {'return': 1, 'error': 'specify OLD directory using --old'}
     if not os.path.isdir(old_dir):
-        return {'return':1, 'error':'OLD directory doesn\'t exist {}'.format(old_dir)}
+        return {'return': 1,
+                'error': 'OLD directory doesn\'t exist {}'.format(old_dir)}
 
     exclude = env.get('CM_CREATE_PATCH_EXCLUDE', '').strip()
     x_exclude = ''
 
-    if exclude!='':
+    if exclude != '':
         for e in exclude.split(','):
-            x_exclude+=' --exclude={}'.format(e)
+            x_exclude += ' --exclude={}'.format(e)
 
-    cmd = 'diff -Naur {} {} {} > patch.patch'.format(x_exclude, old_dir, new_dir)
+    cmd = 'diff -Naur {} {} {} > patch.patch'.format(
+        x_exclude, old_dir, new_dir)
 
     if not quiet:
-        print ('')
-        print ('Running command:')
-        print ('')
-        print (cmd)
-        print ('')
+        print('')
+        print('Running command:')
+        print('')
+        print(cmd)
+        print('')
 
     os.system(cmd)
 
+    return {'return': 0}
 
-    return {'return':0}
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/destroy-terraform/customize.py b/script/destroy-terraform/customize.py
index fd604e38d0..e0bfa63b5f 100644
--- a/script/destroy-terraform/customize.py
+++ b/script/destroy-terraform/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -9,8 +10,9 @@ def preprocess(i):
 
     meta = i['meta']
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
-    return {'return':0}
+    return {'return': 0}
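Review note: create-patch (two files up) shells out with `os.system` and ignores the exit status, but `diff` returns 1 when files differ — the expected case here — and >= 2 on real errors. A hedged sketch that keeps the same command while checking that distinction:

```python
import subprocess

def create_patch(old_dir: str, new_dir: str, excludes=()):
    cmd = ['diff', '-Naur']
    for e in excludes:
        cmd.append('--exclude={}'.format(e))
    cmd += [old_dir, new_dir]
    with open('patch.patch', 'w') as out:
        rc = subprocess.run(cmd, stdout=out).returncode
    # diff exit codes: 0 = identical, 1 = differences found, >=2 = trouble
    if rc >= 2:
        raise RuntimeError('diff failed with code {}'.format(rc))
    return rc

# create_patch('old', 'new', excludes=['.git'])
```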
diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py
index 56a753af3b..2d5dfeeef4 100644
--- a/script/detect-cpu/customize.py
+++ b/script/detect-cpu/customize.py
@@ -3,12 +3,13 @@
 
 lscpu_out = 'tmp-lscpu.out'
 
+
 def preprocess(i):
 
     if os.path.isfile(lscpu_out):
         os.remove(lscpu_out)
 
-    return {'return':0}
+    return {'return': 0}
 
 
 def postprocess(i):
@@ -34,34 +35,36 @@ def postprocess(i):
 
             f = 'tmp-systeminfo.csv'
 
             if not os.path.isfile(f):
-                print ('WARNING: {} file was not generated!'.format(f))
+                print('WARNING: {} file was not generated!'.format(f))
             else:
                 keys = {}
                 j = 0
                 with open(f, 'r') as csvf:
                     for s in csv.reader(csvf):
-                        if j==0:
-                            keys=s
+                        if j == 0:
+                            keys = s
                         else:
                             x = {}
                             for k in range(0, len(s)):
-                                x[keys[k]]=s[k]
+                                x[keys[k]] = s[k]
                             sys.append(x)
-                            if j==1:
+                            if j == 1:
                                 sys1 = x
-                        j+=1
+                        j += 1
 
         except Exception as e:
-            logger.warning ('WARNING: problem processing file {} ({})!'.format(f, format(e)))
+            logger.warning(
+                'WARNING: problem processing file {} ({})!'.format(
+                    f, format(e)))
             pass
 
         try:
             f = 'tmp-wmic-cpu.csv'
             if not os.path.isfile(f):
-                logger.warning ('WARNING: {} file was not generated!'.format(f))
+                logger.warning('WARNING: {} file was not generated!'.format(f))
             else:
 
                 keys = {}
@@ -69,104 +72,121 @@ def postprocess(i):
                 with open(f, 'r', encoding='utf16') as csvf:
                     for s in csv.reader(csvf):
-                        if j==1:
-                            keys=s
-                        elif j>1:
+                        if j == 1:
+                            keys = s
+                        elif j > 1:
                             x = {}
                             for k in range(0, len(s)):
-                                x[keys[k]]=s[k]
+                                x[keys[k]] = s[k]
                             cpu.append(x)
-                            if j==2:
+                            if j == 2:
                                 cpu1 = x
-                        j+=1
+                        j += 1
 
         except Exception as e:
-            logger.warning ('WARNING: problem processing file {} ({})!'.format(f, format(e)))
+            logger.warning(
+                'WARNING: problem processing file {} ({})!'.format(
+                    f, format(e)))
             pass
 
+        state['host_device_raw_info'] = {
+            'sys': sys, 'sys1': sys1, 'cpu': cpu, 'cpu1': cpu1}
 
-        state['host_device_raw_info']={'sys':sys, 'sys1':sys1, 'cpu':cpu, 'cpu1':cpu1}
-
-        logger.warning ('WARNING: need to unify system and cpu output on Windows')
-
-        return {'return':0}
+        logger.warning(
+            'WARNING: need to unify system and cpu output on Windows')
 
+        return {'return': 0}
 
-    ###############################################################################
+    ##########################################################################
    # Linux
 
     if not os.path.isfile(lscpu_out):
-        print ('WARNING: lscpu.out file was not generated!')
+        print('WARNING: lscpu.out file was not generated!')
 
         # Currently ignore this error though probably should fail?
         # But need to check that is supported on all platforms.
-        return {'return':0}
+        return {'return': 0}
 
     r = utils.load_txt(file_name=lscpu_out)
-    if r['return']>0: return r
+    if r['return'] > 0:
+        return r
 
     ss = r['string']
 
-    #state['cpu_info_raw'] = ss
+    # state['cpu_info_raw'] = ss
 
     # Unifying some CPU info across different platforms
     unified_env = {
-         'CM_CPUINFO_CPUs':'CM_HOST_CPU_TOTAL_CORES',
-         'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE',
-         'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE',
-         'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE',
-         'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE',
-         'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS',
-         'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES',
-         'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
-         'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
-         'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE',
-         'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE',
-         'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY',
-         'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ',
-         'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME',
-         'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST',
-         'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID',
-         'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES',
-         'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES',
-         'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS',
-         'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE',
-         'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE',
-         'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE',
-         'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE'
-         }
+        'CM_CPUINFO_CPUs': 'CM_HOST_CPU_TOTAL_CORES',
+        'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE',
+        'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE',
+        'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE',
+        'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE',
+        'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS',
+        'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES',
+        'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
+        'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET',
+        'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE',
+        'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE',
+        'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY',
+        'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ',
+        'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME',
+        'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST',
+        'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID',
+        'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES',
+        'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES',
+        'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS',
+        'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE',
+        'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE',
+        'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE',
+        'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE'
+    }
 
     if env['CM_HOST_OS_TYPE'] == 'linux':
-        vkeys = [ 'Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)', \
-            'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache', \
-            'L3 cache', 'CPU max MHz' ]
+        vkeys = ['Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)',
+                 'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache',
+                 'L3 cache', 'CPU max MHz']
     elif env['CM_HOST_OS_FLAVOR'] == 'macos':
-        vkeys = [ 'hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize', \
-            'hw.l2cachesize' ]
+        vkeys = ['hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize',
+                 'hw.l2cachesize']
 
     if vkeys:
         for s in ss.split('\n'):
             v = s.split(':')
             key = v[0]
             if key in vkeys:
-                env_key = 'CM_CPUINFO_'+key.replace(" ","_").replace('(','').replace(')','').replace('-','_').replace('.','_')
+                env_key = 'CM_CPUINFO_' + key.replace(
+                    " ",
+                    "_").replace(
+                    '(',
+                    '').replace(
+                    ')',
+                    '').replace(
+                    '-',
+                    '_').replace(
+                    '.',
+                    '_')
                 if env_key in unified_env:
-                    env[unified_env[env_key]]=v[1].strip()
+                    env[unified_env[env_key]] = v[1].strip()
                 else:
                     env[env_key] = v[1].strip()
 
-    if env.get('CM_HOST_CPU_SOCKETS','') == '-':#assume as 1
+    if env.get('CM_HOST_CPU_SOCKETS', '') == '-':  # assume as 1
         env['CM_HOST_CPU_SOCKETS'] = '1'
 
-    if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '':
+    if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get(
+            'CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '':
         env['CM_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['CM_HOST_CPU_TOTAL_CORES']
 
-    if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES','') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES','') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE','') == '':
+    if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get(
+            'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE', '') == '':
         env['CM_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['CM_HOST_CPU_TOTAL_LOGICAL_CORES']) //
-            int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES'])))
+                                                      int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES'])))
 
-    if env.get('CM_HOST_CPU_SOCKETS','') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES','') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET','') == '':
-        env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str(int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS']))
+    if env.get('CM_HOST_CPU_SOCKETS', '') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES',
+                                                            '') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '':
+        env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str(
+            int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS']))
 
-    return {'return':0}
+    return {'return': 0}
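Review note: the derived-core logic above fills gaps with integer arithmetic — logical cores default to total cores, threads-per-core = logical // physical, and physical-per-socket = physical // sockets. In isolation, with made-up numbers:

```python
# Example: 2 sockets, 32 physical cores, 64 logical cores (hypothetical host)
total_logical = 64
total_physical = 32
sockets = 2

threads_per_core = total_logical // total_physical   # 2 -> SMT enabled
physical_per_socket = total_physical // sockets      # 16 cores per socket
print(threads_per_core, physical_per_socket)
```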
diff --git a/script/detect-os/customize.py b/script/detect-os/customize.py
index d59a511f6a..c5a0b17c1d 100644
--- a/script/detect-os/customize.py
+++ b/script/detect-os/customize.py
@@ -2,6 +2,7 @@
 import os
 import subprocess
 
+
 def preprocess(i):
 
     env = i['env']
@@ -17,7 +18,7 @@ def preprocess(i):
     # Update state (demo)
     # state['os_info'] = os_info
 
-    return {'return':0}
+    return {'return': 0}
 
 
 def postprocess(i):
@@ -30,7 +31,8 @@ def postprocess(i):
     if os_info['platform'] != 'windows':
         if os_info['platform'] == 'linux':
             sys_cmd = "ld --verbose | grep SEARCH_DIR "
-            result = subprocess.check_output(sys_cmd, shell=True).decode("utf-8")
+            result = subprocess.check_output(
+                sys_cmd, shell=True).decode("utf-8")
             result = result.replace("SEARCH_DIR(\"=", "")
             result = result.replace("SEARCH_DIR(\"", "")
             result = result.replace("\")", "")
@@ -39,14 +41,15 @@ def postprocess(i):
             dirs = result.split(';')
             lib_dir = []
             for _dir in dirs:
-                if _dir != '' and _dir not in lib_dir: 
+                if _dir != '' and _dir not in lib_dir:
                     lib_dir.append(_dir)
             env['+CM_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir
 
     r = utils.load_txt(file_name='tmp-run.out',
-                       check_if_exists = True,
-                       split = True)
-    if r['return']>0: return r
+                       check_if_exists=True,
+                       split=True)
+    if r['return'] > 0:
+        return r
 
     s = r['list']
 
@@ -63,20 +66,20 @@ def postprocess(i):
         env['CM_HOST_SYSTEM_NAME'] = platform.node()
 
     if 'CM_HOST_OS_PACKAGE_MANAGER' not in env:
-        if env.get('CM_HOST_OS_FLAVOR','') == "ubuntu" or \
-            "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE','') or \
-            env.get('CM_HOST_OS_FLAVOR','') == "debian":
+        if env.get('CM_HOST_OS_FLAVOR', '') == "ubuntu" or \
+                "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE', '') or \
+                env.get('CM_HOST_OS_FLAVOR', '') == "debian":
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "apt"
-        if env.get('CM_HOST_OS_FLAVOR','') == "rhel" or \
-            "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE',''):
+        if env.get('CM_HOST_OS_FLAVOR', '') == "rhel" or \
+                "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE', ''):
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "dnf"
-        if env.get('CM_HOST_OS_FLAVOR','') == "amzn":
+        if env.get('CM_HOST_OS_FLAVOR', '') == "amzn":
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "yum"
-        if env.get('CM_HOST_OS_FLAVOR_LIKE','') == "arch":
+        if env.get('CM_HOST_OS_FLAVOR_LIKE', '') == "arch":
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "arch"
-        if env.get('CM_HOST_OS_FLAVOR','') == "macos":
+        if env.get('CM_HOST_OS_FLAVOR', '') == "macos":
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "brew"
-        if env.get('CM_HOST_OS_FLAVOR','') == "sles":
+        if env.get('CM_HOST_OS_FLAVOR', '') == "sles":
             env['CM_HOST_OS_PACKAGE_MANAGER'] = "zypper"
     if env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "apt":
         env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y"
@@ -103,4 +106,4 @@ def postprocess(i):
     if os.path.exists("/.dockerenv"):
         env['CM_RUN_INSIDE_DOCKER'] = "yes"
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index 7ad623ece5..e14d2983c4 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -1,9 +1,11 @@
 from cmind import utils
-import os, subprocess
+import os
+import subprocess
 import select
 import sys
 import grp
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -17,7 +19,7 @@ def preprocess(i):
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
     if os.geteuid() == 0:
-        env['CM_SUDO'] = '' #root user does not need sudo
+        env['CM_SUDO'] = ''  # root user does not need sudo
         env['CM_SUDO_USER'] = "yes"
     else:
         if can_execute_sudo_without_password() or prompt_sudo() == 0:
@@ -28,18 +30,21 @@ def preprocess(i):
             env['CM_SUDO_USER'] = "no"
             env['CM_SUDO'] = ''
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def can_execute_sudo_without_password():
     try:
         # Run a harmless command using sudo
         result = subprocess.run(
-            ['sudo', '-n', 'true'],  # -n prevents sudo from prompting for a password
+            # -n prevents sudo from prompting for a password
+            ['sudo', '-n', 'true'],
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE
         )
-        # Check the return code; if it's 0, sudo executed without needing a password
+        # Check the return code; if it's 0, sudo executed without needing a
+        # password
         if result.returncode == 0:
             return True
         else:
@@ -49,11 +54,11 @@ def can_execute_sudo_without_password():
         return False
 
-
 def reset_terminal():
     """Reset terminal to default settings."""
     subprocess.run(['stty', 'sane'])
 
+
 def prompt_retry(timeout=10, default_retry=False):
     """Prompt the user with a yes/no question to retry the command, with a 10-second timeout."""
 
@@ -65,7 +70,10 @@ def prompt_retry(timeout=10, default_retry=False):
         print(f"Non-interactive environment detected. Skipping retry.")
         return default_retry  # Automatically use the default in non-interactive terminals
 
-    print(f"Timeout occurred. Do you want to try again? (y/n): ", end='', flush=True)
+    print(
+        f"Timeout occurred. Do you want to try again? (y/n): ",
+        end='',
+        flush=True)
 
     # Use select to wait for user input with a timeout
     ready, _, _ = select.select([sys.stdin], [], [], timeout)
@@ -80,6 +88,7 @@ def prompt_retry(timeout=10, default_retry=False):
         print("\nNo input received in 10 seconds. Exiting.")
         return False  # No input within the timeout, so don't retry
 
+
 def is_user_in_sudo_group():
     """Check if the current user is in the 'sudo' group."""
     try:
@@ -92,6 +101,7 @@ def is_user_in_sudo_group():
         print(f"Error checking sudo group: {str(e)}")
         return False
 
+
 def prompt_sudo():
     if os.geteuid() != 0 and not is_user_in_sudo_group():  # No sudo required for root user
@@ -111,17 +121,17 @@ def prompt_sudo():
 
         # Run the command with sudo, passing the password
         try:
-            if password == None:
+            if password is None:
                 r = subprocess.check_output(
-                    ['sudo', '-S', 'echo'] ,
+                    ['sudo', '-S', 'echo'],
                     text=True,
                     stderr=subprocess.STDOUT,
                     timeout=15  # Capture the command output
                 )
             else:
                 r = subprocess.check_output(
-                    ['sudo', '-S', 'echo'] ,
-                    input=password+ "\n",  # Pass the password to stdin
+                    ['sudo', '-S', 'echo'],
+                    input=password + "\n",  # Pass the password to stdin
                     text=True,
                     stderr=subprocess.STDOUT,
                     timeout=15  # Capture the command output
@@ -143,8 +153,9 @@ def prompt_sudo():
 
     return 0
 
+
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
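Review note: `can_execute_sudo_without_password()` relies on `sudo -n true`: `-n` makes sudo fail instead of prompting, so the return code alone says whether a cached or passwordless grant exists. Stripped to its core:

```python
import subprocess

def sudo_without_password() -> bool:
    """True if sudo works right now without prompting (-n = non-interactive)."""
    try:
        result = subprocess.run(['sudo', '-n', 'true'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return result.returncode == 0
    except FileNotFoundError:   # no sudo binary on this system
        return False

print(sudo_without_password())
```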
diff --git a/script/download-and-extract/customize.py b/script/download-and-extract/customize.py
index 87d65b35af..b3c570dbc6 100644
--- a/script/download-and-extract/customize.py
+++ b/script/download-and-extract/customize.py
@@ -2,6 +2,7 @@
 import os
 import hashlib
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -18,32 +19,37 @@ def preprocess(i):
         extra_cache_tags = i['input'].get('extra_cache_tags', '')
         r = automation.update_deps({
             'deps': meta['prehook_deps'] + meta['posthook_deps'],
-            'update_deps':{
+            'update_deps': {
                 'download-script': {
                     'extra_cache_tags': extra_cache_tags,
                     'force_cache': True
-                    },
-                'extract-script':{
+                },
+                'extract-script': {
                     'extra_cache_tags': extra_cache_tags,
                     'force_cache': True
-                    }
                 }
-            })
-        if r['return']>0: return r
+            }
+        })
+        if r['return'] > 0:
+            return r
 
     if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'):
         filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH']
 
         if not os.path.exists(filepath):
-            return {'return':1, 'error':'Local file {} doesn\'t exist'.format(filepath)}
+            return {'return': 1,
+                    'error': 'Local file {} doesn\'t exist'.format(filepath)}
 
-        env['CM_EXTRACT_REMOVE_EXTRACTED']='no'
+        env['CM_EXTRACT_REMOVE_EXTRACTED'] = 'no'
 
-    if str(env.get('CM_DAE_EXTRACT_DOWNLOADED')).lower() in [ "yes", "1", "true" ]:
-        if (env.get('CM_EXTRACT_FINAL_ENV_NAME', '') == '') and (env.get('CM_DAE_FINAL_ENV_NAME', '') != ''):
+    if str(env.get('CM_DAE_EXTRACT_DOWNLOADED')
+           ).lower() in ["yes", "1", "true"]:
+        if (env.get('CM_EXTRACT_FINAL_ENV_NAME', '') == '') and (
+                env.get('CM_DAE_FINAL_ENV_NAME', '') != ''):
             env['CM_EXTRACT_FINAL_ENV_NAME'] = env['CM_DAE_FINAL_ENV_NAME']
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
@@ -53,13 +59,15 @@ def postprocess(i):
     filepath = env.get('CM_DOWNLOAD_DOWNLOADED_PATH', '')
     if filepath == '':
-        return {'return':1, 'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'}
+        return {'return': 1,
+                'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'}
     if not os.path.exists(filepath):
-        return {'return':1, 'error': 'Extracted path doesn\'t exist: {}'.format(filepath)}
+        return {'return': 1,
+                'error': 'Extracted path doesn\'t exist: {}'.format(filepath)}
 
     if env.get('CM_DAE_FINAL_ENV_NAME'):
         env[env['CM_DAE_FINAL_ENV_NAME']] = filepath
 
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/download-file/customize.py b/script/download-file/customize.py
index 2a21e88e61..6142c48a94 100644
--- a/script/download-file/customize.py
+++ b/script/download-file/customize.py
@@ -2,6 +2,7 @@
 import os
 import subprocess
 
+
 def escape_special_chars(text, tool=None):
     special_chars = [
         '&', '|', '(', ')'
@@ -10,12 +11,13 @@
     for char in special_chars:
         text = text.replace(char, f'^{char}')
 
-    #handle URL special cases
+    # handle URL special cases
     if tool != "rclone":
         text = text.replace('%', "%%")
 
     return text
 
+
 def preprocess(i):
 
     os_info = i['os_info']
 
     env = i['env']
 
     # env to be passed to the subprocess
     subprocess_env = os.environ.copy()
-    subprocess_env['PATH'] += os.pathsep + os.pathsep.join(env.get('+PATH', ''))
+    subprocess_env['PATH'] += os.pathsep + \
+        os.pathsep.join(env.get('+PATH', ''))
 
     meta = i['meta']
 
@@ -39,8 +42,8 @@ def preprocess(i):
     q = '"' if os_info['platform'] == 'windows' else "'"
 
-    x='*' if os_info['platform'] == 'windows' else ''
-    x_c='-s' if os_info['platform'] == 'darwin_off' else ''
+    x = '*' if os_info['platform'] == 'windows' else ''
+    x_c = '-s' if os_info['platform'] == 'darwin_off' else ''
 
     # command for deleting file in windows and linux is different
     if os_info['platform'] == 'windows':
@@ -52,34 +55,37 @@ def preprocess(i):
         filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH']
 
         if not os.path.exists(filepath):
-            return {'return':1, 'error':'Local file {} doesn\'t exist'.format(filepath)}
+            return {'return': 1,
+                    'error': 'Local file {} doesn\'t exist'.format(filepath)}
 
         env['CM_DOWNLOAD_CMD'] = ""
         env['CM_DOWNLOAD_FILENAME'] = filepath
 
         if not quiet:
-            print ('')
-            print ('Using local file: {}'.format(filepath))
+            print('')
+            print('Using local file: {}'.format(filepath))
     else:
-        url = env.get('CM_DOWNLOAD_URL','')
+        url = env.get('CM_DOWNLOAD_URL', '')
 
-        if url=='':
-            return {'return':1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'}
+        if url == '':
+            return {
+                'return': 1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'}
 
-        print ('')
-        print ('Downloading from {}'.format(url))
+        print('')
+        print('Downloading from {}'.format(url))
 
         if '&' in url and tool != "cmutil":
             if os_info['platform'] == 'windows':
-                url = '"'+url+'"'
+                url = '"' + url + '"'
             else:
-                url = url.replace('&','\\&')
+                url = url.replace('&', '\\&')
 
         extra_download_options = env.get('CM_DOWNLOAD_EXTRA_OPTIONS', '')
 
         verify_ssl = env.get('CM_VERIFY_SSL', "True")
-        if str(verify_ssl).lower() in [ "no", "false" ] or os_info['platform'] == 'windows':
+        if str(verify_ssl).lower() in [
+                "no", "false"] or os_info['platform'] == 'windows':
             verify_ssl = False
         else:
             verify_ssl = True
@@ -87,7 +93,7 @@ def preprocess(i):
     if env.get('CM_DOWNLOAD_PATH', '') != '':
         download_path = env['CM_DOWNLOAD_PATH']
         if not os.path.exists(download_path):
-            os.makedirs(download_path, exist_ok = True)
+            os.makedirs(download_path, exist_ok=True)
         os.chdir(download_path)
 
     if env.get('CM_DOWNLOAD_FILENAME', '') == '':
         if "." in urltail and "/" in urlhead:
             # Check if ? after filename
             j = urltail.find('?')
-            if j>0:
-                urltail=urltail[:j]
+            if j > 0:
+                urltail = urltail[:j]
             env['CM_DOWNLOAD_FILENAME'] = urltail
         elif env.get('CM_DOWNLOAD_TOOL', '') == "rclone":
             env['CM_DOWNLOAD_FILENAME'] = urltail
@@ -111,61 +117,84 @@ def preprocess(i):
                 checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['CM_DOWNLOAD_CHECKSUM_FILE'])}"
             else:
                 checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
-            checksum_result = subprocess.run(checksum_cmd, cwd=f'{q}{filepath}{q}', capture_output=True, text=True, shell=True, env=subprocess_env)
+            checksum_result = subprocess.run(
+                checksum_cmd,
+                cwd=f'{q}{filepath}{q}',
+                capture_output=True,
+                text=True,
+                shell=True,
+                env=subprocess_env)
         elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
             if os_info['platform'] == 'windows':
                 checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['CM_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -"
             else:
                 checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{q}{env['CM_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -"
-            checksum_result = subprocess.run(checksum_cmd, capture_output=True, text=True, shell=True, env=subprocess_env)
-        if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
+            checksum_result = subprocess.run(
+                checksum_cmd,
+                capture_output=True,
+                text=True,
+                shell=True,
+                env=subprocess_env)
+        if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get(
+                'CM_DOWNLOAD_CHECKSUM', '') != '':
             # print(checksum_result) #for debugging
             if "checksum did not match" in checksum_result.stderr.lower():
-                computed_checksum = subprocess.run(f"md5sum {env['CM_DOWNLOAD_FILENAME']}", capture_output=True, text=True, shell=True).stdout.split(" ")[0]
-                print(f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.")
+                computed_checksum = subprocess.run(
+                    f"md5sum {env['CM_DOWNLOAD_FILENAME']}",
+                    capture_output=True,
+                    text=True,
+                    shell=True).stdout.split(" ")[0]
+                print(
+                    f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.")
                 try:
                     os.remove(env['CM_DOWNLOAD_FILENAME'])
-                    print(f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.")
+                    print(
+                        f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.")
                 except PermissionError:
-                    return {"return":1, "error":f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."}
+                    return {
+                        "return": 1, "error": f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."}
                 cmutil_require_download = 1
             elif "no such file" in checksum_result.stderr.lower():
-                #print(f"No file {env['CM_DOWNLOAD_FILENAME']}. Downloading through cmutil.")
+                # print(f"No file {env['CM_DOWNLOAD_FILENAME']}. Downloading through cmutil.")
                 cmutil_require_download = 1
             elif checksum_result.returncode > 0:
-                return {"return":1, "error":f"Error while checking checksum: {checksum_result.stderr}"}
+                return {
+                    "return": 1, "error": f"Error while checking checksum: {checksum_result.stderr}"}
             else:
-                print(f"File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..")
+                print(
+                    f"File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..")
         else:
             cmutil_require_download = 1
 
         if cmutil_require_download == 1:
             cm = automation.cmind
-            for i in range(1,5):
-                r = cm.access({'action':'download_file',
-                    'automation':'utils,dc2743f8450541e3',
-                    'url':url,
-                    'verify': verify_ssl})
-                if r['return'] == 0: break
+            for i in range(1, 5):
+                r = cm.access({'action': 'download_file',
+                               'automation': 'utils,dc2743f8450541e3',
+                               'url': url,
+                               'verify': verify_ssl})
+                if r['return'] == 0:
+                    break
                 oldurl = url
-                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+                url = env.get('CM_DOWNLOAD_URL' + str(i), '')
                 if url == '':
                     break
                 print(f"Download from {oldurl} failed, trying from {url}")
-            if r['return']>0: return r
+            if r['return'] > 0:
+                return r
 
             env['CM_DOWNLOAD_CMD'] = ""
             env['CM_DOWNLOAD_FILENAME'] = r['filename']
 
         elif tool == "wget":
             if env.get('CM_DOWNLOAD_FILENAME', '') != '':
-                extra_download_options +=f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
+                extra_download_options += f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
             if not verify_ssl:
                 extra_download_options += "--no-check-certificate "
             env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}"
-            for i in range(1,5):
-                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+            for i in range(1, 5):
+                url = env.get('CM_DOWNLOAD_URL' + str(i), '')
                 if url == '':
                     break
                 env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})"
@@ -173,36 +202,39 @@ def preprocess(i):
 
         elif tool == "curl":
             if env.get('CM_DOWNLOAD_FILENAME', '') != '':
-                extra_download_options +=f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
+                extra_download_options += f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} "
 
             env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}"
-            for i in range(1,5):
-                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+            for i in range(1, 5):
+                url = env.get('CM_DOWNLOAD_URL' + str(i), '')
                 if url == '':
                     break
                 env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})"
 
-
         elif tool == "gdown":
             if not verify_ssl:
                 extra_download_options += "--no-check-certificate "
             env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}"
-            for i in range(1,5):
-                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+            for i in range(1, 5):
+                url = env.get('CM_DOWNLOAD_URL' + str(i), '')
                 if url == '':
                     break
                 env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})"
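Review note: the cmutil/wget/curl/gdown branches all implement the same mirror-fallback idea — try `CM_DOWNLOAD_URL`, then `CM_DOWNLOAD_URL1..4` — by chaining `|| (delete && retry)` clauses into one shell command. The same logic in plain Python, with urllib standing in for the external tools (a sketch, not the script's mechanism):

```python
import urllib.request

def download_with_fallbacks(urls, dest):
    """Try each mirror in order; return the URL that worked."""
    last_err = None
    for url in urls:
        try:
            urllib.request.urlretrieve(url, dest)
            return url
        except Exception as e:        # failed mirror: move on to the next
            last_err = e
            print(f"Download from {url} failed, trying next mirror")
    raise RuntimeError(f"all mirrors failed: {last_err}")

# download_with_fallbacks(["https://example.org/f",
#                          "https://mirror.example.org/f"], "f")
```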
         elif tool == "rclone":
-            if env.get('CM_RCLONE_CONFIG_CMD', '') != '': #keeping this for backward compatibility. Ideally should be done via get,rclone-config script
+            # keeping this for backward compatibility. Ideally should be done
+            # via get,rclone-config script
+            if env.get('CM_RCLONE_CONFIG_CMD', '') != '':
                 env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD']
             rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync')
             if rclone_copy_using == "sync":
                 pre_clean = False
             if env["CM_HOST_OS_TYPE"] == "windows":
-                # have to modify the variable from url to temp_url if it is going to be used anywhere after this point
+                # have to modify the variable from url to temp_url if it is
+                # going to be used anywhere after this point
                 url = url.replace("%", "%%")
-                temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace("%", "%%")
+                temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace(
+                    "%", "%%")
                 env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer"
             else:
                 env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer"
@@ -220,16 +252,36 @@ def preprocess(i):
             env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
         elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
             if os_info['platform'] == 'windows':
-                env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, escape_special_chars(env['CM_DOWNLOAD_FILENAME']), x_c)
+                env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format(
+                    env.get('CM_DOWNLOAD_CHECKSUM'), x, escape_special_chars(
+                        env['CM_DOWNLOAD_FILENAME']), x_c)
             else:
-                env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
-            for i in range(1,5):
-                if env.get('CM_DOWNLOAD_CHECKSUM'+str(i),'') == '':
+                env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format(
+                    env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
+            for i in range(1, 5):
+                if env.get('CM_DOWNLOAD_CHECKSUM' + str(i), '') == '':
                     break
                 if os_info['platform'] == 'windows':
-                    env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format(env.get('CM_DOWNLOAD_CHECKSUM'+str(i)), x, escape_special_chars(env['CM_DOWNLOAD_FILENAME']), x_c)
+                    env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format(
+                        env.get(
+                            'CM_DOWNLOAD_CHECKSUM' +
+                            str(i)),
+                        x,
+                        escape_special_chars(
+                            env['CM_DOWNLOAD_FILENAME']),
+                        x_c)
                 else:
-                    env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format(env.get('CM_DOWNLOAD_CHECKSUM'+str(i)), x, q, env['CM_DOWNLOAD_FILENAME'].replace("%", "%%"), q, x_c)
+                    env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format(
+                        env.get(
+                            'CM_DOWNLOAD_CHECKSUM' +
+                            str(i)),
+                        x,
+                        q,
+                        env['CM_DOWNLOAD_FILENAME'].replace(
+                            "%",
+                            "%%"),
+                        q,
+                        x_c)
             # print(env['CM_DOWNLOAD_CHECKSUM_CMD'])
         else:
             env['CM_DOWNLOAD_CHECKSUM_CMD'] = ""
@@ -237,17 +289,21 @@ def preprocess(i):
     if not pre_clean:
         env['CM_PRE_DOWNLOAD_CMD'] = ''
 
-    if os_info['platform'] == 'windows' and env.get('CM_DOWNLOAD_CMD', '') != '':
-        env['CM_DOWNLOAD_CMD'] = escape_special_chars(env['CM_DOWNLOAD_CMD'], tool)
+    if os_info['platform'] == 'windows' and env.get(
+            'CM_DOWNLOAD_CMD', '') != '':
+        env['CM_DOWNLOAD_CMD'] = escape_special_chars(
+            env['CM_DOWNLOAD_CMD'], tool)
         if pre_clean:
             env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %CM_DOWNLOAD_FILENAME%"
         # Check that if empty CMD, should add ""
@@ -237,17 +289,21 @@ def preprocess(i):

     if not pre_clean:
         env['CM_PRE_DOWNLOAD_CMD'] = ''

-    if os_info['platform'] == 'windows' and env.get('CM_DOWNLOAD_CMD', '') != '':
-        env['CM_DOWNLOAD_CMD'] = escape_special_chars(env['CM_DOWNLOAD_CMD'], tool)
+    if os_info['platform'] == 'windows' and env.get(
+            'CM_DOWNLOAD_CMD', '') != '':
+        env['CM_DOWNLOAD_CMD'] = escape_special_chars(
+            env['CM_DOWNLOAD_CMD'], tool)
         if pre_clean:
             env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %CM_DOWNLOAD_FILENAME%"

         # Check that if empty CMD, should add ""
         for x in ['CM_DOWNLOAD_CMD', 'CM_DOWNLOAD_CHECKSUM_CMD']:
-            env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO'
+            env[x + '_USED'] = 'YES' if env.get(x, '') != '' else 'NO'
     else:
-        env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format(env['CM_DOWNLOAD_FILENAME'])
+        env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format(
+            env['CM_DOWNLOAD_FILENAME'])
+
+    return {'return': 0}

-    return {'return':0}

 def postprocess(i):

@@ -258,7 +314,8 @@ def postprocess(i):
     filepath = env['CM_DOWNLOAD_DOWNLOADED_PATH']

     if not os.path.exists(filepath):
-        return {'return':1, 'error': 'Downloaded path {} does not exist. Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)}
+        return {
+            'return': 1, 'error': 'Downloaded path {} does not exist. Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)}

     if env.get('CM_DOWNLOAD_RENAME_FILE', '') != '':
         file_dir = os.path.dirname(filepath)
@@ -267,13 +324,12 @@ def postprocess(i):
         os.rename(filepath, new_file_path)
         filepath = new_file_path

-
-    if env.get('CM_DOWNLOAD_FINAL_ENV_NAME','') != '':
+    if env.get('CM_DOWNLOAD_FINAL_ENV_NAME', '') != '':
         env[env['CM_DOWNLOAD_FINAL_ENV_NAME']] = filepath

-    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath

     # Since may change directory, check if need to clean some temporal files
-    automation.clean_some_tmp_files({'env':env})
+    automation.clean_some_tmp_files({'env': env})

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/download-torrent/customize.py b/script/download-torrent/customize.py
index 52b57f253b..3b4cb4c41b 100644
--- a/script/download-torrent/customize.py
+++ b/script/download-torrent/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -14,14 +15,17 @@ def preprocess(i):
     quiet = (env.get('CM_QUIET', False) == 'yes')

     if not env.get('CM_TORRENT_DOWNLOADED_FILE_NAME'):
-        return {'return':1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set' }
+        return {'return': 1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set'}
+
+    return {'return': 0}

-    return {'return':0}

 def postprocess(i):

     env = i['env']

-    torrent_downloaded_path = os.path.join(env['CM_TORRENT_DOWNLOADED_DIR'], env['CM_TORRENT_DOWNLOADED_NAME'])
+    torrent_downloaded_path = os.path.join(
+        env['CM_TORRENT_DOWNLOADED_DIR'],
+        env['CM_TORRENT_DOWNLOADED_NAME'])
     env['CM_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path

     if 'CM_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env:
@@ -30,4 +34,4 @@ def postprocess(i):

     env['CM_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/draw-graph-from-json-data/customize.py b/script/draw-graph-from-json-data/customize.py
index a4a2aefaaa..db1f64b1f8 100644
--- a/script/draw-graph-from-json-data/customize.py
+++ b/script/draw-graph-from-json-data/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -21,10 +22,11 @@ def preprocess(i):
     if env.get('CM_OUTPUT_MERMAID_PATH', '') != '':
         env['CM_RUN_CMD'] += f""" --output_mermaid {env['CM_OUTPUT_MERMAID_PATH']}"""

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
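Both customize modules above follow the same CM contract: preprocess(i)/postprocess(i) take a dict carrying env, os_info and friends, and signal status through the returned dict, where a non-zero 'return' plus an 'error' string aborts the script run. A minimal skeleton of that convention, using a hypothetical required variable:

import os

def preprocess(i):
    env = i['env']
    # CM scripts report failures via the return dict rather than exceptions.
    if not env.get('CM_MY_REQUIRED_FILE_NAME'):  # hypothetical input
        return {'return': 1, 'error': 'CM_MY_REQUIRED_FILE_NAME is not set'}
    return {'return': 0}

def postprocess(i):
    env = i['env']
    # Results are passed back by mutating env in place.
    env['CM_MY_RESULT_PATH'] = os.getcwd()  # hypothetical output
    return {'return': 0}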
diff --git a/script/draw-graph-from-json-data/process-cm-deps.py b/script/draw-graph-from-json-data/process-cm-deps.py
index c3f3747a6f..fe331a4a0a 100644
--- a/script/draw-graph-from-json-data/process-cm-deps.py
+++ b/script/draw-graph-from-json-data/process-cm-deps.py
@@ -4,6 +4,8 @@
 import json

 # Function to parse the nested JSON structure
+
+
 def parse_json_to_edges(json_data):
     edges = []
     for root_key, nodes in json_data.items():
@@ -12,6 +14,7 @@ def parse_json_to_edges(json_data):
             edges.append((node_details["parent"], node_key))
     return edges

+
 def generate_mermaid_output(json_data, mermaid_file="graph.mmd"):
     edges = parse_json_to_edges(json_data)

@@ -20,7 +23,8 @@ def generate_mermaid_output(json_data, mermaid_file="graph.mmd"):

     # Add each edge in Mermaid syntax
     for parent, child in edges:
-        mermaid_lines.append(f"""    {parent.replace(" ", "_")} --> {child.replace(" ", "_")}""")
+        mermaid_lines.append(
+            f"""    {parent.replace(" ", "_")} --> {child.replace(" ", "_")}""")

     # Write to a Mermaid file
     with open(mermaid_file, "w") as f:
@@ -29,7 +33,6 @@ def generate_mermaid_output(json_data, mermaid_file="graph.mmd"):

     print(f"Mermaid syntax saved to {mermaid_file}")

-
 # Function to generate and visualize the graph
 def generate_graph_from_nested_json(json_data, output_image="graph.png"):
     # Parse the JSON to extract edges
@@ -43,7 +46,7 @@ def generate_graph_from_nested_json(json_data, output_image="graph.png"):

     # Draw the graph using a spring layout for better visualization
     plt.figure(figsize=(30, 25))
-    #pos = nx.spectral_layout(G, seed=42)  # Seed for consistent layout
+    # pos = nx.spectral_layout(G, seed=42)  # Seed for consistent layout
     pos = nx.shell_layout(G)  # Seed for consistent layout
     nx.draw(
         G,
@@ -62,22 +65,42 @@ def generate_graph_from_nested_json(json_data, output_image="graph.png"):

     # Save the visualization
     plt.savefig(output_image, format="png", dpi=300)
     print(f"Graph visualization saved as {output_image}")
-    #plt.show()
+    # plt.show()

     return G

 # Function to export the graph data
+
+
 def export_graph_data(graph, filename="graph.graphml"):
     nx.write_graphml(graph, filename)
     print(f"Graph data saved as {filename}")

 # Main function to handle argument parsing and processing
+
+
 def main():
-    parser = argparse.ArgumentParser(description="Generate a graph from nested JSON input.")
-    parser.add_argument("json_file", type=str, help="Path to the JSON input file.")
-    parser.add_argument("--output_image", type=str, default="graph.png", help="Output image file for the graph visualization.")
-    parser.add_argument("--output_mermaid", type=str, default="graph.mmd", help="Output mermaid file for the graph data.")
-    parser.add_argument("--output_graphml", type=str, default="graph.graphml", help="Output GraphML file for the graph data.")
+    parser = argparse.ArgumentParser(
+        description="Generate a graph from nested JSON input.")
+    parser.add_argument(
+        "json_file",
+        type=str,
+        help="Path to the JSON input file.")
+    parser.add_argument(
+        "--output_image",
+        type=str,
+        default="graph.png",
+        help="Output image file for the graph visualization.")
+    parser.add_argument(
+        "--output_mermaid",
+        type=str,
+        default="graph.mmd",
+        help="Output mermaid file for the graph data.")
+    parser.add_argument(
+        "--output_graphml",
+        type=str,
+        default="graph.graphml",
+        help="Output GraphML file for the graph data.")

     args = parser.parse_args()
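To make the transformation in process-cm-deps.py concrete: parse_json_to_edges walks a {root: {node: {'parent': ...}}} mapping into (parent, child) pairs, and the Mermaid writer emits one arrow per pair with spaces replaced by underscores. A self-contained sketch of that round trip, assuming a "graph TD" header (the header line itself is not visible in this hunk):

import json

data = json.loads('{"deps": {"get-python": {"parent": "app-image-classification"}}}')

edges = []
for root_key, nodes in data.items():
    for node_key, node_details in nodes.items():
        edges.append((node_details["parent"], node_key))

mermaid_lines = ["graph TD"]  # assumed header
for parent, child in edges:
    # Mermaid node ids cannot contain spaces, hence the underscores.
    mermaid_lines.append(f"""    {parent.replace(" ", "_")} --> {child.replace(" ", "_")}""")
print("\n".join(mermaid_lines))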
@@ -86,12 +109,14 @@ def main():
         json_data = json.load(f)

     # Generate the graph
-    G = generate_graph_from_nested_json(json_data, output_image=args.output_image)
+    G = generate_graph_from_nested_json(
+        json_data, output_image=args.output_image)
     generate_mermaid_output(json_data, mermaid_file="graph.mmd")

     # Export the graph data
     export_graph_data(G, filename=args.output_graphml)

+
 if __name__ == "__main__":
     main()
diff --git a/script/dump-pip-freeze/customize.py b/script/dump-pip-freeze/customize.py
index 00b5bb9fdb..e9e6ce6b78 100644
--- a/script/dump-pip-freeze/customize.py
+++ b/script/dump-pip-freeze/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -12,11 +13,13 @@ def preprocess(i):
     automation = i['automation']

     if env.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '':
-        env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(os.getcwd(), "tmp-pip-freeze")
+        env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
+            os.getcwd(), "tmp-pip-freeze")

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

@@ -33,10 +36,10 @@ def postprocess(i):
         # If was not created, sometimes issues on Windows
         # There is another workaround
         if os_info['platform'] == 'windows':
-            r = automation.cmind.access({'action':'system',
-                                         'automation':'utils',
-                                         'cmd':'py -m pip freeze',
-                                         'stdout':pip_freeze_file})
+            r = automation.cmind.access({'action': 'system',
+                                         'automation': 'utils',
+                                         'cmd': 'py -m pip freeze',
+                                         'stdout': pip_freeze_file})
             # skip output

     if os.path.isfile(pip_freeze_file):
@@ -46,7 +49,6 @@ def postprocess(i):
                 split = line.split("==")
                 pip_freeze[split[0]] = split[1].strip()

-
     state['pip_freeze'] = pip_freeze

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/dump-pip-freeze/dump.py b/script/dump-pip-freeze/dump.py
index 1d7f7ab853..c6d4dc2eaa 100644
--- a/script/dump-pip-freeze/dump.py
+++ b/script/dump-pip-freeze/dump.py
@@ -1,7 +1,9 @@
 import os
 from pip._internal.operations import freeze

-pip_freeze_out = os.environ.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', 'tmp-pip-freeze')
+pip_freeze_out = os.environ.get(
+    'CM_DUMP_RAW_PIP_FREEZE_FILE_PATH',
+    'tmp-pip-freeze')

 if os.path.isfile(pip_freeze_out):
     os.remove(pip_freeze_out)
@@ -12,10 +14,10 @@

 try:
     for pkg in pkgs:
-        x+=pkg+'\n'
-except:
+        x += pkg + '\n'
+except BaseException:
     pass

-if len(x)>0:
+if len(x) > 0:
     with open(pip_freeze_out, "w") as f:
         f.write(x)
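dump.py and customize.py together implement a simple round trip: dump the pip freeze lines to a file, then parse them back into a package-to-version dict by splitting on '=='. A sketch of both halves, assuming plain name==version lines (editable or VCS installs would need extra care, which is why the original wraps the dump in a broad except):

from pip._internal.operations import freeze

# Dump: one "name==version" line per installed package.
with open("tmp-pip-freeze", "w") as f:
    for pkg in freeze.freeze():
        f.write(pkg + '\n')

# Parse back into a dict, as postprocess() does for state['pip_freeze'].
pip_freeze = {}
with open("tmp-pip-freeze") as f:
    for line in f:
        if "==" in line:
            name, version = line.split("==", 1)
            pip_freeze[name] = version.strip()
print(len(pip_freeze), "packages recorded")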
diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py
index 561e6636b0..dd03941892 100644
--- a/script/extract-file/customize.py
+++ b/script/extract-file/customize.py
@@ -2,9 +2,10 @@
 import os
 import hashlib

+
 def preprocess(i):

-    variation_tags = i.get('variation_tags',[])
+    variation_tags = i.get('variation_tags', [])

     os_info = i['os_info']

@@ -22,25 +23,29 @@ def preprocess(i):

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    filename = env.get('CM_EXTRACT_FILEPATH','')
+    filename = env.get('CM_EXTRACT_FILEPATH', '')
     if filename == '':
-        return {'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'}
+        return {
+            'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'}

     if windows:
         filename = filename.replace("%", "%%")

     env['CM_EXTRACT_FILENAME'] = filename

-    # Check if extract to some path outside CM cache (to reuse large files later if cache is cleaned)
+    # Check if extract to some path outside CM cache (to reuse large files
+    # later if cache is cleaned)
     extract_path = env.get('CM_EXTRACT_PATH', '')
     if extract_path != '':
         if not os.path.exists(extract_path):
-            os.makedirs(extract_path, exist_ok = True)
+            os.makedirs(extract_path, exist_ok=True)

         os.chdir(extract_path)

     # By default remove archive after extraction
-    remove_extracted = False if env.get('CM_EXTRACT_REMOVE_EXTRACTED','').lower() == 'no' else True
+    remove_extracted = False if env.get(
+        'CM_EXTRACT_REMOVE_EXTRACTED',
+        '').lower() == 'no' else True

     if filename.endswith(".zip") or filename.endswith(".pth"):
         env['CM_EXTRACT_TOOL'] = "unzip"
@@ -48,7 +53,7 @@ def preprocess(i):
         if windows:
             x = '"' if ' ' in filename else ''
             env['CM_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x
-            filename = filename[:-3] # leave only .tar
+            filename = filename[:-3]  # leave only .tar
             env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
             env['CM_EXTRACT_TOOL'] = 'tar '
         elif os_info['platform'] == 'darwin':
@@ -61,7 +66,7 @@ def preprocess(i):
         if windows:
             x = '"' if ' ' in filename else ''
             env['CM_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x
-            filename = filename[:-3] # leave only .tar
+            filename = filename[:-3]  # leave only .tar
             env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
             env['CM_EXTRACT_TOOL'] = 'tar '
         else:
@@ -72,25 +77,29 @@ def preprocess(i):
             env['CM_EXTRACT_TOOL'] = 'tar '
     elif filename.endswith(".gz"):
         # Check target filename
-        extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME','')
+        extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
         if extracted_filename == '':
             extracted_filename = os.path.basename(filename)[:-3]
             env['CM_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename

         x = '-c' if windows else '-k'
-        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ (x + ' ' if not remove_extracted else '') + ' > ' + q + extracted_filename + q + ' < '
+        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
+            (x + ' ' if not remove_extracted else '') + \
+            ' > ' + q + extracted_filename + q + ' < '
         env['CM_EXTRACT_TOOL'] = 'gzip '
-    elif env.get('CM_EXTRACT_UNZIP','') == 'yes':
+    elif env.get('CM_EXTRACT_UNZIP', '') == 'yes':
         env['CM_EXTRACT_TOOL'] = 'unzip '
-    elif env.get('CM_EXTRACT_UNTAR','') == 'yes':
+    elif env.get('CM_EXTRACT_UNTAR', '') == 'yes':
         env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
         env['CM_EXTRACT_TOOL'] = 'tar '
-    elif env.get('CM_EXTRACT_GZIP','') == 'yes':
+    elif env.get('CM_EXTRACT_GZIP', '') == 'yes':
         env['CM_EXTRACT_CMD'] = 'gzip '
-        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ ('-k ' if not remove_extracted else '')
+        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \
+            ('-k ' if not remove_extracted else '')
     else:
-        return {'return': 1, 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'}
+        return {'return': 1,
+                'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'}

     env['CM_EXTRACT_PRE_CMD'] = ''
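The if/elif ladder above is effectively a suffix-to-tool table. A condensed sketch of the same dispatch, with the suffix set inferred from the visible branches and the Windows/darwin pre-commands and gzip stdin/stdout plumbing simplified away:

def pick_extract_tool(filename):
    # First match wins, mirroring the order of the checks in preprocess().
    if filename.endswith(".zip") or filename.endswith(".pth"):
        return "unzip"
    if filename.endswith(".tar.gz") or filename.endswith(".tar.xz") or \
            filename.endswith(".tar"):
        return "tar"    # plus ' -xvf' in CM_EXTRACT_TOOL_OPTIONS
    if filename.endswith(".gz"):
        return "gzip"   # single file, decompressed via redirection
    return None         # caller then honours CM_EXTRACT_UNZIP/UNTAR/GZIP flags

assert pick_extract_tool("dataset.tar.gz") == "tar"
assert pick_extract_tool("weights.pth") == "unzip"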
@@ -104,34 +113,36 @@ def preprocess(i):
         x = '' if windows else '-p'
         y = '"' if ' ' in extract_to_folder else ''

-        #env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
-        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + y + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
-        env['CM_EXTRACT_PRE_CMD'] = 'mkdir '+ x +' '+ y + extract_to_folder + y + ' ' + xsep + ' '
+        # env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
+        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \
+            y + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')
+        env['CM_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \
+            y + extract_to_folder + y + ' ' + xsep + ' '
         env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder

     elif 'unzip' in env['CM_EXTRACT_TOOL']:
-        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d '+ q + extract_to_folder + q
+        env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q
         env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder

-
     x = '"' if ' ' in filename else ''
     env['CM_EXTRACT_CMD'] = env['CM_EXTRACT_PRE_CMD'] + env['CM_EXTRACT_TOOL'] + ' ' + \
-        env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \
-        ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')+ ' '+ x + filename + x
+        env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \
+        ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x

-    print ('')
-    print ('Current directory: {}'.format(os.getcwd()))
-    print ('Command line: "{}"'.format(env['CM_EXTRACT_CMD']))
-    print ('')
+    print('')
+    print('Current directory: {}'.format(os.getcwd()))
+    print('Command line: "{}"'.format(env['CM_EXTRACT_CMD']))
+    print('')

     final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')

-    if final_file!='':
+    if final_file != '':
         if env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '':
             env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['CM_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}"
         elif env.get('CM_EXTRACT_EXTRACTED_CHECKSUM', '') != '':
-            x='*' if os_info['platform'] == 'windows' else ''
-            env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format(env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME'])
+            x = '*' if os_info['platform'] == 'windows' else ''
+            env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format(
+                env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME'])
         else:
             env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
     else:
@@ -143,15 +154,14 @@ def preprocess(i):
     # for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']:
     #     env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO'

-
     # If force cache, add filepath to tag unless _path is used ...
-    path_tag = 'path.'+filename
+    path_tag = 'path.' + filename

     add_extra_cache_tags = []
     if path_tag not in variation_tags:
         add_extra_cache_tags.append(path_tag)

-    return {'return':0, 'add_extra_cache_tags':add_extra_cache_tags}
+    return {'return': 0, 'add_extra_cache_tags': add_extra_cache_tags}


 def postprocess(i):

@@ -173,33 +183,35 @@ def postprocess(i):

     # We do not use this env variable anymore
     # folderpath = env.get('CM_EXTRACT_EXTRACT_TO_PATH', '')
-        folderpath = extract_path if extract_path!='' else os.getcwd()
+        folderpath = extract_path if extract_path != '' else os.getcwd()

         filepath = os.path.join(folderpath, filename)
     else:
-        filepath = os.getcwd() # Extracted to the root cache folder
+        filepath = os.getcwd()  # Extracted to the root cache folder

     if not os.path.exists(filepath):
-        return {'return':1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)}
-#        return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and CM_EXTRACT_TO_FOLDER are not set'}
+        return {
+            'return': 1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)}
+# return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and
+# CM_EXTRACT_TO_FOLDER are not set'}

     env['CM_EXTRACT_EXTRACTED_PATH'] = filepath

     # Set external environment variable with the final path
-    if env.get('CM_EXTRACT_FINAL_ENV_NAME', '')!='':
+    if env.get('CM_EXTRACT_FINAL_ENV_NAME', '') != '':
         env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath

     # Detect if this file will be deleted or moved
-    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath

     # Check if need to remove archive after extraction
-    if env.get('CM_EXTRACT_REMOVE_EXTRACTED','').lower() != 'no':
-        archive_filepath=env.get('CM_EXTRACT_FILEPATH','')
-        if archive_filepath!='' and os.path.isfile(archive_filepath):
+    if env.get('CM_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no':
+        archive_filepath = env.get('CM_EXTRACT_FILEPATH', '')
+        if archive_filepath != '' and os.path.isfile(archive_filepath):
             os.remove(archive_filepath)

     # Since may change directory, check if need to clean some temporal files
-    automation.clean_some_tmp_files({'env':env})
+    automation.clean_some_tmp_files({'env': env})

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/fail/customize.py b/script/fail/customize.py
index 855c39b5bb..4168a3b7a7 100644
--- a/script/fail/customize.py
+++ b/script/fail/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -14,15 +15,16 @@ def preprocess(i):
     quiet = (env.get('CM_QUIET', False) == 'yes')

     # Checking conditions
-    if env.get('CM_FAIL_WINDOWS','').lower()=='true':
+    if env.get('CM_FAIL_WINDOWS', '').lower() == 'true':
         if os_info['platform'] == 'windows':
-            return {'return':1, 'error': 'CM detected fail condition: running on Windows'}
+            return {'return': 1,
+                    'error': 'CM detected fail condition: running on Windows'}
+
+    return {'return': 0}

-    return {'return':0}

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/flash-tinyml-binary/customize.py b/script/flash-tinyml-binary/customize.py
index a2062be59c..bc0c3bd18c 100644
--- a/script/flash-tinyml-binary/customize.py
+++ b/script/flash-tinyml-binary/customize.py
@@ -1,19 +1,22 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
     env = i['env']
     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     if 'CM_TINY_BUILD_DIR' not in env:
-        return {'return':1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'}
-    return {'return':0}
+        return {
+            'return': 1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'}
+    return {'return': 0}
+

 def postprocess(i):
     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 7c7d078c01..4829b08b3f 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -6,15 +6,15 @@
 import cmind as cm
 import sys

-def preprocess(i):

+def preprocess(i):
     os_info = i['os_info']
     env = i['env']
     state = i['state']
     script_path = i['run_script_input']['path']

-    rerun = True if env.get("CM_RERUN","")!='' else False
+    rerun = True if env.get("CM_RERUN", "") != '' else False

     env['CM_MLPERF_SKIP_RUN'] = env.get('CM_MLPERF_SKIP_RUN', "no")

@@ -34,23 +34,23 @@ def preprocess(i):
         print("\nNo mode given. Using accuracy as default\n")
         env['CM_MLPERF_LOADGEN_MODE'] = "accuracy"

-
     if env.get('OUTPUT_BASE_DIR', '') == '':
-        env['OUTPUT_BASE_DIR'] = env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd())
+        env['OUTPUT_BASE_DIR'] = env.get(
+            'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd())

     if 'CM_NUM_THREADS' not in env:
         if 'CM_MINIMIZE_THREADS' in env:
-            env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // \
-                (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+            env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
+                                        (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
         else:
             env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')

-
-    print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'")
+    print("Using MLCommons Inference source from '" +
+          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")

     if 'CM_MLPERF_CONF' not in env:
-        env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
-
+        env['CM_MLPERF_CONF'] = os.path.join(
+            env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")

     RUN_CMD = ""
     state['RUN'] = {}
@@ -68,11 +68,13 @@ def preprocess(i):
     if model_full_name not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']]:
         i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name] = {}

-    if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name]:
-        i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name][scenario] = {}
-
+    if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                                   ][model_full_name]:
+        i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                    ][model_full_name][scenario] = {}

-    conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name][scenario]
+    conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                       ][model_full_name][scenario]

     mode = env['CM_MLPERF_LOADGEN_MODE']
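The hunk that follows encodes the scenario-to-metric mapping: Offline and Server tune target_qps, SingleStream and MultiStream tune target_latency, and a very low latency disables max_duration because too few queries would fit in the roughly 660-second run window. A sketch of just that decision, with the thresholds taken from the code below:

def pick_metric(scenario, target_latency_ms=None):
    # Returns the LoadGen knob to tune and whether max_duration stays enabled.
    if scenario in ['Offline', 'Server']:
        return 'target_qps', True        # throughput-bound scenarios
    use_max_duration = True
    if target_latency_ms:
        queries_in_660s = 1000 / float(target_latency_ms) * 660
        if scenario == 'SingleStream' and queries_in_660s < 100:
            use_max_duration = False
        elif scenario == 'MultiStream' and queries_in_660s < 662:
            use_max_duration = False
    return 'target_latency', use_max_duration

print(pick_metric('SingleStream', target_latency_ms=10000))  # ('target_latency', False)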
@@ -97,21 +99,23 @@ def preprocess(i):
     query_count = None

     value = None
-    if scenario in [ 'Offline', 'Server' ]:
+    if scenario in ['Offline', 'Server']:
         metric = "target_qps"
         tolerance = 1.01
-        #value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
+        # value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
         value = env.get('CM_MLPERF_LOADGEN_TARGET_QPS')
-    elif scenario in [ 'SingleStream', 'MultiStream' ]:
+    elif scenario in ['SingleStream', 'MultiStream']:
         metric = "target_latency"
         value = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY')
         if value:
-            if scenario == "SingleStream" and (1000/float(value) * 660 < 100):
+            if scenario == "SingleStream" and (
+                    1000 / float(value) * 660 < 100):
                 env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
-            elif scenario == "MultiStream" and (1000/float(value) * 660 < 662):
+            elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662):
                 env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
-        if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in [ "yes", "1", "true" ] and env.get('CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in [ "no", "false", "0"]:
-            tolerance = 0.4 #much lower because we have max_duration
+        if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get(
+                'CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]:
+            tolerance = 0.4  # much lower because we have max_duration
         else:
             tolerance = 0.9
     else:
@@ -122,11 +126,18 @@ def preprocess(i):
             conf[metric] = value
     else:
         if metric in conf:
-            print("Original configuration value {} {}".format(conf[metric], metric))
-            metric_value = str(float(conf[metric]) * tolerance) #some tolerance
-            print("Adjusted configuration value {} {}".format(metric_value, metric))
+            print(
+                "Original configuration value {} {}".format(
+                    conf[metric], metric))
+            metric_value = str(
+                float(
+                    conf[metric]) *
+                tolerance)  # some tolerance
+            print(
+                "Adjusted configuration value {} {}".format(
+                    metric_value, metric))
         else:
-            #if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+            # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
             if metric == "target_qps":
                 if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
                     print("In find performance mode: using 1 as target_qps")
@@ -138,8 +149,11 @@ def preprocess(i):
                     print("In find performance mode: using 0.5ms as target_latency")
                 else:
                     print("No target_latency specified. Using default")
-                if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in [ "no", "false", "0" ] or env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in [ "yes", "1", "true" ]:
-                    # Total number of queries needed is a multiple of dataset size. So we dont use max_duration and so we need to be careful with the input latency
+                if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get(
+                        'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]:
+                    # Total number of queries needed is a multiple of dataset
+                    # size. So we don't use max_duration and so we need to be
+                    # careful with the input latency
                     if '3d-unet' in env['CM_MODEL']:
                         conf[metric] = 400
                     elif 'gptj' in env['CM_MODEL']:
@@ -149,62 +163,82 @@ def preprocess(i):
                 else:
                     conf[metric] = 0.5
                 metric_value = conf[metric]
-                #else:
-                #    return {'return': 1, 'error': f"Config details missing for SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario: {scenario}. Please input {metric} value"}
+                # else:
+                # return {'return': 1, 'error': f"Config details missing for
+                # SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario:
+                # {scenario}. Please input {metric} value"}

-    #Pass the modified performance metrics to the implementation
+    # Pass the modified performance metrics to the implementation
     if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
-        if metric == "target_latency" and env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '':
+        if metric == "target_latency" and env.get(
+                'CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '':
            env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric]
        elif metric == "target_qps" and env.get('CM_MLPERF_LOADGEN_TARGET_QPS', '') == '':
            env['CM_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric]

-
     if env['CM_MLPERF_RUN_STYLE'] == "fast":
         if scenario == "Offline":
             metric_value = float(metric_value) / fast_factor
-        if scenario in [ "SingleStream", "MultiStream" ]:
+        if scenario in ["SingleStream", "MultiStream"]:
             metric_value = float(metric_value) * fast_factor

     elif env['CM_MLPERF_RUN_STYLE'] == "test":
         if scenario == "Offline":
             metric_value = float(env.get('CM_MLPERF_INFERENCE_TEST_QPS', 1))
-        if scenario in [ "SingleStream" ]:
+        if scenario in ["SingleStream"]:
             metric_value = 1000

     elif env['CM_MLPERF_RUN_STYLE'] == "valid":
         if scenario == "Offline":
             required_min_queries_offline = {}
-            required_min_queries_offline = get_required_min_queries_offline(env['CM_MODEL'], version)
+            required_min_queries_offline = get_required_min_queries_offline(
+                env['CM_MODEL'], version)

-
-    if mode == "compliance" and scenario == "Server": #Adjust the server_target_qps
+    if mode == "compliance" and scenario == "Server":  # Adjust the server_target_qps
         test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
         if test == "TEST01":
-            metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR", 0.96)))
-        #if test == "TEST05":
+            metric_value = str(
+                float(metric_value) *
+                float(
+                    env.get(
+                        "CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR",
+                        0.96)))
+        # if test == "TEST05":
         #    metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97)))
         if test == "TEST04":
-            metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR", 0.97)))
+            metric_value = str(
+                float(metric_value) *
+                float(
+                    env.get(
+                        "CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR",
+                        0.97)))

     conf[metric] = metric_value
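Everything computed so far funnels into plain model.scenario.key = value lines appended to the user.conf text, as the statements below show. A tiny sketch of the accumulation with hypothetical values:

ml_model_name, scenario = "resnet50", "Offline"   # hypothetical
user_conf = ""

for key, value in [("target_qps", 1234.5), ("min_duration", 0)]:
    user_conf += ml_model_name + "." + scenario + \
        "." + key + " = " + str(value) + "\n"

print(user_conf, end="")
# resnet50.Offline.target_qps = 1234.5
# resnet50.Offline.min_duration = 0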
-    user_conf += ml_model_name + "." + scenario + "." + metric + " = " + str(metric_value) + "\n"
+    user_conf += ml_model_name + "." + scenario + \
+        "." + metric + " = " + str(metric_value) + "\n"

     if env.get('CM_MLPERF_PERFORMANCE_SAMPLE_COUNT', '') != '':
         performance_sample_count = env['CM_MLPERF_PERFORMANCE_SAMPLE_COUNT']
-        user_conf += ml_model_name + ".*.performance_sample_count_override = " + performance_sample_count + "\n"
+        user_conf += ml_model_name + ".*.performance_sample_count_override = " + \
+            performance_sample_count + "\n"

     log_mode = mode
     if 'CM_MLPERF_POWER' in env and mode == "performance":
         log_mode = "performance_power"

-    env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME'])
+    env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join(
+        env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME'])

-    sut_name = env.get('CM_SUT_NAME', env['CM_MLPERF_BACKEND'] + "-" + env['CM_MLPERF_DEVICE'])
-    OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, \
-        model_full_name, scenario.lower(), mode)
+    sut_name = env.get(
+        'CM_SUT_NAME',
+        env['CM_MLPERF_BACKEND'] +
+        "-" +
+        env['CM_MLPERF_DEVICE'])
+    OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name,
+                              model_full_name, scenario.lower(), mode)

-    env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name)
+    env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join(
+        env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name)

     if 'CM_MLPERF_POWER' in env and mode == "performance":
         env['CM_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power")
@@ -215,18 +249,34 @@ def preprocess(i):
         OUTPUT_DIR = os.path.join(OUTPUT_DIR, "run_1")
     elif mode == "compliance":
         test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
-        OUTPUT_DIR = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME'], sut_name, model_full_name, scenario.lower(), test)
+        OUTPUT_DIR = os.path.join(
+            env['OUTPUT_BASE_DIR'],
+            env['CM_OUTPUT_FOLDER_NAME'],
+            sut_name,
+            model_full_name,
+            scenario.lower(),
+            test)
         if test == "TEST01":
             audit_path = os.path.join(test, ml_model_name)
         else:
             audit_path = test

-        audit_full_path = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", audit_path, "audit.config")
+        audit_full_path = os.path.join(
+            env['CM_MLPERF_INFERENCE_SOURCE'],
+            "compliance",
+            "nvidia",
+            audit_path,
+            "audit.config")
         env['CM_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path
-        #copy the audit conf to the run directory incase the implementation is not supporting the audit-conf path
+        # copy the audit conf to the run directory in case the implementation
+        # is not supporting the audit-conf path
         if not os.path.exists(OUTPUT_DIR):
             os.makedirs(OUTPUT_DIR)
-        shutil.copyfile(audit_full_path, os.path.join(OUTPUT_DIR, "audit.config"))
+        shutil.copyfile(
+            audit_full_path,
+            os.path.join(
+                OUTPUT_DIR,
+                "audit.config"))

     env['CM_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR
     env['CM_LOGS_DIR'] = OUTPUT_DIR
@@ -240,7 +290,8 @@ def preprocess(i):

     run_exists = run_files_exist(log_mode, OUTPUT_DIR, required_files, env)

-    if 'CM_MLPERF_POWER' in env and env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance":
+    if 'CM_MLPERF_POWER' in env and env.get(
+            'CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance":
         short_ranging = True
     else:
         short_ranging = False

     if short_ranging:
         import copy
         ranging_user_conf = copy.deepcopy(user_conf)
-        ranging_user_conf += ml_model_name + "." + scenario + ".min_duration = 300000" + "\n"
+        ranging_user_conf += ml_model_name + "." + \
+            scenario + ".min_duration = 300000" + "\n"

     if env['CM_MLPERF_RUN_STYLE'] == "test":
         max_duration_test_s = int(env.get('CM_MLPERF_MAX_DURATION_TEST', 30))
-        max_duration_test = str(max_duration_test_s * 1000) # in milliseconds
+        max_duration_test = str(max_duration_test_s * 1000)  # in milliseconds
         query_count = int(env.get('CM_TEST_QUERY_COUNT', 5))
-        min_query_count = int(env.get('CM_MLPERF_INFERENCE_MIN_QUERY_COUNT', query_count))
-        max_query_count = max(min_query_count, int(env.get('CM_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count)))
-        user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(max_query_count) + "\n"
-        user_conf += ml_model_name + "." + scenario + ".min_query_count = " + str(min_query_count) + "\n"
+        min_query_count = int(
+            env.get(
+                'CM_MLPERF_INFERENCE_MIN_QUERY_COUNT',
+                query_count))
+        max_query_count = max(
+            min_query_count, int(
+                env.get(
+                    'CM_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count)))
+        user_conf += ml_model_name + "." + scenario + \
+            ".max_query_count = " + str(max_query_count) + "\n"
+        user_conf += ml_model_name + "." + scenario + \
+            ".min_query_count = " + str(min_query_count) + "\n"
         user_conf += ml_model_name + "." + scenario + ".min_duration = 0" + "\n"
-        user_conf += ml_model_name + "." + scenario + ".sample_concatenate_permutation = 0" + "\n"
+        user_conf += ml_model_name + "." + scenario + \
+            ".sample_concatenate_permutation = 0" + "\n"
         env['CM_MLPERF_MAX_QUERY_COUNT'] = max_query_count

         # max_duration is effective for all scenarios except the Offline
-        if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ "no", "false", "0"]:
+        if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [
+                "no", "false", "0"]:
             if scenario != "Offline":
-                user_conf += ml_model_name + "." + scenario + f".max_duration = {max_duration_test}" + "\n"
+                user_conf += ml_model_name + "." + scenario + \
+                    f".max_duration = {max_duration_test}" + "\n"

     elif env['CM_MLPERF_RUN_STYLE'] == "fast":
-        user_conf += ml_model_name + "." + scenario + ".sample_concatenate_permutation = 0" + "\n"
+        user_conf += ml_model_name + "." + scenario + \
+            ".sample_concatenate_permutation = 0" + "\n"
         max_duration_fast_s = int(env.get('CM_MLPERF_MAX_DURATION_FAST', 120))
-        max_duration_fast = str(max_duration_fast_s * 1000) # in milliseconds
+        max_duration_fast = str(max_duration_fast_s * 1000)  # in milliseconds
         if scenario == "Server":
-            user_conf += ml_model_name + "." + scenario + f".max_duration = {max_duration_fast}" + "\n"
+            user_conf += ml_model_name + "." + scenario + \
+                f".max_duration = {max_duration_fast}" + "\n"
             target_qps = conf['target_qps']
-            query_count = str(int((660/fast_factor) * (float(target_qps))))
-            user_conf += ml_model_name + "." + scenario + ".max_query_count = " + query_count + "\n"
+            query_count = str(int((660 / fast_factor) * (float(target_qps))))
+            user_conf += ml_model_name + "." + scenario + \
+                ".max_query_count = " + query_count + "\n"
             env['CM_MLPERF_MAX_QUERY_COUNT'] = query_count

     else:
-        max_duration_valid_s = int(env.get('CM_MLPERF_MAX_DURATION_VALID', 660))
-        max_duration_valid = str(max_duration_valid_s * 1000) # in milliseconds
-        max_duration_ranging_s = int(env.get('CM_MLPERF_MAX_DURATION_RANGING', 300))
-        max_duration_ranging = str(max_duration_ranging_s * 1000) # in milliseconds
+        max_duration_valid_s = int(
+            env.get('CM_MLPERF_MAX_DURATION_VALID', 660))
+        max_duration_valid = str(
+            max_duration_valid_s *
+            1000)  # in milliseconds
+        max_duration_ranging_s = int(
+            env.get('CM_MLPERF_MAX_DURATION_RANGING', 300))
+        max_duration_ranging = str(
+            max_duration_ranging_s *
+            1000)  # in milliseconds
         if scenario == "MultiStream" or scenario == "SingleStream":
-            if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ "no", "false", "0" ] and env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in [ "yes", "1", "true" ]:
-                user_conf += ml_model_name + "." + scenario + f".max_duration = {max_duration_valid}" + "\n"
-            elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION','') != '':
-                user_conf += ml_model_name + "." + scenario + ".min_duration = " + env['CM_MLPERF_INFERENCE_MIN_DURATION'] +" \n"
+            if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get(
+                    'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]:
+                user_conf += ml_model_name + "." + scenario + \
+                    f".max_duration = {max_duration_valid}" + "\n"
+            elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION', '') != '':
+                user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
+                    env['CM_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
             if scenario == "MultiStream":
-                user_conf += ml_model_name + "." + scenario + ".min_query_count = "+ env.get('CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT', "662") + "\n"
+                user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
+                    env.get(
+                        'CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT',
+                        "662") + "\n"
             if short_ranging:
-                ranging_user_conf += ml_model_name + "." + scenario + f".max_duration = {max_duration_ranging} \n "
+                ranging_user_conf += ml_model_name + "." + scenario + \
+                    f".max_duration = {max_duration_ranging} \n "
         elif scenario == "Offline":
             query_count = int(float(conf['target_qps']) * 660)
             query_count = str(max(query_count, required_min_queries_offline))
-            #user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n"
+            # user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n"

             if short_ranging:
                 ranging_query_count = str(int(float(conf['target_qps']) * 300))
-                ranging_user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(ranging_query_count) + "\n"
+                ranging_user_conf += ml_model_name + "." + scenario + \
+                    ".max_query_count = " + str(ranging_query_count) + "\n"
                 ranging_user_conf += ml_model_name + "." + scenario + ".min_query_count = 0 \n"

     if query_count:
-        env['CM_MAX_EXAMPLES'] = str(query_count) #needed for squad accuracy checker
-
+        # needed for squad accuracy checker
+        env['CM_MAX_EXAMPLES'] = str(query_count)

     import uuid
     from pathlib import Path
     key = uuid.uuid4().hex
-    user_conf_path = os.path.join(script_path, "tmp", key+".conf")
+    user_conf_path = os.path.join(script_path, "tmp", key + ".conf")
     user_conf_file = Path(user_conf_path)
     user_conf_file.parent.mkdir(exist_ok=True, parents=True)
     user_conf_file.write_text(user_conf)

     if short_ranging:
-        ranging_user_conf_path = os.path.join(script_path, "tmp", "ranging_"+key+".conf")
+        ranging_user_conf_path = os.path.join(
+            script_path, "tmp", "ranging_" + key + ".conf")
         ranging_user_conf_file = Path(ranging_user_conf_path)
         ranging_user_conf_file.write_text(ranging_user_conf)
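The conf text is then written under the script's tmp/ directory keyed by a random hex string so concurrent runs cannot collide, with pathlib creating the directory on first use. The pattern in isolation:

import uuid
from pathlib import Path

script_path = "."                                 # stand-in for the real script path
user_conf = "resnet50.Offline.target_qps = 1\n"   # hypothetical content

key = uuid.uuid4().hex
user_conf_file = Path(script_path, "tmp", key + ".conf")
user_conf_file.parent.mkdir(exist_ok=True, parents=True)  # create tmp/ once
user_conf_file.write_text(user_conf)
print("wrote", user_conf_file)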
-
-    if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT','') == '') and query_count and ((mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")):
+    if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and (
+            (mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")):
         env['CM_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count)

     if not run_exists or rerun:

         print("Output Dir: '" + OUTPUT_DIR + "'")
         print(user_conf)
-        if env.get('CM_MLPERF_POWER','') == "yes" and os.path.exists(env.get('CM_MLPERF_POWER_LOG_DIR', '')):
+        if env.get('CM_MLPERF_POWER', '') == "yes" and os.path.exists(
+                env.get('CM_MLPERF_POWER_LOG_DIR', '')):
             shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR'])
     else:
         if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False):
             print("Run files exist, skipping run...\n")
             env['CM_MLPERF_SKIP_RUN'] = "yes"

-    if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR, \
-        required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False):
+    if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR,
+                                                          required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False):

-        env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), key+".conf")# user_conf_path
-        env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), "ranging_"+key+".conf")# ranging_user_conf_path for a shorter run
+        env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join(
+            os.path.dirname(user_conf_path), key + ".conf")  # user_conf_path
+        env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join(
+            os.path.dirname(user_conf_path),
+            "ranging_" + key + ".conf")  # ranging_user_conf_path for a shorter run

         if short_ranging:
             env['CM_MLPERF_USER_CONF'] = r"\${CM_MLPERF_USER_CONF}"
         else:
-            env['CM_MLPERF_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), key+".conf")# user_conf_path
+            env['CM_MLPERF_USER_CONF'] = os.path.join(
+                os.path.dirname(user_conf_path), key + ".conf")  # user_conf_path
     else:
-        print(f"Measure files exist at {OUTPUT_DIR}. Skipping regeneration...\n")
+        print(
+            f"Measure files exist at {OUTPUT_DIR}. Skipping regeneration...\n")
         env['CM_MLPERF_USER_CONF'] = ''

     os.makedirs(OUTPUT_DIR, exist_ok=True)

-    if str(env.get('CM_MLPERF_RESULTS_DIR_SHARED', '')).lower() in [ "yes", "true", "1" ]:
+    if str(env.get('CM_MLPERF_RESULTS_DIR_SHARED', '')
+           ).lower() in ["yes", "true", "1"]:
         os.chmod(OUTPUT_DIR, 0o2775)

-    return {'return':0}
+    return {'return': 0}
+

 def run_files_exist(mode, OUTPUT_DIR, run_files, env):
     import submission_checker as checker
@@ -361,22 +450,32 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env):

     is_valid = True

-    file_loc = {"accuracy": 0, "performance": 1, "power": 2, "performance_power": 3, "measure": 4, "compliance": 1}
+    file_loc = {
+        "accuracy": 0,
+        "performance": 1,
+        "power": 2,
+        "performance_power": 3,
+        "measure": 4,
+        "compliance": 1}

     required_files = run_files[file_loc[mode]]

     if mode == "performance_power":
         for file_ in run_files[2]:
-            file_path = os.path.join(os.path.dirname(OUTPUT_DIR), "power", file_)
-            if (not os.path.exists(file_path) or os.stat(file_path).st_size == 0):
+            file_path = os.path.join(
+                os.path.dirname(OUTPUT_DIR), "power", file_)
+            if (not os.path.exists(file_path)
+                    or os.stat(file_path).st_size == 0):
                 return False
-        required_files += run_files[1] #We need performance files too in the run directory
+        # We need performance files too in the run directory
+        required_files += run_files[1]

     for file_ in required_files:
         file_path = os.path.join(OUTPUT_DIR, file_)
-        if (not os.path.exists(file_path) or os.stat(file_path).st_size == 0) and file_ != "accuracy.txt":
+        if (not os.path.exists(file_path) or os.stat(
+                file_path).st_size == 0) and file_ != "accuracy.txt":
             return False

-        if file_ == "mlperf_log_detail.txt" and "performance" in mode:
+        if file_ == "mlperf_log_detail.txt" and "performance" in mode:
             mlperf_log = MLPerfLog(file_path)
             if (
                 "result_validity" not in mlperf_log.get_keys()
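The loop above treats a previous run as reusable only if every required log exists and is non-empty, with accuracy.txt as the sole optional file. Extracted as a standalone helper:

import os

def logs_complete(output_dir, required_files):
    for file_ in required_files:
        file_path = os.path.join(output_dir, file_)
        # A zero-byte log counts as missing; accuracy.txt may be absent.
        if (not os.path.exists(file_path)
                or os.stat(file_path).st_size == 0) and file_ != "accuracy.txt":
            return False
    return True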
@@ -385,13 +484,18 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env):
                 return False

     if mode == "compliance":
-        #If a performance run followed the last compliance run, compliance check needs to be redone
+        # If a performance run followed the last compliance run, compliance
+        # check needs to be redone
         RESULT_DIR = os.path.split(OUTPUT_DIR)[0]
         COMPLIANCE_DIR = OUTPUT_DIR
         OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR)

-        #If reference test result is invalid, don't do compliance run
-        file_path = os.path.join(RESULT_DIR, "performance", "run_1", "mlperf_log_detail.txt")
+        # If reference test result is invalid, don't do compliance run
+        file_path = os.path.join(
+            RESULT_DIR,
+            "performance",
+            "run_1",
+            "mlperf_log_detail.txt")
         mlperf_log = MLPerfLog(file_path)
         if (
             "result_validity" not in mlperf_log.get_keys()
@@ -402,7 +506,12 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env):

         test = env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST']

-        SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py")
+        SCRIPT_PATH = os.path.join(
+            env['CM_MLPERF_INFERENCE_SOURCE'],
+            "compliance",
+            "nvidia",
+            test,
+            "run_verification.py")
         if test == "TEST06":
             cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
         else:
@@ -414,21 +523,25 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env):
             is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR)

         if not is_valid and 'Stream' in env['CM_MLPERF_LOADGEN_SCENARIO']:
-            env['CM_MLPERF_USE_MAX_DURATION'] = 'no' # We have the determined latency, compliance test failed, so lets not use max duration
-            env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000' # Try a longer run
+            # We have the determined latency, compliance test failed, so let's
+            # not use max duration
+            env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
+            env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000'  # Try a longer run

         return is_valid

-    if "power" in mode and env.get('CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in [ "yes", "true", "on" ]:
+    if "power" in mode and env.get(
+            'CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]:
         from power.power_checker import check as check_power_more
         try:
             is_valid = check_power_more(os.path.dirname(OUTPUT_DIR)) == 0
-        except:
+        except BaseException:
             is_valid = False
         return is_valid

     return is_valid


 def measure_files_exist(OUTPUT_DIR, run_files):
     for file in run_files:
         file_path = os.path.join(OUTPUT_DIR, file)
@@ -436,6 +549,7 @@ def measure_files_exist(OUTPUT_DIR, run_files):
             return False
     return True


 def get_checker_files():
     import submission_checker as checker

@@ -446,6 +560,7 @@ def get_checker_files():
     REQUIRED_MEASURE_FILES = checker.REQUIRED_MEASURE_FILES
     return REQUIRED_ACC_FILES, REQUIRED_PERF_FILES, REQUIRED_POWER_FILES, REQUIRED_PERF_POWER_FILES, REQUIRED_MEASURE_FILES


 def get_required_min_queries_offline(model, version):
     import submission_checker as checker
diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py
index 825a682cb1..03fdf1c3d7 100644
--- a/script/generate-mlperf-tiny-report/customize.py
+++ b/script/generate-mlperf-tiny-report/customize.py
@@ -6,6 +6,7 @@
 import json
 import shutil

+
 def preprocess(i):

     env = i['env']
@@ -13,18 +14,19 @@ def preprocess(i):
     cur_dir = os.getcwd()

     # Query cache for results dirs
-    env_repo_tags=env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS','').strip()
-    xtags='' if env_repo_tags =='' else ',version-'+env_repo_tags
+    env_repo_tags = env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS', '').strip()
+    xtags = '' if env_repo_tags == '' else ',version-' + env_repo_tags

-    r = cm.access({'action':'find',
-                   'automation':'cache,541d6f712a6b464e',
-                   'tags':'get,repo,mlperf-tiny-results'+xtags})
-    if r['return']>0: return r
+    r = cm.access({'action': 'find',
+                   'automation': 'cache,541d6f712a6b464e',
+                   'tags': 'get,repo,mlperf-tiny-results' + xtags})
+    if r['return'] > 0:
+        return r

     lst = r['list']

-    if len(lst)==0:
-        return {'return':1, 'error':'no repository with TinyMLPerf results found'}
+    if len(lst) == 0:
+        return {'return': 1, 'error': 'no repository with TinyMLPerf results found'}

     for c in lst:
         path = os.path.join(c.path, 'repo')
@@ -37,7 +39,7 @@ def preprocess(i):
         version = ''
         for t in tags:
             if t.startswith('version-'):
-                version = 'v'+t[8:]
+                version = 'v' + t[8:]
                 break

         # Run local script
@@ -48,16 +50,17 @@ def preprocess(i):
         env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir
         env['CM_TINYMLPERF_REPO_VERSION'] = version

-        print ('')
-        print ('Repo path: {}'.format(path))
+        print('')
+        print('Repo path: {}'.format(path))

-        r = automation.run_native_script({'run_script_input':run_script_input,
-                                          'env':env,
-                                          'script_name':'run_submission_checker'})
-        if r['return']>0:
+        r = automation.run_native_script({'run_script_input': run_script_input,
+                                          'env': env,
+                                          'script_name': 'run_submission_checker'})
+        if r['return'] > 0:
             return r

-    return {'return':0}
+    return {'return': 0}
+
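The cache lookup in this preprocess() is the standard cmind pattern: cm.access with action 'find' returns a dict whose 'list' holds artifact objects exposing .path and .meta. A sketch of such a query (the automation UID is copied from the script above; treat the version tag as illustrative):

import cmind as cm

r = cm.access({'action': 'find',
               'automation': 'cache,541d6f712a6b464e',
               'tags': 'get,repo,mlperf-tiny-results,version-v1.1'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM error'))  # CM reports errors via the dict
for c in r['list']:
    print(c.path)  # each hit is a cache entry with its own directory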

 def postprocess(i):

@@ -69,15 +72,15 @@ def postprocess(i):

     for ext in ['.csv', '.xlsx']:

-        p1 = os.path.join (path, 'summary'+ext)
-        p2 = os.path.join (cur_dir, 'summary-{}{}'.format(version,ext))
+        p1 = os.path.join(path, 'summary' + ext)
+        p2 = os.path.join(cur_dir, 'summary-{}{}'.format(version, ext))

         if not os.path.isfile(p1):
-            return {'return':1, 'error':'summary.csv file was not created'}
+            return {'return': 1, 'error': 'summary.csv file was not created'}

         if os.path.isfile(p2):
             os.remove(p2)

-        shutil.copy(p1,p2)
+        shutil.copy(p1, p2)

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py
index 534915c3b6..75bd43832a 100644
--- a/script/generate-mlperf-tiny-submission/customize.py
+++ b/script/generate-mlperf-tiny-submission/customize.py
@@ -3,6 +3,7 @@
 import json
 import shutil

+
 def preprocess(i):
     return generate_submission(i)

@@ -12,10 +13,10 @@ def preprocess(i):
 def generate_submission(i):

     # Save current user directory
-    cur_dir=os.getcwd()
+    cur_dir = os.getcwd()
     env = i['env']
     state = i['state']
-    inp=i['input']
+    inp = i['input']

     results_dir = env['CM_MLPERF_RESULTS_DIR']

     if 'CM_MLPERF_SUBMISSION_DIR' not in env:
@@ -26,19 +27,23 @@ def generate_submission(i):
     print('* MLPerf tiny submission dir: {}'.format(submission_dir))
     print('* MLPerf tiny results dir: {}'.format(results_dir))
-    results = [f for f in os.listdir(results_dir) if not os.path.isfile(os.path.join(results_dir, f))]
+    results = [
+        f for f in os.listdir(results_dir) if not os.path.isfile(
+            os.path.join(
+                results_dir,
+                f))]

-    division=inp.get('division','open')
+    division = inp.get('division', 'open')

-    if division not in ['open','closed']:
-        return {'return':1, 'error':'"division" must be "open" or "closed"'}
+    if division not in ['open', 'closed']:
+        return {'return': 1, 'error': '"division" must be "open" or "closed"'}

     system_meta = state['CM_SUT_META']
     division = system_meta['division']

     print('* MLPerf tiny division: {}'.format(division))

     path_submission_root = submission_dir
-    path_submission_division=os.path.join(path_submission_root, division)
+    path_submission_division = os.path.join(path_submission_root, division)
     if not os.path.isdir(path_submission_division):
         os.makedirs(path_submission_division)

@@ -48,12 +53,12 @@ def generate_submission(i):

     print('* MLPerf tiny submitter: {}'.format(submitter))

-    path_submission=os.path.join(path_submission_division, submitter)
+    path_submission = os.path.join(path_submission_division, submitter)
     if not os.path.isdir(path_submission):
         os.makedirs(path_submission)

     # SUT base
-    system=i.get('system','default')
+    system = i.get('system', 'default')

     code_path = os.path.join(path_submission, "code")

     for res in results:
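The nested loops that follow materialize the tiny submission layout: <division>/<submitter>/ with parallel code, systems, results, measurements and compliance subtrees. A sketch that just prints the skeleton for one hypothetical result:

import os

division, submitter, sub_res = "open", "acme", "my-sut"   # hypothetical names
root = os.path.join("submission", division, submitter)

for leaf in ["code",
             "systems",
             os.path.join("results", sub_res),
             os.path.join("measurements", sub_res),
             os.path.join("compliance", sub_res)]:
    path = os.path.join(root, leaf)
    os.makedirs(path, exist_ok=True)
    print(path)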
@@ -71,87 +76,128 @@ def generate_submission(i):
         else:
             sub_res = res
         submission_path = os.path.join(path_submission, "results", sub_res)
-        measurement_path = os.path.join(path_submission, "measurements", sub_res)
+        measurement_path = os.path.join(
+            path_submission, "measurements", sub_res)
         compliance_path = os.path.join(path_submission, "compliance", sub_res)
         system_path = os.path.join(path_submission, "systems")
         submission_system_path = system_path

         if not os.path.isdir(submission_system_path):
             os.makedirs(submission_system_path)
-        system_file = os.path.join(submission_system_path, sub_res+".json")
+        system_file = os.path.join(submission_system_path, sub_res + ".json")
         with open(system_file, "w") as fp:
             json.dump(system_meta, fp, indent=2)

-        models = [f for f in os.listdir(result_path) if not os.path.isfile(os.path.join(result_path, f))]
+        models = [
+            f for f in os.listdir(result_path) if not os.path.isfile(
+                os.path.join(
+                    result_path, f))]
         for model in models:
             result_model_path = os.path.join(result_path, model)
             submission_model_path = os.path.join(submission_path, model)
             measurement_model_path = os.path.join(measurement_path, model)
             compliance_model_path = os.path.join(compliance_path, model)
             code_model_path = os.path.join(code_path, model)
-            scenarios = [f for f in os.listdir(result_model_path) if not os.path.isfile(os.path.join(result_model_path, f))]
+            scenarios = [
+                f for f in os.listdir(result_model_path) if not os.path.isfile(
+                    os.path.join(
+                        result_model_path, f))]
             submission_code_path = code_model_path
             if not os.path.isdir(submission_code_path):
                 os.makedirs(submission_code_path)
-            if not os.path.exists(os.path.join(submission_code_path, "README.md")):
-                with open(os.path.join(submission_code_path, "README.md"), mode='w'): pass #create an empty README
+            if not os.path.exists(os.path.join(
+                    submission_code_path, "README.md")):
+                with open(os.path.join(submission_code_path, "README.md"), mode='w'):
+                    pass  # create an empty README

             print('* MLPerf inference model: {}'.format(model))
             for scenario in scenarios:
-                result_scenario_path = os.path.join(result_model_path, scenario)
-                submission_scenario_path = os.path.join(submission_model_path, scenario)
-                measurement_scenario_path = os.path.join(measurement_model_path, scenario)
-                compliance_scenario_path = os.path.join(compliance_model_path, scenario)
-
-                modes = [f for f in os.listdir(result_scenario_path) if not os.path.isfile(os.path.join(result_scenario_path, f))]
+                result_scenario_path = os.path.join(
+                    result_model_path, scenario)
+                submission_scenario_path = os.path.join(
+                    submission_model_path, scenario)
+                measurement_scenario_path = os.path.join(
+                    measurement_model_path, scenario)
+                compliance_scenario_path = os.path.join(
+                    compliance_model_path, scenario)
+
+                modes = [
+                    f for f in os.listdir(result_scenario_path) if not os.path.isfile(
+                        os.path.join(
+                            result_scenario_path, f))]
                 for mode in modes:
                     result_mode_path = os.path.join(result_scenario_path, mode)
-                    submission_mode_path = os.path.join(submission_scenario_path, mode)
+                    submission_mode_path = os.path.join(
+                        submission_scenario_path, mode)
                     submission_results_path = submission_mode_path
                     submission_measurement_path = measurement_scenario_path
-                    submission_compliance_path = os.path.join(compliance_scenario_path, mode)
-                    if mode=='performance':
-                        result_mode_path=os.path.join(result_mode_path, 'run_1')
-                        submission_results_path=os.path.join(submission_mode_path, 'run_1')
+                    submission_compliance_path = os.path.join(
+                        compliance_scenario_path, mode)
+                    if mode == 'performance':
+                        result_mode_path = os.path.join(
+                            result_mode_path, 'run_1')
+                        submission_results_path = os.path.join(
+                            submission_mode_path, 'run_1')
                     if not os.path.isdir(submission_results_path):
                         os.makedirs(submission_results_path)
                     if not os.path.isdir(submission_measurement_path):
                         os.makedirs(submission_measurement_path)
                     if not os.path.isdir(submission_compliance_path):
                         os.makedirs(submission_compliance_path)
-                    mlperf_inference_conf_path = os.path.join(result_mode_path, "mlperf.conf")
+                    mlperf_inference_conf_path = os.path.join(
+                        result_mode_path, "mlperf.conf")
                     if os.path.exists(mlperf_inference_conf_path):
-                        shutil.copy(mlperf_inference_conf_path, os.path.join(submission_measurement_path, 'mlperf.conf'))
-                    user_conf_path = os.path.join(result_mode_path, "user.conf")
+                        shutil.copy(
+                            mlperf_inference_conf_path, os.path.join(
+                                submission_measurement_path, 'mlperf.conf'))
+                    user_conf_path = os.path.join(
+                        result_mode_path, "user.conf")
                     if os.path.exists(user_conf_path):
-                        shutil.copy(user_conf_path, os.path.join(submission_measurement_path, 'user.conf'))
-                    measurements_json_path = os.path.join(result_mode_path, "measurements.json")
+                        shutil.copy(
+                            user_conf_path, os.path.join(
+                                submission_measurement_path, 'user.conf'))
+                    measurements_json_path = os.path.join(
+                        result_mode_path, "measurements.json")
                     if os.path.exists(user_conf_path):
-                        shutil.copy(measurements_json_path, os.path.join(submission_measurement_path, sub_res+'.json'))
+                        shutil.copy(
+                            measurements_json_path,
+                            os.path.join(
+                                submission_measurement_path,
+                                sub_res + '.json'))
                     files = []
                     readme = False
                     for f in os.listdir(result_mode_path):
                         if f.startswith('mlperf_'):
                             files.append(f)
                         if f == "README.md":
-                            shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, f))
+                            shutil.copy(
+                                os.path.join(
+                                    result_mode_path, f), os.path.join(
+                                    submission_measurement_path, f))
                             readme = True

                     if mode == "accuracy":
-                        if os.path.exists(os.path.join(result_mode_path, "accuracy.txt")):
+                        if os.path.exists(os.path.join(
+                                result_mode_path, "accuracy.txt")):
                             files.append("accuracy.txt")

                     for f in files:
                         print(' * ' + f)
                         p_target = os.path.join(submission_results_path, f)
-                        shutil.copy(os.path.join(result_mode_path, f), p_target)
+                        shutil.copy(
+                            os.path.join(
+                                result_mode_path,
+                                f),
+                            p_target)

                     if not readme:
-                        with open(os.path.join(submission_measurement_path, "README.md"), mode='w'): pass #create an empty README
+                        with open(os.path.join(submission_measurement_path, "README.md"), mode='w'):
+                            pass  # create an empty README
+
+    return {'return': 0}

-    return {'return':0}

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/generate-nvidia-engine/customize.py b/script/generate-nvidia-engine/customize.py
index 1cbe9e3d9a..efa6eb7e6f 100644
--- a/script/generate-nvidia-engine/customize.py
+++ b/script/generate-nvidia-engine/customize.py
@@ -2,29 +2,36 @@
 import os
 import shutil

+
 def preprocess(i):

     os_info = i['os_info']
     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     if 'CM_MODEL' not in env:
-        return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the model to run'}
     if 'CM_MLPERF_DEVICE' not in env:
-        return {'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the device to run on'}

-    scenarios = env['CM_LOADGEN_SCENARIO']#will later extend to other scenarios
+    # will later extend to other scenarios
+    scenarios = env['CM_LOADGEN_SCENARIO']

     cmd = " --action generate_engines " +\
-          " --benchmarks " + env['CM_MODEL']+ \
+          " --benchmarks " + env['CM_MODEL'] + \
           " --scenarios " + scenarios + \
-          " --gpu_batch_size="+env['CM_MODEL_BATCH_SIZE'] +\
-          " --gpu_copy_streams="+env['CM_GPU_COPY_STREAMS'] +\
-          " --workspace_size="+env['CM_TENSORRT_WORKSPACE_SIZE']
+          " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\
+          " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\
+          " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE']

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):
     env = i['env']

-    return {'return':0}
+    return {'return': 0}
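One caveat with generate-nvidia-engine/customize.py: preprocess() reads env without ever binding it, so the checks would raise NameError as written; presumably env = i['env'] was intended. A repaired sketch of the flag assembly under that assumption (the cmd string is never consumed in the visible hunk, so its use at the end below is illustrative):

def preprocess(i):
    os_info = i['os_info']
    env = i['env']   # assumed; the original function never assigns env
    if os_info['platform'] == 'windows':
        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

    scenarios = env['CM_LOADGEN_SCENARIO']  # will later extend to other scenarios
    cmd = " --action generate_engines " +\
          " --benchmarks " + env['CM_MODEL'] + \
          " --scenarios " + scenarios + \
          " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\
          " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\
          " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE']
    env['CM_RUN_CMD_ARGS'] = cmd  # hypothetical consumer
    return {'return': 0}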
625df997a2..85b190bb1d 100644 --- a/script/get-android-sdk/customize.py +++ b/script/get-android-sdk/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,7 +16,7 @@ def preprocess(i): run_script_input = i['run_script_input'] # Check if ANDROID_HOME is already set - android_home = os.environ.get('ANDROID_HOME','').strip() + android_home = os.environ.get('ANDROID_HOME', '').strip() # We are inside CM cache entry cur_dir = os.getcwd() @@ -23,8 +24,8 @@ def preprocess(i): if android_home == '': android_home = cur_dir - env['CM_ANDROID_HOME']=android_home - env['ANDROID_HOME']=android_home + env['CM_ANDROID_HOME'] = android_home + env['ANDROID_HOME'] = android_home paths = [] @@ -39,14 +40,15 @@ def preprocess(i): elif platform == "darwin": host_os_for_android = 'mac' - sdk_manager_file = 'sdkmanager'+ext + sdk_manager_file = 'sdkmanager' + ext - print ('') + print('') found = False - for x in ['cmdline-tools', 'cmdline-tools'+os.sep+'tools', 'tools']: - sdk_manager_path = os.path.join(android_home, x, 'bin', sdk_manager_file) + for x in ['cmdline-tools', 'cmdline-tools' + os.sep + 'tools', 'tools']: + sdk_manager_path = os.path.join( + android_home, x, 'bin', sdk_manager_file) if os.path.isfile(sdk_manager_path): found = True break @@ -59,35 +61,41 @@ def preprocess(i): os.chdir(new_path) - cmdline_tools_version=env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION','') + cmdline_tools_version = env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION', '') env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL'] - package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_OS}', host_os_for_android) - package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_VERSION}', cmdline_tools_version) + package_url = package_url.replace( + '${CM_ANDROID_CMDLINE_TOOLS_OS}', + host_os_for_android) + package_url = package_url.replace( + '${CM_ANDROID_CMDLINE_TOOLS_VERSION}', + cmdline_tools_version) env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url - print ('') - print ('Downloading from {} ...'.format(package_url)) + print('') + print('Downloading from {} ...'.format(package_url)) cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':package_url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + print('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', - 'filename':filename, - 'strip_folders':0}) - if r['return']>0: return r + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename, + 'strip_folders': 0}) + if r['return'] > 0: + return r # if os.path.isfile(filename): # print ('Removing file {}'.format(filename)) @@ -97,7 +105,12 @@ def preprocess(i): os.chdir(cur_dir) - sdk_manager_path = os.path.join(android_home, 'cmdline-tools', 'tools', 'bin', sdk_manager_file) + sdk_manager_path = os.path.join( + android_home, + 'cmdline-tools', + 'tools', + 'bin', + sdk_manager_file) sdk_manager_dir = os.path.dirname(sdk_manager_path) @@ -109,61 +122,62 @@ def preprocess(i): paths.append(sdk_manager_dir) # Prepare SDK - print ('Preparing Android SDK manager ...') - - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 
'script_name':'prepare-sdk-manager'}) - if r['return']>0: return r + print('Preparing Android SDK manager ...') + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'prepare-sdk-manager'}) + if r['return'] > 0: + return r - build_tools_version=env['CM_ANDROID_BUILD_TOOLS_VERSION'] + build_tools_version = env['CM_ANDROID_BUILD_TOOLS_VERSION'] - path_build_tools = os.path.join(android_home, 'build-tools', build_tools_version) - env['CM_ANDROID_BUILD_TOOLS_PATH']=path_build_tools + path_build_tools = os.path.join( + android_home, 'build-tools', build_tools_version) + env['CM_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools paths.append(path_build_tools) - - cmake_version=env['CM_ANDROID_CMAKE_VERSION'] + cmake_version = env['CM_ANDROID_CMAKE_VERSION'] path_cmake = os.path.join(android_home, 'cmake', cmake_version, 'bin') - env['CM_ANDROID_CMAKE_PATH']=path_cmake + env['CM_ANDROID_CMAKE_PATH'] = path_cmake paths.append(path_cmake) - path_emulator = os.path.join(android_home, 'emulator') - env['CM_ANDROID_EMULATOR_PATH']=path_emulator + env['CM_ANDROID_EMULATOR_PATH'] = path_emulator paths.append(path_emulator) path_platform_tools = os.path.join(android_home, 'platform-tools') - env['CM_ANDROID_PLATFORM_TOOLS_PATH']=path_platform_tools + env['CM_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools paths.append(path_platform_tools) - - android_version=env['CM_ANDROID_VERSION'] + android_version = env['CM_ANDROID_VERSION'] path_platforms = os.path.join(android_home, 'platforms', android_version) - env['CM_ANDROID_PLATFORMS_PATH']=path_platforms - + env['CM_ANDROID_PLATFORMS_PATH'] = path_platforms path_tools = os.path.join(android_home, 'tools') - env['CM_ANDROID_TOOLS_PATH']=path_tools + env['CM_ANDROID_TOOLS_PATH'] = path_tools paths.append(path_tools) - android_ndk_version=env['CM_ANDROID_NDK_VERSION'] + android_ndk_version = env['CM_ANDROID_NDK_VERSION'] # Check Android NDK path_ndk = os.path.join(android_home, 'ndk', android_ndk_version) - env['CM_ANDROID_NDK_PATH']=path_ndk - env['ANDROID_NDK_HOME']=path_ndk - - - - path_ndk_compiler = os.path.join(path_ndk, 'toolchains', 'llvm', 'prebuilt', host_os_for_ndk, 'bin') - env['CM_ANDROID_LLVM_PATH']=path_ndk_compiler - env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH']=os.path.join(path_ndk_compiler, 'clang.exe') + env['CM_ANDROID_NDK_PATH'] = path_ndk + env['ANDROID_NDK_HOME'] = path_ndk + + path_ndk_compiler = os.path.join( + path_ndk, + 'toolchains', + 'llvm', + 'prebuilt', + host_os_for_ndk, + 'bin') + env['CM_ANDROID_LLVM_PATH'] = path_ndk_compiler + env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + path_ndk_compiler, 'clang.exe') paths.append(path_ndk_compiler) - - env['+PATH'] = paths - return {'return':0} #, 'version': version} + return {'return': 0} # , 'version': version} diff --git a/script/get-aocl/customize.py b/script/get-aocl/customize.py index 62c5a185a9..285ed97e69 100644 --- a/script/get-aocl/customize.py +++ b/script/get-aocl/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,17 +14,23 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] env['CM_AOCL_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] - env['CM_AOCL_BUILD_PATH'] = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "build") - aocl_lib_path = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], "build", "aocl-release", "src") + env['CM_AOCL_BUILD_PATH'] = os.path.join( + 
env['CM_GIT_REPO_CHECKOUT_PATH'], "build") + aocl_lib_path = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "build", + "aocl-release", + "src") env['CM_AOCL_LIB_PATH'] = aocl_lib_path - env['+LIBRARY_PATH'] = [ aocl_lib_path ] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [ aocl_lib_path ] - env['+LD_LIBRARY_PATH'] = [ aocl_lib_path ] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [ aocl_lib_path ] + env['+LIBRARY_PATH'] = [aocl_lib_path] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [aocl_lib_path] + env['+LD_LIBRARY_PATH'] = [aocl_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [aocl_lib_path] - return {'return':0} + return {'return': 0} diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py index d6401d28ae..b7198b30bc 100644 --- a/script/get-aria2/customize.py +++ b/script/get-aria2/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): # Pre-set by CM @@ -12,20 +13,21 @@ def preprocess(i): # Check if a given tool is already installed file_name_core = 'aria2c' - file_name = file_name_core+'.exe' if os_info['platform'] == 'windows' else file_name_core + file_name = file_name_core + \ + '.exe' if os_info['platform'] == 'windows' else file_name_core force_install = env.get('CM_FORCE_INSTALL', False) == True if not force_install: r = i['automation'].find_artifact({'file_name': file_name, - 'env':env, - 'os_info':os_info, + 'env': env, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_ARIA2_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: # Not found, try install force_install = True @@ -35,8 +37,9 @@ def preprocess(i): # Force install if force_install: # Attempt to run installer - version = env.get('CM_VERSION','') - if version == '': version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION'] + version = env.get('CM_VERSION', '') + if version == '': + version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION'] if os_info['platform'] == 'windows': archive = 'aria2-{}-win-64bit-build1' @@ -48,66 +51,76 @@ def preprocess(i): ext2 = '.tar' archive = archive.format(version) - archive_with_ext = archive+ext + archive_with_ext = archive + ext env['CM_ARIA2_DOWNLOAD_DIR'] = archive env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext - if ext2!='': - env['CM_ARIA2_DOWNLOAD_FILE2'] = archive+ext2 + if ext2 != '': + env['CM_ARIA2_DOWNLOAD_FILE2'] = archive + ext2 - url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format(version, archive_with_ext) + url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format( + version, archive_with_ext) env['CM_ARIA2_DOWNLOAD_URL'] = url - print ('URL to download ARIA2: {}'.format(url)) + print('URL to download ARIA2: {}'.format(url)) - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) - if r['return']>0: return r + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + if r['return'] > 0: + return r - if os_info['platform'] == 'windows' or env.get('CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': + if os_info['platform'] == 'windows' or env.get( + 'CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': 
install_path = os.path.join(os.getcwd(), archive) path_to_file = os.path.join(install_path, file_name) if not os.path.isfile(path_to_file): - return {'return':1, 'error':'file not found: {}'.format(path_to_file)} + return {'return': 1, + 'error': 'file not found: {}'.format(path_to_file)} env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes' else: - path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH','') + path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH', '') env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin r = i['automation'].find_artifact({'file_name': file_name, - 'env':env, - 'os_info':os_info, + 'env': env, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_ARIA2_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return']>0: return r + 'detect_version': True, + 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + return r + + return {'return': 0} - return {'return':0} def detect_version(i): env = i['env'] r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_ARIA2_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_ARIA2_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_ARIA2_BIN_WITH_PATH'] @@ -116,7 +129,7 @@ def postprocess(i): env['CM_ARIA2_INSTALLED_PATH'] = found_path - if env.get('CM_ARIA2_INSTALLED_TO_CACHE','')=='yes': + if env.get('CM_ARIA2_INSTALLED_TO_CACHE', '') == 'yes': env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py index af92de93b0..9376577a18 100644 --- a/script/get-aws-cli/customize.py +++ b/script/get-aws-cli/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,40 +14,44 @@ def preprocess(i): env['FILE_NAME'] = file_name if 'CM_AWS_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, - 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_AWS_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_AWS_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'aws-cli/([\d.]+)\s', 'group_number': 1, - 'env_key':'CM_AWS_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_AWS_VERSION', + 'which_env': i['env']}) 
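# Both get-aria2 and get-aws-cli follow the same detection idiom being
# reformatted here: run the tool, regex-match its version output, and return
# CM's {'return': ..., 'version': ...} dict, with return code 16 meaning
# "not found". A plain-Python sketch of that idiom (subprocess is used here
# only for illustration; the scripts delegate this to CM's parse_version and
# find_artifact helpers):

import re
import subprocess


def sketch_detect_version(cmd, match_text, group_number=1):
    proc = subprocess.run(cmd, capture_output=True, text=True)
    output = proc.stdout + proc.stderr  # some tools print versions to stderr
    match = re.search(match_text, output)
    if match is None:
        return {'return': 16, 'error': 'version not detected'}
    return {'return': 0, 'version': match.group(group_number)}

# e.g. sketch_detect_version(['aws', '--version'], r'aws-cli/([\d.]+)\s')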
+ if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_AWS_BIN_WITH_PATH'] @@ -54,6 +59,6 @@ def postprocess(i): found_path = os.path.dirname(found_file_path) env['CM_AWS_INSTALLED_PATH'] = found_path - env['CM_AWS_CACHE_TAGS'] = 'version-'+version + env['CM_AWS_CACHE_TAGS'] = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py index 9e14fc5a51..39ac03e645 100644 --- a/script/get-bazel/customize.py +++ b/script/get-bazel/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,48 +14,52 @@ def preprocess(i): env['FILE_NAME'] = file_name if 'CM_BAZEL_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, - 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_BAZEL_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_BAZEL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'bazel\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_BAZEL_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_BAZEL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_BAZEL_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) env['CM_BAZEL_INSTALLED_PATH'] = found_path - env['+PATH'] = [ found_path ] + env['+PATH'] = [found_path] - env['CM_BAZEL_CACHE_TAGS'] = 'version-'+version + env['CM_BAZEL_CACHE_TAGS'] = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-blis/customize.py b/script/get-blis/customize.py index fc5d8303a7..0582e4bd2c 100644 --- a/script/get-blis/customize.py +++ b/script/get-blis/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,8 +16,8 @@ def preprocess(i): env['CM_BLIS_SRC_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + return {'return': 0} - return {'return':0} def postprocess(i): @@ -29,6 +30,6 @@ def postprocess(i): blis_lib_path = os.path.join(install_dir, 'lib') - env['+LD_LIBRARY_PATH'] = [ blis_lib_path ] if '+LD_LIBRARY_PATH' not in env else 
env['+LD_LIBRARY_PATH'] + [ blis_lib_path ] + env['+LD_LIBRARY_PATH'] = [blis_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [blis_lib_path] - return {'return':0} + return {'return': 0} diff --git a/script/get-cache-dir/customize.py b/script/get-cache-dir/customize.py index 6e8a76460d..d1a3edd741 100644 --- a/script/get-cache-dir/customize.py +++ b/script/get-cache-dir/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,7 +14,8 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -26,4 +28,4 @@ def postprocess(i): env['CM_CACHE_DIR'] = cache_dir env['CM_GET_DEPENDENT_CACHED_PATH'] = cache_dir - return {'return':0} + return {'return': 0} diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py index 1fb9fbcd2b..e36b2c6743 100644 --- a/script/get-cl/customize.py +++ b/script/get-cl/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] != 'windows': - return {'return':0} + return {'return': 0} env = i['env'] @@ -19,22 +20,25 @@ def preprocess(i): # Will check env['CM_TMP_PATH'] if comes from installation script ii = {'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_CL_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces} + 'detect_version': True, + 'env_path_key': 'CM_CL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces} rr = automation.find_artifact(ii) - if rr['return'] >0 : + if rr['return'] > 0: # If not found in PATH, try a longer search if rr['return'] != 16: return rr - if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': - print (i['recursion_spaces'] + ' Starting deep search for {} - it may take some time ...'.format(file_name)) + print( + i['recursion_spaces'] + + ' Starting deep search for {} - it may take some time ...'.format(file_name)) paths = ['C:\\Program Files\\Microsoft Visual Studio', 'C:\\Program Files (x86)\\Microsoft Visual Studio', @@ -42,10 +46,11 @@ def preprocess(i): restrict_paths = ['Hostx64\\x64'] - r = automation.find_file_deep({'paths':paths, - 'file_name':file_name, - 'restrict_paths':restrict_paths}) - if r['return']>0: return r + r = automation.find_file_deep({'paths': paths, + 'file_name': file_name, + 'restrict_paths': restrict_paths}) + if r['return'] > 0: + return r found_paths = r['found_paths'] @@ -57,10 +62,11 @@ def preprocess(i): env['CM_TMP_PATH'] = tmp_paths env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - ii['env']=env + ii['env'] = env rr = automation.find_artifact(ii) - if rr['return'] >0 : return rr + if rr['return'] > 0: + return rr else: return rr @@ -69,53 +75,57 @@ def preprocess(i): # Check vcvarall.bat state = i['state'] - script_prefix = state.get('script_prefix',[]) + script_prefix = state.get('script_prefix', []) # Attempt to find vcvars64.bat bat_file_name = 'VC\\Auxiliary\\Build\\vcvars64.bat' - r = automation.find_file_back({'path':found_path, 'file_name':bat_file_name}) - if r['return']>0: return r + r = automation.find_file_back( + {'path': found_path, 'file_name': bat_file_name}) + if r['return'] > 0: + return r found_path_bat = r['found_path'] - if 
found_path_bat!='': + if found_path_bat != '': path_to_vcvars = os.path.join(found_path_bat, bat_file_name) - s = os_info['run_bat'].replace('${bat_file}', '"' + path_to_vcvars + '"') + s = os_info['run_bat'].replace( + '${bat_file}', '"' + path_to_vcvars + '"') script_prefix.append(s) state['script_prefix'] = script_prefix - env['CM_CL_BIN']=file_name - env['CM_CL_BIN_WITH_PATH']=os.path.join(found_path, file_name) + env['CM_CL_BIN'] = file_name + env['CM_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name) # General compiler for general program compilation - env['CM_C_COMPILER_BIN']=file_name - env['CM_C_COMPILER_WITH_PATH']=os.path.join(found_path, file_name) - env['CM_C_COMPILER_FLAG_OUTPUT']='/Fe:' - env['CM_C_COMPILER_FLAG_VERSION']='' + env['CM_C_COMPILER_BIN'] = file_name + env['CM_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name) + env['CM_C_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['CM_C_COMPILER_FLAG_VERSION'] = '' - env['CM_CXX_COMPILER_BIN']=env['CM_C_COMPILER_BIN'] - env['CM_CXX_COMPILER_WITH_PATH']=env['CM_C_COMPILER_WITH_PATH'] - env['CM_CXX_COMPILER_FLAG_OUTPUT']='/Fe:' - env['CM_CXX_COMPILER_FLAG_VERSION']='' + env['CM_CXX_COMPILER_BIN'] = env['CM_C_COMPILER_BIN'] + env['CM_CXX_COMPILER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] + env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['CM_CXX_COMPILER_FLAG_VERSION'] = '' + + return {'return': 0} - return {'return':0} def detect_version(i): r = i['automation'].parse_version({'match_text': r'Version\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_CL_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_CL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} def postprocess(i): @@ -124,13 +134,14 @@ def postprocess(i): r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] - env['CM_CL_CACHE_TAGS'] = 'version-'+version - env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-msvc' + env['CM_CL_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc' env['CM_COMPILER_FAMILY'] = 'MSVC' env['CM_COMPILER_VERSION'] = env['CM_CL_VERSION'] - return {'return':0, 'version':version} + return {'return': 0, 'version': version} diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py index c9a58db920..cea759b2ee 100644 --- a/script/get-cmake/customize.py +++ b/script/get-cmake/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,48 +14,52 @@ def preprocess(i): if 'CM_CMAKE_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, - 'env':env, - 'os_info':os_info, - 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_CMAKE_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_CMAKE_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + 
return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'cmake version\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_CMAKE_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_CMAKE_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_CMAKE_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) - env['CM_CMAKE_CACHE_TAGS'] = 'version-'+version + env['CM_CMAKE_CACHE_TAGS'] = 'version-' + version if 'CM_HOST_CPU_TOTAL_CORES' in env: - env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-cmsis_5/customize.py b/script/get-cmsis_5/customize.py index af9b13c464..9f9ebc556a 100644 --- a/script/get-cmsis_5/customize.py +++ b/script/get-cmsis_5/customize.py @@ -2,19 +2,21 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' if 'CM_GIT_RECURSE_SUBMODULES' not in env: env['CM_GIT_RECURSE_SUBMODULES'] = '' - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -22,4 +24,4 @@ def postprocess(i): state = i['state'] env['CMSIS_PATH'] = os.path.join(os.getcwd(), 'cmsis') - return {'return':0} + return {'return': 0} diff --git a/script/get-compiler-flags/customize.py b/script/get-compiler-flags/customize.py index 23ccbe6472..e9fc573015 100644 --- a/script/get-compiler-flags/customize.py +++ b/script/get-compiler-flags/customize.py @@ -2,6 +2,7 @@ import os import subprocess + def preprocess(i): os_info = i['os_info'] @@ -13,12 +14,13 @@ def preprocess(i): # TBD: add unified flags for Windows if os_info['platform'] == 'windows': - return {'return':0} + return {'return': 0} - if env.get("CM_FAST_COMPILATION") in [ "yes", "on", "1" ]: + if env.get("CM_FAST_COMPILATION") in ["yes", "on", "1"]: DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_FAST", "-O3") - DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3") # -flto") - this flag is not always available - elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1" ]: + # -flto") - this flag is not always available + DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3") + elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1"]: DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEBUG", "-O0") DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEBUG", "-O0") else: @@ -60,4 +62,4 @@ def preprocess(i): # if int(env['CM_HOST_CPU_FAMILY']) >= 0: # env['+ CFLAGS'] += ["-march=znver2", "-flto"] - return {'return':0} + return {'return': 0} diff --git a/script/get-compiler-rust/customize.py b/script/get-compiler-rust/customize.py index cd42edf7bf..2a1b202f81 100644 --- a/script/get-compiler-rust/customize.py +++ b/script/get-compiler-rust/customize.py @@ -1,6 +1,7 @@ from cmind import 
utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,13 +14,14 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - rust_path = os.path.join(os.path.expanduser('~'),".cargo", "bin") - env['+PATH'] = [ rust_path ] + rust_path = os.path.join(os.path.expanduser('~'), ".cargo", "bin") + env['+PATH'] = [rust_path] - return {'return':0} + return {'return': 0} diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py index 4d6f37ca1a..7e9cd92363 100644 --- a/script/get-conda/customize.py +++ b/script/get-conda/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -18,67 +19,77 @@ def preprocess(i): tmp_path = env.get('CM_CONDA_INSTALL_PATH', env.get('CM_TMP_PATH', '')) if tmp_path: x = ';' if os_info['platform'] == 'windows' else ':' - tmp_path+=x + tmp_path += x conda_path = os.path.join(os.path.expanduser("~"), "miniconda3", "bin") if os.path.exists(conda_path): - tmp_path += os.path.join(os.path.expanduser("~"), "miniconda3", "bin") + tmp_path += os.path.join(os.path.expanduser("~"), + "miniconda3", "bin") env['CM_TMP_PATH'] = tmp_path r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_CONDA_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) + 'detect_version': True, + 'env_path_key': 'CM_CONDA_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) else: env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin' - env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) + env['CM_CONDA_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) - if conda_prefix_name != '' or r['return'] >0 : + if conda_prefix_name != '' or r['return'] > 0: if conda_prefix_name != '' or r['return'] == 16: if conda_prefix_name == '': - if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': return r - print (recursion_spaces+' # {}'.format(r['error'])) + print(recursion_spaces + ' # {}'.format(r['error'])) # Attempt to run installer - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) - if r['return']>0: return r + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + if r['return'] > 0: + return r # Grigori: temporal fix - should be generalized/improved above - if os_info['platform'] == 'windows' and env.get('CM_CONDA_BIN_WITH_PATH','')=='': - env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") - env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name) - + if os_info['platform'] == 'windows' and env.get( + 'CM_CONDA_BIN_WITH_PATH', '') == '': + env['CM_CONDA_INSTALL_PATH'] = os.path.join( + os.getcwd(), "miniconda3") + env['CM_CONDA_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name) else: found_path = r['found_path'] - env['+PATH'] = [ found_path ] + env['+PATH'] = [found_path] + + return {'return': 0} - return {'return':0} def detect_version(i): r = 
i['automation'].parse_version({'match_text': r'conda\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_CONDA_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r - return {'return':0, 'version':r['version']} + 'env_key': 'CM_CONDA_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + return {'return': 0, 'version': r['version']} + def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r conda_bin_path = os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']) env['CM_CONDA_BIN_PATH'] = conda_bin_path - env['+PATH'] = [ conda_bin_path ] + env['+PATH'] = [conda_bin_path] conda_prefix = os.path.dirname(conda_bin_path) env['CM_CONDA_PREFIX'] = conda_prefix @@ -88,11 +99,11 @@ def postprocess(i): if os.path.exists(conda_lib_path): env['CM_CONDA_LIB_PATH'] = conda_lib_path - env['+LD_LIBRARY_PATH'] = [ conda_lib_path ] - env['+LIBRARY_PATH'] = [ conda_lib_path ] + env['+LD_LIBRARY_PATH'] = [conda_lib_path] + env['+LIBRARY_PATH'] = [conda_lib_path] version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + return {'return': 0, 'version': version} diff --git a/script/get-croissant/customize.py b/script/get-croissant/customize.py index 1ced8a4846..afa8214659 100644 --- a/script/get-croissant/customize.py +++ b/script/get-croissant/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] - return {'return':0} + return {'return': 0} + def postprocess(i): os_info = i['os_info'] env = i['env'] - - return {'return':0} + return {'return': 0} diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py index bf59c1ccb6..ba66b8904b 100644 --- a/script/get-cuda-devices/customize.py +++ b/script/get-cuda-devices/customize.py @@ -2,14 +2,17 @@ import os import subprocess + def preprocess(i): env = i['env'] - if str(env.get('CM_DETECT_USING_PYCUDA', '')).lower() in [ "1", "yes", "true"]: + if str(env.get('CM_DETECT_USING_PYCUDA', '') + ).lower() in ["1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -19,9 +22,10 @@ def postprocess(i): os_info = i['os_info'] r = utils.load_txt(file_name='tmp-run.out', - check_if_exists = True, - split = True) - if r['return']>0: return r + check_if_exists=True, + split=True) + if r['return'] > 0: + return r lst = r['list'] @@ -32,16 +36,16 @@ def postprocess(i): gpu_id = -1 for line in lst: - #print (line) + # print (line) j = line.find(':') - if j>=0: + if j >= 0: key = line[:j].strip() - val = line[j+1:].strip() + val = line[j + 1:].strip() if key == "GPU Device ID": - gpu_id+=1 + gpu_id += 1 gpu[gpu_id] = {} if gpu_id < 0: @@ -50,7 +54,7 @@ def postprocess(i): gpu[gpu_id][key] = val p[key] = val - key_env = 'CM_CUDA_DEVICE_PROP_'+key.upper().replace(' ','_') + key_env = 'CM_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val state['cm_cuda_num_devices'] = gpu_id + 1 @@ -59,4 +63,4 @@ def postprocess(i): state['cm_cuda_device_prop'] = p state['cm_cuda_devices_prop'] = gpu - return {'return':0} + return {'return': 0} diff --git a/script/get-cuda-devices/detect.py b/script/get-cuda-devices/detect.py index 2b9a3383cb..6603cc9f46 100644 --- a/script/get-cuda-devices/detect.py +++ b/script/get-cuda-devices/detect.py @@ -1,6 +1,7 @@ import pycuda.driver as cuda import 
pycuda.autoinit + def get_gpu_info(): num_gpus = cuda.Device.count() all_gpu_info = [] @@ -39,7 +40,7 @@ def get_gpu_info(): # Print the GPU information for all available GPUs if __name__ == "__main__": gpu_info_list = get_gpu_info() - with open ("tmp-run.out", "w") as f: + with open("tmp-run.out", "w") as f: for idx, gpu_info in enumerate(gpu_info_list): print(f"GPU {idx}:") for key, value in gpu_info.items(): diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py index 7bae3eb392..925e1e8b94 100644 --- a/script/get-cuda/customize.py +++ b/script/get-cuda/customize.py @@ -2,13 +2,14 @@ import os import json + def preprocess(i): os_info = i['os_info'] env = i['env'] - if str(env.get('CUDA_SKIP_SUDO','')).lower() == 'true': + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': env['CM_SUDO'] = '' recursion_spaces = i['recursion_spaces'] @@ -16,10 +17,12 @@ def preprocess(i): if os_info['platform'] == 'windows': file_name = env['CM_TMP_FILE_TO_CHECK_WINDOWS'] - if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" paths = [] - for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", + "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: if os.path.isdir(path): dirs = os.listdir(path) for dr in dirs: @@ -27,9 +30,9 @@ def preprocess(i): if os.path.isdir(path2): paths.append(path2) - if len(paths)>0: + if len(paths) > 0: tmp_paths = ';'.join(paths) - tmp_paths += ';'+os.environ.get('PATH','') + tmp_paths += ';' + os.environ.get('PATH', '') env['CM_TMP_PATH'] = tmp_paths env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' @@ -40,11 +43,13 @@ def preprocess(i): # paths to cuda are not always in PATH - add a few typical locations to search for # (unless forced by a user) - if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': system_path = os.environ.get('PATH') if system_path: system_path = system_path + ":" - env['CM_TMP_PATH'] = system_path + '/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda' + env['CM_TMP_PATH'] = system_path + \ + '/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda' env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": @@ -58,13 +63,13 @@ def preprocess(i): if env_key not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': path_env_key, - 'detect_version':True, - 'env_path_key':env_key, - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': env_key, + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if os_info['platform'] == 'windows': return r @@ -74,7 +79,7 @@ def preprocess(i): else: return r - return {'return':0} + return {'return': 0} def detect_version(i): @@ -84,25 +89,28 @@ def detect_version(i): else: return detect_version_cuda_lib(i) + 
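# detect_version above dispatches between two strategies: parsing
# `nvcc --version` output for full-toolkit installs, or deriving the version
# from the cudart library path for runtime-only installs. A sketch of the
# nvcc branch, assuming nvcc is on PATH (the script itself goes through CM's
# parse_version helper rather than calling subprocess directly):

import re
import subprocess


def sketch_cuda_version_from_nvcc():
    out = subprocess.run(['nvcc', '--version'],
                         capture_output=True, text=True).stdout
    # nvcc prints e.g. "Cuda compilation tools, release 12.4, V12.4.131";
    # the same regex as in the hunk, r'release\s*([\d.]+)', extracts "12.4"
    match = re.search(r'release\s*([\d.]+)', out)
    return match.group(1) if match else None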
def detect_version_nvcc(i): r = i['automation'].parse_version({'match_text': r'release\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_CUDA_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_CUDA_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def detect_version_cuda_lib(i): env = i['env'] print(env) cuda_rt_file_path = env['CM_CUDA_RT_WITH_PATH'] - cuda_lib_path=os.path.dirname(cuda_rt_file_path) + cuda_lib_path = os.path.dirname(cuda_rt_file_path) cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir)) cuda_version = "version-missing" @@ -115,14 +123,12 @@ def detect_version_cuda_lib(i): if cuda_version_info: cuda_version = cuda_version_info.get('version') - env['CM_CUDA_VERSION'] = cuda_version version = cuda_version - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} def postprocess(i): @@ -132,10 +138,11 @@ def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] - env['CM_CUDA_CACHE_TAGS'] = 'version-'+version + env['CM_CUDA_CACHE_TAGS'] = 'version-' + version found_file_path = env[env['CM_TMP_ENV_KEY']] @@ -149,12 +156,13 @@ def postprocess(i): env['CM_NVCC_BIN'] = os.path.basename(found_file_path) else: - parent_path = os.path.dirname(found_file_path) #We traverse backwards until we find a path with include dir + # We traverse backwards until we find a path with include dir + parent_path = os.path.dirname(found_file_path) env['CM_CUDA_PATH_LIB'] = parent_path parent_path = os.path.dirname(parent_path) while os.path.isdir(parent_path): if os.path.exists(os.path.join(parent_path, "include")): - print("Path is "+parent_path) + print("Path is " + parent_path) found_path = parent_path cuda_path = found_path env['CM_CUDA_INSTALLED_PATH'] = cuda_path @@ -163,10 +171,11 @@ def postprocess(i): parent_path = os.path.dirname(parent_path) if 'CM_CUDA_INSTALLED_PATH' not in env: - return {'return': 1, 'error': "No CUDA installation path with an include directory is found"} + return { + 'return': 1, 'error': "No CUDA installation path with an include directory is found"} - env['CUDA_HOME']=cuda_path - env['CUDA_PATH']=cuda_path + env['CUDA_HOME'] = cuda_path + env['CUDA_PATH'] = cuda_path cuda_system_path_install = False system_path = os.environ.get('PATH') @@ -174,10 +183,11 @@ def postprocess(i): cuda_system_path_install = True # Check extra paths - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: env[key] = [] - ## Include + # Include cuda_path_include = os.path.join(cuda_path, 'include') if os.path.isdir(cuda_path_include): if os_info['platform'] != 'windows' and not cuda_system_path_install: @@ -186,11 +196,11 @@ def postprocess(i): env['CM_CUDA_PATH_INCLUDE'] = cuda_path_include - ## Lib + # Lib if os_info['platform'] == 'windows': - extra_dir='x64' + extra_dir = 'x64' else: - extra_dir='' + extra_dir = '' for d in ['lib64', 'lib']: cuda_path_lib = 
os.path.join(cuda_path, d) @@ -210,10 +220,12 @@ def postprocess(i): env['+ LDFLAGS'] = [] if 'CM_CUDA_PATH_LIB' in env and not cuda_system_path_install: x = env['CM_CUDA_PATH_LIB'] - if ' ' in x: x='"'+x+'"' - env['+ LDFLAGS'].append("-L"+x) + if ' ' in x: + x = '"' + x + '"' + env['+ LDFLAGS'].append("-L" + x) - env['CM_CUDA_VERSION_STRING'] = "cu"+env['CM_CUDA_VERSION'].replace(".", "") + env['CM_CUDA_VERSION_STRING'] = "cu" + \ + env['CM_CUDA_VERSION'].replace(".", "") env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['CM_CUDA_VERSION_STRING'] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py index 9f02b2cb9f..d4f3f53a6d 100644 --- a/script/get-cudnn/customize.py +++ b/script/get-cudnn/customize.py @@ -3,6 +3,7 @@ import tarfile import shutil + def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -13,20 +14,19 @@ def preprocess(i): env['CM_TMP_RUN_COPY_SCRIPT'] = "no" - # If TAR file is not explicitly specified, search - if env.get('CM_CUDNN_TAR_FILE_PATH','')=='': + if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '': cuda_path_lib = env.get('CM_CUDA_PATH_LIB') if os_info['platform'] == 'windows': - extra_pre='' - extra_ext='lib' + extra_pre = '' + extra_ext = 'lib' else: - extra_pre='lib' - extra_ext='so' + extra_pre = 'lib' + extra_ext = 'so' - libfilename = extra_pre + 'cudnn.' +extra_ext + libfilename = extra_pre + 'cudnn.' + extra_ext env['CM_CUDNN_VERSION'] = 'vdetected' if os.path.exists(os.path.join(cuda_path_lib, libfilename)): @@ -39,12 +39,14 @@ def preprocess(i): env['CM_CUDA_PATH_LIB_CUDNN'] = path return {'return': 0} - if env.get('CM_INPUT','').strip()=='': + if env.get('CM_INPUT', '').strip() == '': if os_info['platform'] == 'windows': - if env.get('CM_TMP_PATH','').strip()=='': - # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + if env.get('CM_TMP_PATH', '').strip() == '': + # Check in "C:\Program Files\NVIDIA GPU Computing + # Toolkit\CUDA" paths = [] - for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", + "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: if os.path.isdir(path): dirs = os.listdir(path) for dr in dirs: @@ -52,9 +54,9 @@ def preprocess(i): if os.path.isdir(path2): paths.append(path2) - if len(paths)>0: + if len(paths) > 0: tmp_paths = ';'.join(paths) - tmp_paths += ';'+os.environ.get('PATH','') + tmp_paths += ';' + os.environ.get('PATH', '') env['CM_TMP_PATH'] = tmp_paths env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' @@ -63,26 +65,27 @@ def preprocess(i): # paths to cuda are not always in PATH - add a few typical locations to search for # (unless forced by a user) - cm_tmp_path = env.get('CM_TMP_PATH','').strip() - if cm_tmp_path!='': - cm_tmp_path+=':' - cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' + cm_tmp_path = env.get('CM_TMP_PATH', '').strip() + if cm_tmp_path != '': + cm_tmp_path += ':' + cm_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' env['CM_TMP_PATH'] = cm_tmp_path env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - for lib_path in 
env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): - if(os.path.exists(lib_path)): - env['CM_TMP_PATH']+=':'+lib_path + for lib_path in env.get( + '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if (os.path.exists(lib_path)): + env['CM_TMP_PATH'] += ':' + lib_path r = i['automation'].find_artifact({'file_name': libfilename, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'LD_LIBRARY_PATH', - 'detect_version':False, - 'env_path_key':'CM_CUDA_PATH_LIB_CUDNN', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': False, + 'env_path_key': 'CM_CUDA_PATH_LIB_CUDNN', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if os_info['platform'] == 'windows': return r @@ -93,14 +96,15 @@ def preprocess(i): else: # On Linux we may detected file instead of path to cudnn if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']): - env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(env['CM_CUDA_PATH_LIB_CUDNN']) + env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname( + env['CM_CUDA_PATH_LIB_CUDNN']) - return {'return':0} + return {'return': 0} - if env.get('CM_CUDNN_TAR_FILE_PATH','')=='': + if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '': return {'return': 1, 'error': 'Please envoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'} - print ('Untaring file - can take some time ...') + print('Untaring file - can take some time ...') my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH'])) folder_name = my_tar.getnames()[0] @@ -111,7 +115,8 @@ def preprocess(i): import re version_match = re.match(r'cudnn-.*?-(\d+.\d+.\d+.\d+)', folder_name) if not version_match: - return {'return': 1, 'error': 'Extracted CUDNN folder does not seem proper - Version information missing'} + return { + 'return': 1, 'error': 'Extracted CUDNN folder does not seem proper - Version information missing'} version = version_match.group(1) env['CM_CUDNN_VERSION'] = version @@ -123,17 +128,19 @@ def preprocess(i): env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE'] try: - print("Copying cudnn include files to {}(CUDA_INCLUDE_PATH)".format(cuda_inc_path)) - shutil.copytree(inc_path, cuda_inc_path, dirs_exist_ok = True) + print( + "Copying cudnn include files to {}(CUDA_INCLUDE_PATH)".format(cuda_inc_path)) + shutil.copytree(inc_path, cuda_inc_path, dirs_exist_ok=True) print("Copying cudnn lib files to {}CUDA_LIB_PATH".format(cuda_lib_path)) - shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok = True) - except: - #Need to copy to system path via run.sh + shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok=True) + except BaseException: + # Need to copy to system path via run.sh env['CM_TMP_RUN_COPY_SCRIPT'] = "yes" env['CM_TMP_INC_PATH'] = inc_path env['CM_TMP_LIB_PATH'] = lib_path - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -144,11 +151,11 @@ def postprocess(i): version = env['CM_CUDNN_VERSION'] if version == 'vdetected': - path_to_cudnn = env.get('CM_CUDA_PATH_LIB_CUDNN','') + path_to_cudnn = env.get('CM_CUDA_PATH_LIB_CUDNN', '') if os.path.isdir(path_to_cudnn): path_to_include = path_to_cudnn path_to_include_file = '' - for j in range(0,2): + for j in range(0, 2): path_to_include = os.path.dirname(path_to_include) x = os.path.join(path_to_include, 'include', 'cudnn_version.h') if os.path.isfile(x): @@ -156,12 +163,13 @@ def postprocess(i): break if path_to_include_file == '' and path_to_cudnn.startswith('/lib'): - x = 
os.path.join('/usr','include','cudnn_version.h') + x = os.path.join('/usr', 'include', 'cudnn_version.h') if os.path.isfile(x): path_to_include_file = x if path_to_include_file != '': - env['CM_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(path_to_include_file) + env['CM_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname( + path_to_include_file) r = utils.load_txt(path_to_include_file, split=True) if r['return'] == 0: @@ -170,24 +178,24 @@ def postprocess(i): xversion = '' for l in lst: - l=l.strip() + l = l.strip() x = '#define CUDNN_MAJOR ' if l.startswith(x): - xversion=l[len(x):] + xversion = l[len(x):] x = '#define CUDNN_MINOR ' if l.startswith(x): - xversion+='.'+l[len(x):] + xversion += '.' + l[len(x):] x = '#define CUDNN_PATCHLEVEL ' if l.startswith(x): - xversion+='.'+l[len(x):] + xversion += '.' + l[len(x):] if xversion != '': version = xversion env['CM_CUDNN_VERSION'] = xversion - env['CM_CUDA_PATH_LIB_CUDNN_EXISTS']='yes' + env['CM_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes' - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-dataset-cifar10/customize.py b/script/get-dataset-cifar10/customize.py index b624357402..d000dc407d 100644 --- a/script/get-dataset-cifar10/customize.py +++ b/script/get-dataset-cifar10/customize.py @@ -2,15 +2,17 @@ import os import shutil + def preprocess(i): env = i['env'] return {'return': 0} + def postprocess(i): env = i['env'] - variation_tags = i.get('variation_tags',[]) + variation_tags = i.get('variation_tags', []) return {'return': 0} diff --git a/script/get-dataset-cnndm/customize.py b/script/get-dataset-cnndm/customize.py index 27363d8000..25966d8505 100644 --- a/script/get-dataset-cnndm/customize.py +++ b/script/get-dataset-cnndm/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): env = i['env'] @@ -9,21 +10,27 @@ def preprocess(i): if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes': i['run_script_input']['script_name'] = "run-intel" else: - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") return {'return': 0} + def postprocess(i): env = i['env'] - if env.get('CM_DATASET_CALIBRATION','') == "no": + if env.get('CM_DATASET_CALIBRATION', '') == "no": env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install') - env['CM_DATASET_EVAL_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_eval.json') - env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_eval.json') + env['CM_DATASET_EVAL_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_eval.json') + env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_eval.json') env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PATH'] else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_dailymail_calibration.json') - env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join(os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_dailymail_calibration.json') env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CALIBRATION_DATASET_PATH'] return {'return': 0} diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py index 02fa5289bd..60e4a3ae96 100644 --- a/script/get-dataset-coco/customize.py +++ 
b/script/get-dataset-coco/customize.py @@ -2,10 +2,11 @@ import os import shutil + def preprocess(i): # CM script internal variables - variation_tags = i.get('variation_tags',[]) + variation_tags = i.get('variation_tags', []) automation = i['automation'] env = i['env'] meta = i['meta'] @@ -13,16 +14,17 @@ def preprocess(i): # Check if path is there to detect existing data set detected = False - path = env.get('CM_TMP_PATH','') - if path!='': + path = env.get('CM_TMP_PATH', '') + if path != '': if not os.path.isdir(path): - return {'return':1, 'error':'path to dataset "{}" doesn\'t exist'.format(path)} + return {'return': 1, + 'error': 'path to dataset "{}" doesn\'t exist'.format(path)} # Check which dataset p = os.path.join(path, 'annotations') if os.path.isdir(p): - for d in [('val2017','val','2017'), - ('train2017','train','2017')]: + for d in [('val2017', 'val', '2017'), + ('train2017', 'train', '2017')]: p = os.path.join(path, d[0]) if os.path.isdir(p): @@ -32,10 +34,11 @@ def preprocess(i): break if not detected: - return {'return':1, 'error':'COCO dataset is not detected in "{}"'.format(path)} + return { + 'return': 1, 'error': 'COCO dataset is not detected in "{}"'.format(path)} - print ('') - print ('Detected COCO dataset {} {}'.format(tp,ver)) + print('') + print('Detected COCO dataset {} {}'.format(tp, ver)) env['CM_DATASET_COCO_DETECTED'] = 'yes' env['CM_DATASET_COCO_PATH'] = path @@ -44,15 +47,17 @@ def preprocess(i): tp = env['CM_DATASET_COCO_TYPE'] # Prepare URL - size=env.get('CM_DATASET_COCO_SIZE','') - if size=='small' and tp=='val' and ver=='2017': + size = env.get('CM_DATASET_COCO_SIZE', '') + if size == 'small' and tp == 'val' and ver == '2017': # We prepared a small version with 50 images for val 2017 filename_data = 'val2017_small.zip' filename_annotation = 'annotations_val2017_small.zip' - url_data_full = 'https://www.dropbox.com/scl/fi/whokyb7b7hyjqqotruyqb/{}?rlkey=hhgt4xtir91ej0nro6h69l22s&dl=0'.format(filename_data) - url_ann_full = 'https://www.dropbox.com/scl/fi/bu41y62v9zqhee8w7q6z3/{}?rlkey=seqtgozldkc0ztu76kbd47p5w&dl=0'.format(filename_annotation) + url_data_full = 'https://www.dropbox.com/scl/fi/whokyb7b7hyjqqotruyqb/{}?rlkey=hhgt4xtir91ej0nro6h69l22s&dl=0'.format( + filename_data) + url_ann_full = 'https://www.dropbox.com/scl/fi/bu41y62v9zqhee8w7q6z3/{}?rlkey=seqtgozldkc0ztu76kbd47p5w&dl=0'.format( + filename_annotation) else: url_data = env['CM_DATASET_COCO_URL_DATA'] @@ -66,52 +71,54 @@ def preprocess(i): # Add extra tags with type and version to "download-and-extract" deps to be able to reuse them # Add "from" and "to" to "download-and-extract" deps - download_extra_cache_tags='dataset,coco,data,'+tp+','+ver + download_extra_cache_tags = 'dataset,coco,data,' + tp + ',' + ver dae_input_data = { - 'extra_cache_tags':download_extra_cache_tags + 'extra_cache_tags': download_extra_cache_tags } dae_input_annotation = { - 'extra_cache_tags':download_extra_cache_tags + 'extra_cache_tags': download_extra_cache_tags } path_from = env.get('CM_FROM', '') - if path_from!='': + if path_from != '': path_from_data = os.path.join(path_from, filename_data) if not os.path.isfile(path_from_data): - return {'return':1, 'error':'File {} not found'.format(path_from_data)} + return {'return': 1, + 'error': 'File {} not found'.format(path_from_data)} dae_input_data['local_path'] = path_from_data path_from_annotation = os.path.join(path_from, filename_annotation) if not os.path.isfile(path_from_annotation): - return {'return':1, 'error':'File {} not 
found'.format(path_from_annotation)} + return {'return': 1, 'error': 'File {} not found'.format( + path_from_annotation)} dae_input_annotation['local_path'] = path_from_annotation path_to = env.get('CM_TO', '') - if path_to!='': + if path_to != '': dae_input_data['extract_path'] = path_to dae_input_annotation['extract_path'] = path_to path_store = env.get('CM_STORE', '') - if path_store!='': + if path_store != '': dae_input_data['download_path'] = path_store dae_input_data['tags'] = '_keep' dae_input_annotation['download_path'] = path_store dae_input_annotation['tags'] = '_keep' - - r = automation.update_deps({'deps':meta['prehook_deps'], - 'update_deps':{ - '746e5dad5e784ad6': dae_input_data, - 'edb6cd092ff64171': dae_input_annotation - } - }) - if r['return']>0: return r + r = automation.update_deps({'deps': meta['prehook_deps'], + 'update_deps': { + '746e5dad5e784ad6': dae_input_data, + 'edb6cd092ff64171': dae_input_annotation + } + }) + if r['return'] > 0: + return r # Prepare environment variables env['CM_DATASET_COCO_VERSION'] = ver env['CM_DATASET_COCO_TYPE'] = tp - env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp+ver + env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver env['CM_DATASET_COCO_URL_DATA_FULL'] = url_data_full env['CM_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full @@ -134,9 +141,9 @@ def preprocess(i): env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann if not detected: - print ('') - print ('URL for data: {}'.format(url_data_full)) - print ('URL for annotations: {}'.format(url_ann_full)) + print('') + print('URL for data: {}'.format(url_data_full)) + print('URL for annotations: {}'.format(url_ann_full)) # Add version and type to tags extra_cache_tags = [] @@ -144,7 +151,8 @@ def preprocess(i): if tag not in variation_tags: extra_cache_tags.append(tag) - return {'return':0, 'add_extra_cache_tags':extra_cache_tags} + return {'return': 0, 'add_extra_cache_tags': extra_cache_tags} + def postprocess(i): @@ -154,14 +162,16 @@ def postprocess(i): tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION'] - path_to = env.get('CM_TO','') + path_to = env.get('CM_TO', '') # Check if detected or downloaded - if env.get('CM_DATASET_COCO_DETECTED', '').lower() == 'yes' or path_to!='': - path_all = env['CM_DATASET_COCO_PATH'] if path_to=='' else path_to + if env.get('CM_DATASET_COCO_DETECTED', + '').lower() == 'yes' or path_to != '': + path_all = env['CM_DATASET_COCO_PATH'] if path_to == '' else path_to env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) - env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + path_all, 'annotations') else: path_all = os.getcwd() @@ -170,9 +180,9 @@ def postprocess(i): path_data = env['CM_DATASET_COCO_DATA_PATH'] path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] - print ('') - print (path_all) - print ('') + print('') + print(path_all) + print('') path_data_full = os.path.join(path_data, tp_ver) path_ann_full = os.path.join(path_ann, 'annotations') @@ -183,7 +193,8 @@ def postprocess(i): command2 = ' move /y ' + path_ann_full + ' annotations' env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) - env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + path_all, 'annotations') else: # Make soft links from data and annotations into 1 directory # (standard way for COCO) @@ -192,10 +203,9 @@ def postprocess(i): command2 = ' ln -s ' + path_ann_full + ' annotations' for command 
in [command1, command2]: - print (command) + print(command) os.system(command) - env['CM_DATASET_COCO_PATH'] = path_all env['CM_DATASET_PATH'] = path_all env['CM_DATASET_PATH_ROOT'] = path_all diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py index 77fe3c8b0b..e231126a52 100644 --- a/script/get-dataset-coco2014/customize.py +++ b/script/get-dataset-coco2014/customize.py @@ -2,29 +2,39 @@ import os import shutil + def preprocess(i): env = i['env'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") - run_dir = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools") env['CM_RUN_DIR'] = run_dir return {'return': 0} + def postprocess(i): env = i['env'] if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes": - env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join(os.getcwd(), 'install', 'sample_ids.txt') + env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join( + os.getcwd(), 'install', 'sample_ids.txt') print(env['CM_COCO2014_SAMPLE_ID_PATH']) - if env.get('CM_DATASET_CALIBRATION','') == "no": + if env.get('CM_DATASET_CALIBRATION', '') == "no": env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') - #env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') - env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'captions') - env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'latents') + # env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join( + os.getcwd(), 'install', 'captions') + env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join( + os.getcwd(), 'install', 'latents') else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') return {'return': 0} diff --git a/script/get-dataset-imagenet-helper/customize.py b/script/get-dataset-imagenet-helper/customize.py index 08de452865..b9dd841f57 100644 --- a/script/get-dataset-imagenet-helper/customize.py +++ b/script/get-dataset-imagenet-helper/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def postprocess(i): env = i['env'] script_path = env['CM_TMP_CURRENT_SCRIPT_PATH'] env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path - env['+PYTHONPATH'] = [ script_path ] + env['+PYTHONPATH'] = [script_path] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py index d28c502fc5..aa90deefd8 100644 --- a/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py +++ b/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py @@ -4,65 +4,84 @@ import numpy as np -## Processing in batches: +# Processing in batches: # -BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1)) +BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1)) -## Model properties: +# Model properties: # -MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT', - os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT', - os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT', - '')))) -MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH', - os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH', - 
os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH', - '')))) -MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3)) -MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW') -MODEL_COLOURS_BGR = os.getenv('CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in ('YES', 'yes', 'ON', 'on', '1') -MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32') -MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)') -MODEL_USE_DLA = os.getenv('CM_ML_MODEL_USE_DLA', 'NO') in ('YES', 'yes', 'ON', 'on', '1') -MODEL_MAX_BATCH_SIZE = int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE)) - - -## Internal processing: +MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT', + os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT', + '')))) +MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH', + os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH', + '')))) +MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3)) +MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW') +MODEL_COLOURS_BGR = os.getenv( + 'CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in ( + 'YES', 'yes', 'ON', 'on', '1') +MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32') +MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)') +MODEL_USE_DLA = os.getenv( + 'CM_ML_MODEL_USE_DLA', + 'NO') in ( + 'YES', + 'yes', + 'ON', + 'on', + '1') +MODEL_MAX_BATCH_SIZE = int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE)) + + +# Internal processing: # -INTERMEDIATE_DATA_TYPE = np.float32 # default for internal conversion -#INTERMEDIATE_DATA_TYPE = np.int8 # affects the accuracy a bit +INTERMEDIATE_DATA_TYPE = np.float32 # default for internal conversion +# INTERMEDIATE_DATA_TYPE = np.int8 # affects the accuracy a bit -## Image normalization: +# Image normalization: # -MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in ('YES', 'yes', 'ON', 'on', '1') -MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0)) -MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0)) -SUBTRACT_MEAN = os.getenv('CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in ('YES', 'yes', 'ON', 'on', '1') -GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '') +MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in ( + 'YES', 'yes', 'ON', 'on', '1') +MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0)) +MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0)) +SUBTRACT_MEAN = os.getenv( + 'CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in ( + 'YES', 'yes', 'ON', 'on', '1') +GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '') if GIVEN_CHANNEL_MEANS: - GIVEN_CHANNEL_MEANS = np.fromstring(GIVEN_CHANNEL_MEANS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE) + GIVEN_CHANNEL_MEANS = np.fromstring( + GIVEN_CHANNEL_MEANS, + dtype=np.float32, + sep=' ').astype(INTERMEDIATE_DATA_TYPE) if MODEL_COLOURS_BGR: - GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1] # swapping Red and Blue colour channels + # swapping Red and Blue colour channels + GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1] -GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '') +GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '') if GIVEN_CHANNEL_STDS: - GIVEN_CHANNEL_STDS = np.fromstring(GIVEN_CHANNEL_STDS, dtype=np.float32, sep=' ').astype(INTERMEDIATE_DATA_TYPE) + GIVEN_CHANNEL_STDS = np.fromstring( + 
GIVEN_CHANNEL_STDS, + dtype=np.float32, + sep=' ').astype(INTERMEDIATE_DATA_TYPE) if MODEL_COLOURS_BGR: - GIVEN_CHANNEL_STDS = GIVEN_CHANNEL_STDS[::-1] # swapping Red and Blue colour channels + # swapping Red and Blue colour channels + GIVEN_CHANNEL_STDS = GIVEN_CHANNEL_STDS[::-1] - -## ImageNet dataset properties: +# ImageNet dataset properties: # -LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] +LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] -## Preprocessed input images' properties: +# Preprocessed input images' properties: # -IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH') -IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32') +IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH') +IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32') def load_labels(labels_filepath): @@ -72,6 +91,7 @@ def load_labels(labels_filepath): my_labels.append(l.strip()) return my_labels + class_labels = load_labels(LABELS_PATH) @@ -82,41 +102,45 @@ def load_labels(labels_filepath): if image_file.endswith('.npy'): image_list.append(image_file) + def load_image_by_index_and_normalize(image_index): img_file = os.path.join(IMAGE_DIR, image_list[image_index]) img = np.fromfile(img_file, np.dtype(IMAGE_DATA_TYPE)) - #img = img.reshape((1,MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, 3)) - img.resize(224*224*3) - img = img.reshape((MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, MODEL_IMAGE_CHANNELS)) + # img = img.reshape((1,MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, 3)) + img.resize(224 * 224 * 3) + img = img.reshape( + (MODEL_IMAGE_HEIGHT, + MODEL_IMAGE_WIDTH, + MODEL_IMAGE_CHANNELS)) if MODEL_COLOURS_BGR: - img = img[...,::-1] # swapping Red and Blue colour channels + img = img[..., ::-1] # swapping Red and Blue colour channels if IMAGE_DATA_TYPE != 'float32': img = img.astype(np.float32) # Normalize if MODEL_NORMALIZE_DATA: - img /= (255.0/(MODEL_NORMALIZE_UPPER-MODEL_NORMALIZE_LOWER)) + img /= (255.0 / (MODEL_NORMALIZE_UPPER - MODEL_NORMALIZE_LOWER)) img += MODEL_NORMALIZE_LOWER # Subtract mean value if len(GIVEN_CHANNEL_MEANS): img -= GIVEN_CHANNEL_MEANS elif SUBTRACT_MEAN: - img -= np.mean(img, axis=(0,1), keepdims=True) + img -= np.mean(img, axis=(0, 1), keepdims=True) if len(GIVEN_CHANNEL_STDS): img /= GIVEN_CHANNEL_STDS - if MODEL_INPUT_DATA_TYPE == 'int8' or INTERMEDIATE_DATA_TYPE==np.int8: + if MODEL_INPUT_DATA_TYPE == 'int8' or INTERMEDIATE_DATA_TYPE == np.int8: img = np.clip(img, -128, 127).astype(INTERMEDIATE_DATA_TYPE) if MODEL_DATA_LAYOUT == 'NCHW': - img = img.transpose(2,0,1) + img = img.transpose(2, 0, 1) elif MODEL_DATA_LAYOUT == 'CHW4': - img = np.pad(img, ((0,0), (0,0), (0,1)), 'constant') + img = np.pad(img, ((0, 0), (0, 0), (0, 1)), 'constant') # Add img to batch return img.astype(MODEL_INPUT_DATA_TYPE) @@ -127,13 +151,15 @@ def load_preprocessed_batch(image_list, image_index): for in_batch_idx in range(BATCH_SIZE): img = load_image_by_index_and_normalize(image_index) if batch_data is None: - batch_data = np.empty( (BATCH_SIZE, *img.shape), dtype=MODEL_INPUT_DATA_TYPE) + batch_data = np.empty( + (BATCH_SIZE, *img.shape), dtype=MODEL_INPUT_DATA_TYPE) batch_data[in_batch_idx] = img image_index += 1 - #print('Data shape: {}'.format(batch_data.shape)) + # print('Data shape: {}'.format(batch_data.shape)) - if MODEL_USE_DLA and MODEL_MAX_BATCH_SIZE>len(batch_data): - return np.pad(batch_data, ((0,MODEL_MAX_BATCH_SIZE-len(batch_data)), (0,0), (0,0), (0,0)), 'constant'), image_index + if MODEL_USE_DLA and MODEL_MAX_BATCH_SIZE > 
len(batch_data):
+        return np.pad(batch_data, ((0, MODEL_MAX_BATCH_SIZE - len(batch_data)),
+                                   (0, 0), (0, 0), (0, 0)), 'constant'), image_index
     else:
         return batch_data, image_index
diff --git a/script/get-dataset-imagenet-train/customize.py b/script/get-dataset-imagenet-train/customize.py
index 2eba2b9b15..b1f8aea1ea 100644
--- a/script/get-dataset-imagenet-train/customize.py
+++ b/script/get-dataset-imagenet-train/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -10,7 +11,7 @@ def preprocess(i):
     meta = i['meta']
     os_info = i['os_info']
     if os_info['platform'] == 'windows':
-        return {'return':0}
+        return {'return': 0}

     env['CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no'

@@ -23,30 +24,31 @@ def preprocess(i):
             env['CM_DAE_TORRENT_PATH'] = path
             env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'

-            return {'return':0}
+            return {'return': 0}
         else:
-            return {'return':1, 'error':'Please rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path the folder containing full ImageNet training images} or envoke cm run script "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'}
-
+            return {'return': 1, 'error': 'Please rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path to the folder containing full ImageNet training images} or invoke cm run script "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'}

     elif not os.path.isdir(path):
         if path.endswith(".tar"):
-            #env['CM_DAE_FILEPATH'] = path
+            # env['CM_DAE_FILEPATH'] = path
             env['CM_EXTRACT_FILEPATH'] = path
             env['CM_DAE_ONLY_EXTRACT'] = 'yes'
-            return {'return':0}
+            return {'return': 0}
         else:
-            return {'return':1, 'error':'Path {} doesn\'t exist'.format(path)}
+            return {'return': 1,
+                    'error': 'Path {} doesn\'t exist'.format(path)}
     else:
         env['CM_EXTRACT_EXTRACTED_PATH'] = path
-        return {'return':0}
+        return {'return': 0}
+

 def postprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':0}
+        return {'return': 0}

     env = i['env']

@@ -55,12 +57,13 @@ def postprocess(i):
     path_tar = os.path.join(path, 'n01440764.tar')

     if not os.path.isfile(path_tar):
-        return {'return':1, 'error':'ImageNet file {} not found'.format(path_tar)}
+        return {'return': 1,
+                'error': 'ImageNet file {} not found'.format(path_tar)}

     env['CM_DATASET_PATH'] = path
     env['CM_DATASET_IMAGENET_PATH'] = path
     env['CM_DATASET_IMAGENET_TRAIN_PATH'] = path
-    env['CM_GET_DEPENDENT_CACHED_PATH'] =  path
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = path

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py
index 378b7927d2..2f32437350 100644
--- a/script/get-dataset-imagenet-val/customize.py
+++ b/script/get-dataset-imagenet-val/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -14,7 +15,13 @@ def preprocess(i):

     full = env.get('CM_IMAGENET_FULL', '').strip() == 'yes'

-    path = env.get('CM_INPUT', env.get('IMAGENET_PATH', env.get('CM_DATASET_IMAGENET_PATH', ''))).strip()
+    path = env.get(
+        'CM_INPUT',
+        env.get(
+            'IMAGENET_PATH',
+            env.get(
+                'CM_DATASET_IMAGENET_PATH',
+                ''))).strip()

     if path == '':
         if full:
@@ -24,31 +31,36 @@ def preprocess(i):
             env['CM_DAE_EXTRA_TAGS'] = "_torrent"
             env['CM_DAE_TORRENT_PATH'] = path
             env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
-            return {'return':0}
+            return {'return': 0}
         else:
            env['CM_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
            env['CM_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'

-           return {'return':0}
-           #return {'return':1, 'error':'Please rerun the last CM command with --env.IMAGENET_PATH={path the folder containing full ImageNet images} or envoke cm run script "get val dataset imagenet" --input={path to the folder containing ImageNet images}'}
+           return {'return': 0}
+           # return {'return':1, 'error':'Please rerun the last CM command
+           # with --env.IMAGENET_PATH={path to the folder containing full
+           # ImageNet images} or invoke cm run script "get val dataset
+           # imagenet" --input={path to the folder containing ImageNet
+           # images}'}

         else:
            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'

-
     elif not os.path.isdir(path):
         if path.endswith(".tar"):
             env['CM_EXTRACT_FILEPATH'] = path
             env['CM_DAE_ONLY_EXTRACT'] = 'yes'
-            return {'return':0}
+            return {'return': 0}
         else:
-            return {'return':1, 'error':'Path {} doesn\'t exist'.format(path)}
+            return {'return': 1,
+                    'error': 'Path {} doesn\'t exist'.format(path)}
     else:
         env['CM_EXTRACT_EXTRACTED_PATH'] = path

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):
@@ -63,16 +75,18 @@ def postprocess(i):

     path_image = os.path.join(path, 'ILSVRC2012_val_00000001.JPEG')

     if not os.path.isfile(path_image):
-        return {'return':1, 'error':'ImageNet file {} not found'.format(path_image)}
+        return {'return': 1,
+                'error': 'ImageNet file {} not found'.format(path_image)}

     files = os.listdir(path)

     if len(files) < int(env.get('CM_DATASET_SIZE', 0)):
-        return {'return':1, 'error':'Only {} files found in {}. {} expected'.format(len(files), path, env.get('CM_DATASET_SIZE'))}
+        return {'return': 1, 'error': 'Only {} files found in {}. {} expected'.format(
+            len(files), path, env.get('CM_DATASET_SIZE'))}

     env['CM_DATASET_PATH'] = path
     env['CM_DATASET_IMAGENET_PATH'] = path
     env['CM_DATASET_IMAGENET_VAL_PATH'] = path
-    env['CM_GET_DEPENDENT_CACHED_PATH'] =  path
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = path

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-dataset-kits19/customize.py b/script/get-dataset-kits19/customize.py
index 97c583e9d5..bf5462d9b4 100644
--- a/script/get-dataset-kits19/customize.py
+++ b/script/get-dataset-kits19/customize.py
@@ -2,18 +2,20 @@
 import os
 import shutil

+
 def preprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     env = i['env']
     meta = i['meta']

-    if not env.get('CM_GIT_CHECKOUT',''):
-        return {'return':1, 'error': 'Please provide a valid CM_GIT_SHA inside the custom variation of _cm.json'}
+    if not env.get('CM_GIT_CHECKOUT', ''):
+        return {
+            'return': 1, 'error': 'Please provide a valid CM_GIT_SHA inside the custom variation of _cm.json'}

     if 'CM_GIT_DEPTH' not in env:
         env['CM_GIT_DEPTH'] = ''
@@ -21,13 +23,13 @@ def preprocess(i):
     if 'CM_GIT_RECURSE_SUBMODULES' not in env:
         env['CM_GIT_RECURSE_SUBMODULES'] = ''

-    need_version = env.get('CM_VERSION','')
+    need_version = env.get('CM_VERSION', '')
     versions = meta['versions']

-    if need_version!='' and not need_version in versions:
+    if need_version != '' and not need_version in versions:
         env['CM_GIT_CHECKOUT'] = need_version

-    return {'return':0}
+    return {'return': 0}


 def postprocess(i):
@@ -36,4 +38,4 @@ def postprocess(i):

     env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data')
     state = 
i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-librispeech/customize.py b/script/get-dataset-librispeech/customize.py index 85ec8e43c9..0da64539eb 100644 --- a/script/get-dataset-librispeech/customize.py +++ b/script/get-dataset-librispeech/customize.py @@ -1,19 +1,22 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} def postprocess(i): env = i['env'] - folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0] - env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join(os.getcwd(), "LibriSpeech", folder_name) - env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), "LibriSpeech", folder_name) + folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0] + env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join( + os.getcwd(), "LibriSpeech", folder_name) + env['CM_DATASET_PATH'] = os.path.join( + os.getcwd(), "LibriSpeech", folder_name) - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-mlperf-inference-gnn/customize.py b/script/get-dataset-mlperf-inference-gnn/customize.py index bed3d59337..33105ffef2 100644 --- a/script/get-dataset-mlperf-inference-gnn/customize.py +++ b/script/get-dataset-mlperf-inference-gnn/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,13 +9,15 @@ def preprocess(i): env = i['env'] if os_info['platform'] == "windows": - return {'return':1, 'error': 'Script not supported in windows yet!'} + return {'return': 1, 'error': 'Script not supported in windows yet!'} - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") - #run cmd + # run cmd run_cmd = "" - graph_folder = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT') + graph_folder = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT') download_loc = env.get('CM_IGBH_DATASET_OUT_PATH', os.getcwd()) @@ -23,27 +26,33 @@ def preprocess(i): # download the model if env['CM_IGBH_DATASET_TYPE'] == "debug": - run_cmd += x_sep + env['CM_PYTHON_BIN_WITH_PATH'] + f" tools/download_igbh_test.py --target-path {download_loc}" + run_cmd += x_sep + env['CM_PYTHON_BIN_WITH_PATH'] + \ + f" tools/download_igbh_test.py --target-path {download_loc}" else: run_cmd += x_sep + f"./tools/download_igbh_full.sh {download_loc}" # split seeds - run_cmd += x_sep + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']}" + run_cmd += x_sep + \ + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']}" # compress graph(for glt implementation) if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes": - run_cmd += x_sep + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}" + run_cmd += x_sep + \ + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}" env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_IGBH_DATASET_PATH'] = env.get('CM_IGBH_DATASET_OUT_PATH', os.getcwd()) + env['CM_IGBH_DATASET_PATH'] = env.get( + 'CM_IGBH_DATASET_OUT_PATH', os.getcwd()) - print(f"Path to 
the IGBH dataset: {os.path.join(env['CM_IGBH_DATASET_PATH'], env['CM_IGBH_DATASET_SIZE'])}") + print( + f"Path to the IGBH dataset: {os.path.join(env['CM_IGBH_DATASET_PATH'], env['CM_IGBH_DATASET_SIZE'])}") - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-mlperf-inference-mixtral/customize.py b/script/get-dataset-mlperf-inference-mixtral/customize.py index 6f37e396be..38825cfdb1 100644 --- a/script/get-dataset-mlperf-inference-mixtral/customize.py +++ b/script/get-dataset-mlperf-inference-mixtral/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,9 +9,10 @@ def preprocess(i): env = i['env'] if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": - env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join(os.getcwd(), "mixtral-test-dataset.pkl") + env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join( + os.getcwd(), "mixtral-test-dataset.pkl") - return {'return':0} + return {'return': 0} def postprocess(i): @@ -21,4 +23,4 @@ def postprocess(i): if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py b/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py index cd449cf5b2..5e13f5b7a6 100644 --- a/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py +++ b/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py @@ -2,12 +2,25 @@ import argparse import os + def main(): # Set up argument parser - parser = argparse.ArgumentParser(description="Sample test dataset from the original dataset.") - parser.add_argument('--dataset-path', required=True, help="Path to the input dataset (pickle file).") - parser.add_argument('--output-path', default=os.path.join(os.getcwd(),"mixtral-test-dataset.pkl"), help="Path to save the output dataset (pickle file).") - parser.add_argument('--samples', default=2, help="Number of entries to be extracted from each group.") + parser = argparse.ArgumentParser( + description="Sample test dataset from the original dataset.") + parser.add_argument( + '--dataset-path', + required=True, + help="Path to the input dataset (pickle file).") + parser.add_argument( + '--output-path', + default=os.path.join( + os.getcwd(), + "mixtral-test-dataset.pkl"), + help="Path to save the output dataset (pickle file).") + parser.add_argument( + '--samples', + default=2, + help="Number of entries to be extracted from each group.") args = parser.parse_args() dataset_path = args.dataset_path @@ -21,11 +34,15 @@ def main(): # Check if 'group' column exists if 'dataset' not in df.columns: - raise ValueError("The input dataset must contain a 'dataset' column to identify data set groups.") + raise ValueError( + "The input dataset must contain a 'dataset' column to identify data set groups.") # Sample 2 entries from each group print(f"Sampling {no_of_samples} entries from each group...") - sampled_df = df.groupby('dataset').apply(lambda x: x.sample(n=no_of_samples)).reset_index(drop=True) + sampled_df = df.groupby('dataset').apply( + lambda x: x.sample( + n=no_of_samples)).reset_index( + drop=True) # Save the sampled dataset to the specified output path print(f"Saving the sampled dataset to {output_path}...") @@ -36,5 +53,6 @@ def main(): print(f"Error: {e}") exit(1) + if __name__ == '__main__': main() diff --git 
a/script/get-dataset-openimages-annotations/customize.py b/script/get-dataset-openimages-annotations/customize.py index d85402e2bf..f42ee200de 100644 --- a/script/get-dataset-openimages-annotations/customize.py +++ b/script/get-dataset-openimages-annotations/customize.py @@ -1,21 +1,24 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} def postprocess(i): env = i['env'] - env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(env['CM_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json') - env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname(env['CM_DATASET_ANNOTATIONS_FILE_PATH']) + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( + env['CM_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json') + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname( + env['CM_DATASET_ANNOTATIONS_FILE_PATH']) env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['CM_DATASET_ANNOTATIONS_DIR_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-openimages-calibration/customize.py b/script/get-dataset-openimages-calibration/customize.py index 71e1a646d4..032065bc86 100644 --- a/script/get-dataset-openimages-calibration/customize.py +++ b/script/get-dataset-openimages-calibration/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,13 +16,15 @@ def preprocess(i): if env.get("CM_CALIBRATE_FILTER", "") == "yes": i['run_script_input']['script_name'] = "run-filter" - env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join(os.getcwd(), "filtered.txt") + env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join( + os.getcwd(), "filtered.txt") env['CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-openimages-calibration/filter.py b/script/get-dataset-openimages-calibration/filter.py index 81b768249c..d8d2638b57 100644 --- a/script/get-dataset-openimages-calibration/filter.py +++ b/script/get-dataset-openimages-calibration/filter.py @@ -5,7 +5,7 @@ with open(sys.argv[1], "r") as f: data = json.load(f) -images= {} +images = {} for image in data['images']: images[image['id']] = image images[image['id']]['num_boxes'] = 0 @@ -13,8 +13,13 @@ annots = data['annotations'] for box in annots: imageid = box['image_id'] - images[imageid]['num_boxes']+=1 + images[imageid]['num_boxes'] += 1 -sorted_image_data = sorted(data['images'], key=lambda x: x['num_boxes'], reverse= os.environ.get('CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', '') == "yes") +sorted_image_data = sorted( + data['images'], + key=lambda x: x['num_boxes'], + reverse=os.environ.get( + 'CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', + '') == "yes") for image in data['images']: print(image['file_name']) diff --git a/script/get-dataset-openimages/customize.py b/script/get-dataset-openimages/customize.py index 84d1f52312..ae53a85f5c 100644 --- a/script/get-dataset-openimages/customize.py +++ b/script/get-dataset-openimages/customize.py @@ -2,86 +2,100 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] env = i['env'] - print ("") - print ("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] 
+"'") - print ("") + print("") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + print("") if os_info['platform'] == 'windows': - MLPERF_CLASSES=['Airplane','Antelope','Apple','Backpack','Balloon','Banana', - 'Barrel','Baseball bat','Baseball glove','Bee','Beer','Bench','Bicycle', - 'Bicycle helmet','Bicycle wheel','Billboard','Book','Bookcase','Boot', - 'Bottle','Bowl','Bowling equipment','Box','Boy','Brassiere','Bread', - 'Broccoli','Bronze sculpture','Bull','Bus','Bust','Butterfly','Cabinetry', - 'Cake','Camel','Camera','Candle','Candy','Cannon','Canoe','Carrot','Cart', - 'Castle','Cat','Cattle','Cello','Chair','Cheese','Chest of drawers','Chicken', - 'Christmas tree','Coat','Cocktail','Coffee','Coffee cup','Coffee table','Coin', - 'Common sunflower','Computer keyboard','Computer monitor','Convenience store', - 'Cookie','Countertop','Cowboy hat','Crab','Crocodile','Cucumber','Cupboard', - 'Curtain','Deer','Desk','Dinosaur','Dog','Doll','Dolphin','Door','Dragonfly', - 'Drawer','Dress','Drum','Duck','Eagle','Earrings','Egg (Food)','Elephant', - 'Falcon','Fedora','Flag','Flowerpot','Football','Football helmet','Fork', - 'Fountain','French fries','French horn','Frog','Giraffe','Girl','Glasses', - 'Goat','Goggles','Goldfish','Gondola','Goose','Grape','Grapefruit','Guitar', - 'Hamburger','Handbag','Harbor seal','Headphones','Helicopter','High heels', - 'Hiking equipment','Horse','House','Houseplant','Human arm','Human beard', - 'Human body','Human ear','Human eye','Human face','Human foot','Human hair', - 'Human hand','Human head','Human leg','Human mouth','Human nose','Ice cream', - 'Jacket','Jeans','Jellyfish','Juice','Kitchen & dining room table','Kite', - 'Lamp','Lantern','Laptop','Lavender (Plant)','Lemon','Light bulb','Lighthouse', - 'Lily','Lion','Lipstick','Lizard','Man','Maple','Microphone','Mirror', - 'Mixing bowl','Mobile phone','Monkey','Motorcycle','Muffin','Mug','Mule', - 'Mushroom','Musical keyboard','Necklace','Nightstand','Office building', - 'Orange','Owl','Oyster','Paddle','Palm tree','Parachute','Parrot','Pen', - 'Penguin','Personal flotation device','Piano','Picture frame','Pig','Pillow', - 'Pizza','Plate','Platter','Porch','Poster','Pumpkin','Rabbit','Rifle', - 'Roller skates','Rose','Salad','Sandal','Saucer','Saxophone','Scarf','Sea lion', - 'Sea turtle','Sheep','Shelf','Shirt','Shorts','Shrimp','Sink','Skateboard', - 'Ski','Skull','Skyscraper','Snake','Sock','Sofa bed','Sparrow','Spider','Spoon', - 'Sports uniform','Squirrel','Stairs','Stool','Strawberry','Street light', - 'Studio couch','Suit','Sun hat','Sunglasses','Surfboard','Sushi','Swan', - 'Swimming pool','Swimwear','Tank','Tap','Taxi','Tea','Teddy bear','Television', - 'Tent','Tie','Tiger','Tin can','Tire','Toilet','Tomato','Tortoise','Tower', - 'Traffic light','Train','Tripod','Truck','Trumpet','Umbrella','Van','Vase', - 'Vehicle registration plate','Violin','Wall clock','Waste container','Watch', - 'Whale','Wheel','Wheelchair','Whiteboard','Window','Wine','Wine glass','Woman', - 'Zebra','Zucchini'] + MLPERF_CLASSES = ['Airplane', 'Antelope', 'Apple', 'Backpack', 'Balloon', 'Banana', + 'Barrel', 'Baseball bat', 'Baseball glove', 'Bee', 'Beer', 'Bench', 'Bicycle', + 'Bicycle helmet', 'Bicycle wheel', 'Billboard', 'Book', 'Bookcase', 'Boot', + 'Bottle', 'Bowl', 'Bowling equipment', 'Box', 'Boy', 'Brassiere', 'Bread', + 'Broccoli', 'Bronze sculpture', 'Bull', 'Bus', 'Bust', 'Butterfly', 'Cabinetry', + 'Cake', 'Camel', 'Camera', 'Candle', 'Candy', 'Cannon', 'Canoe', 
'Carrot', 'Cart', + 'Castle', 'Cat', 'Cattle', 'Cello', 'Chair', 'Cheese', 'Chest of drawers', 'Chicken', + 'Christmas tree', 'Coat', 'Cocktail', 'Coffee', 'Coffee cup', 'Coffee table', 'Coin', + 'Common sunflower', 'Computer keyboard', 'Computer monitor', 'Convenience store', + 'Cookie', 'Countertop', 'Cowboy hat', 'Crab', 'Crocodile', 'Cucumber', 'Cupboard', + 'Curtain', 'Deer', 'Desk', 'Dinosaur', 'Dog', 'Doll', 'Dolphin', 'Door', 'Dragonfly', + 'Drawer', 'Dress', 'Drum', 'Duck', 'Eagle', 'Earrings', 'Egg (Food)', 'Elephant', + 'Falcon', 'Fedora', 'Flag', 'Flowerpot', 'Football', 'Football helmet', 'Fork', + 'Fountain', 'French fries', 'French horn', 'Frog', 'Giraffe', 'Girl', 'Glasses', + 'Goat', 'Goggles', 'Goldfish', 'Gondola', 'Goose', 'Grape', 'Grapefruit', 'Guitar', + 'Hamburger', 'Handbag', 'Harbor seal', 'Headphones', 'Helicopter', 'High heels', + 'Hiking equipment', 'Horse', 'House', 'Houseplant', 'Human arm', 'Human beard', + 'Human body', 'Human ear', 'Human eye', 'Human face', 'Human foot', 'Human hair', + 'Human hand', 'Human head', 'Human leg', 'Human mouth', 'Human nose', 'Ice cream', + 'Jacket', 'Jeans', 'Jellyfish', 'Juice', 'Kitchen & dining room table', 'Kite', + 'Lamp', 'Lantern', 'Laptop', 'Lavender (Plant)', 'Lemon', 'Light bulb', 'Lighthouse', + 'Lily', 'Lion', 'Lipstick', 'Lizard', 'Man', 'Maple', 'Microphone', 'Mirror', + 'Mixing bowl', 'Mobile phone', 'Monkey', 'Motorcycle', 'Muffin', 'Mug', 'Mule', + 'Mushroom', 'Musical keyboard', 'Necklace', 'Nightstand', 'Office building', + 'Orange', 'Owl', 'Oyster', 'Paddle', 'Palm tree', 'Parachute', 'Parrot', 'Pen', + 'Penguin', 'Personal flotation device', 'Piano', 'Picture frame', 'Pig', 'Pillow', + 'Pizza', 'Plate', 'Platter', 'Porch', 'Poster', 'Pumpkin', 'Rabbit', 'Rifle', + 'Roller skates', 'Rose', 'Salad', 'Sandal', 'Saucer', 'Saxophone', 'Scarf', 'Sea lion', + 'Sea turtle', 'Sheep', 'Shelf', 'Shirt', 'Shorts', 'Shrimp', 'Sink', 'Skateboard', + 'Ski', 'Skull', 'Skyscraper', 'Snake', 'Sock', 'Sofa bed', 'Sparrow', 'Spider', 'Spoon', + 'Sports uniform', 'Squirrel', 'Stairs', 'Stool', 'Strawberry', 'Street light', + 'Studio couch', 'Suit', 'Sun hat', 'Sunglasses', 'Surfboard', 'Sushi', 'Swan', + 'Swimming pool', 'Swimwear', 'Tank', 'Tap', 'Taxi', 'Tea', 'Teddy bear', 'Television', + 'Tent', 'Tie', 'Tiger', 'Tin can', 'Tire', 'Toilet', 'Tomato', 'Tortoise', 'Tower', + 'Traffic light', 'Train', 'Tripod', 'Truck', 'Trumpet', 'Umbrella', 'Van', 'Vase', + 'Vehicle registration plate', 'Violin', 'Wall clock', 'Waste container', 'Watch', + 'Whale', 'Wheel', 'Wheelchair', 'Whiteboard', 'Window', 'Wine', 'Wine glass', 'Woman', + 'Zebra', 'Zucchini'] x = '' for v in MLPERF_CLASSES: - if x!='': x+=' ' - x+='"'+v+'"' - env['CM_DATASET_OPENIMAGES_CLASSES']=x + if x != '': + x += ' ' + x += '"' + v + '"' + env['CM_DATASET_OPENIMAGES_CLASSES'] = x return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'annotations') + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( + os.getcwd(), 'install', 'annotations') - if env.get('CM_DATASET_CALIBRATION','') == "no": + if env.get('CM_DATASET_CALIBRATION', '') == "no": env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') - env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') - annotations_file_path = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") + env['CM_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'validation', 
'data') + annotations_file_path = os.path.join( + env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + "openimages-mlperf.json") env['CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path env['CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path - if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS",'') == "yes": + if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes": annotations_file_src = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] - shutil.copy(annotations_file_src, env['CM_DATASET_ANNOTATIONS_DIR_PATH']) + shutil.copy( + annotations_file_src, + env['CM_DATASET_ANNOTATIONS_DIR_PATH']) env['CM_DATASET_OPENIMAGES_PATH'] = env['CM_DATASET_PATH'] env['CM_DATASET_OPENIMAGES_PATH_ROOT'] = env['CM_DATASET_PATH_ROOT'] else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') - env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') - env['CM_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') - annotations_file_path = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-calibration-mlperf.json") + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + env['CM_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join( + os.getcwd(), 'install') + annotations_file_path = os.path.join( + env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + "openimages-calibration-mlperf.json") env['CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path - return {'return': 0} diff --git a/script/get-dataset-openorca/customize.py b/script/get-dataset-openorca/customize.py index 059c83826d..db4c116ea9 100644 --- a/script/get-dataset-openorca/customize.py +++ b/script/get-dataset-openorca/customize.py @@ -2,19 +2,23 @@ import os import shutil + def preprocess(i): env = i['env'] return {'return': 0} + def postprocess(i): env = i['env'] - if env.get('CM_DATASET_CALIBRATION','') == "no": + if env.get('CM_DATASET_CALIBRATION', '') == "no": env['CM_DATASET_PATH_ROOT'] = env['CM_DATASET_OPENORCA_PATH'] env['CM_DATASET_PATH'] = env['CM_DATASET_OPENORCA_PATH'] - env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join(env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') + env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join( + env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') else: - env['CM_CALIBRATION_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'calibration', 'data') + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') return {'return': 0} diff --git a/script/get-dataset-squad-vocab/customize.py b/script/get-dataset-squad-vocab/customize.py index cf869b0094..40d3c5dd56 100644 --- a/script/get-dataset-squad-vocab/customize.py +++ b/script/get-dataset-squad-vocab/customize.py @@ -1,13 +1,14 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} def postprocess(i): @@ -15,4 +16,4 @@ def postprocess(i): env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['CM_DATASET_SQUAD_VOCAB_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-dataset-squad/customize.py b/script/get-dataset-squad/customize.py index fbc01370f1..3af4c85535 100644 --- 
a/script/get-dataset-squad/customize.py
+++ b/script/get-dataset-squad/customize.py
@@ -1,20 +1,22 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']

     env = i['env']

-    return {'return':0}
+    return {'return': 0}


 def postprocess(i):

     env = i['env']

-    env['CM_DATASET_SQUAD_PATH'] = os.path.dirname(env['CM_DATASET_SQUAD_VAL_PATH'])
+    env['CM_DATASET_SQUAD_PATH'] = os.path.dirname(
+        env['CM_DATASET_SQUAD_VAL_PATH'])
     env['CM_DATASET_PATH'] = os.path.dirname(env['CM_DATASET_SQUAD_VAL_PATH'])
-    #env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME'])
+    # env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME'])

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-dlrm-data-mlperf-inference/customize.py b/script/get-dlrm-data-mlperf-inference/customize.py
index 0d1c878f9c..ebec12749e 100644
--- a/script/get-dlrm-data-mlperf-inference/customize.py
+++ b/script/get-dlrm-data-mlperf-inference/customize.py
@@ -1,18 +1,22 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']

     env = i['env']

-    dlrm_data_path = env.get('CM_DLRM_DATA_PATH', env.get('DLRM_DATA_PATH', ''))
+    dlrm_data_path = env.get(
+        'CM_DLRM_DATA_PATH', env.get(
+            'DLRM_DATA_PATH', ''))
     if dlrm_data_path == '':
-        print(f'Data path is not given as input through --dlrm_data_path. Using the cache directory:{os.getcwd()} as the data path')
+        print(
+            f'Data path is not given as input through --dlrm_data_path. Using the cache directory: {os.getcwd()} as the data path')
         dlrm_data_path = os.getcwd()
     elif not os.path.exists(dlrm_data_path):
-        return {'return':1, 'error':"given dlrm data path does not exists"}
+        return {'return': 1, 'error': "given dlrm data path does not exist"}

     # creating required folders inside the dlrm data path if not exists
     # criteo dataset
@@ -27,7 +31,7 @@ def preprocess(i):

     meta = i['meta']

-    script_path=i['run_script_input']['path']
+    script_path = i['run_script_input']['path']

     automation = i['automation']

@@ -42,70 +46,110 @@ def preprocess(i):
     if not os.path.exists(os.path.join(dlrm_data_path, "criteo")):
         print(f'criteo directory is missing inside {dlrm_data_path}')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "model", "model_weights")):
-        print(f'model_weights directory is missing inside {dlrm_data_path}/model')
+    if not os.path.exists(os.path.join(
+            dlrm_data_path, "model", "model_weights")):
+        print(
+            f'model_weights directory is missing inside {dlrm_data_path}/model')
         env['CM_DLRM_MODEL_DOWNLOAD'] = True
     if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23")):
         print(f'day23 directory is missing inside {dlrm_data_path}/day23')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32")):
-        print(f'fp32 directory is missing inside {dlrm_data_path}/criteo/day23')
+    if not os.path.exists(os.path.join(
+            dlrm_data_path, "criteo", "day23", "fp32")):
+        print(
+            f'fp32 directory is missing inside {dlrm_data_path}/criteo/day23')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")):
-        print(f'day_23_sparse_multi_hot.npz or day_23_sparse_multi_hot_unpacked is missing inside {dlrm_data_path}/criteo/day23/fp32')
+    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists(
+            os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")):
+        print(
+            f'day_23_sparse_multi_hot.npz or day_23_sparse_multi_hot_unpacked is missing inside {dlrm_data_path}/criteo/day23/fp32')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")):
-        print(f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
+    if not os.path.exists(os.path.join(
+            dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")):
+        print(
+            f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")):
-        print(f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
+    if not os.path.exists(os.path.join(
+            dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")):
+        print(
+            f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32')
         env['CM_DLRM_DATASET_DOWNLOAD'] = True
-    if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "raw_data")):
+    if not os.path.exists(os.path.join(
+            dlrm_data_path, "criteo", "day23", "raw_data")):
        if env.get('CM_CRITEO_DAY23_RAW_DATA_PATH', '') == '':
-           return {'return':1, 'error':'Raw data missing inside {dlrm_data_path}/criteo/day23. Specify the target folder through input mapping(--criteo_day23_raw_data_path="path to raw criteo dataset")'}
+           return {
+               'return': 1, 'error': f'Raw data missing inside {dlrm_data_path}/criteo/day23. Specify the target folder through input mapping(--criteo_day23_raw_data_path="path to raw criteo dataset")'}

     run_cmd = ''
     xsep = ' && '

     # addition of run command to download the datasets and model
     if env.get('CM_DLRM_DATASET_DOWNLOAD', False) == True:
-        run_cmd += 'cp -r "$CM_CRITEO_PREPROCESSED_PATH"/. ' + os.path.join(dlrm_data_path,"criteo","day23","fp32") + xsep
+        run_cmd += 'cp -r "$CM_CRITEO_PREPROCESSED_PATH"/. ' + \
+            os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + xsep
     if env.get('CM_DLRM_MODEL_DOWNLOAD', False) == True:
-        run_cmd += 'cp -r "$CM_ML_MODEL_FILE_WITH_PATH"/. ' + os.path.join(dlrm_data_path, "model") + xsep
+        run_cmd += 'cp -r "$CM_ML_MODEL_FILE_WITH_PATH"/. 
' + \ + os.path.join(dlrm_data_path, "model") + xsep if env.get('CM_DLRM_DATASET_DOWNLOAD', '') != True: - if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}") else: run_cmd += f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}" + xsep - if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) or env['CM_DLRM_DATASET_DOWNLOAD'] == True: - file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz") - run_cmd += ("echo {} {} | md5sum -c").format('c46b7e31ec6f2f8768fa60bdfc0f6e40', file_path) + xsep - - file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy") - run_cmd += ("echo {} {} | md5sum -c").format('cdf7af87cbc7e9b468c0be46b1767601', file_path) + xsep - - file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy") - run_cmd += ("echo {} {} | md5sum -c").format('dd68f93301812026ed6f58dfb0757fa7', file_path) + xsep + if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", + "day_23_sparse_multi_hot.npz")) or env['CM_DLRM_DATASET_DOWNLOAD'] == True: + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_sparse_multi_hot.npz") + run_cmd += ("echo {} {} | md5sum -c").format( + 'c46b7e31ec6f2f8768fa60bdfc0f6e40', file_path) + xsep + + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_dense.npy") + run_cmd += ("echo {} {} | md5sum -c").format( + 'cdf7af87cbc7e9b468c0be46b1767601', file_path) + xsep + + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_labels.npy") + run_cmd += ("echo {} {} | md5sum -c").format( + 'dd68f93301812026ed6f58dfb0757fa7', file_path) + xsep dir_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32") - run_cmd += ("cd {}; md5sum -c {}").format(dir_path, os.path.join(script_path, "checksums.txt" )) + run_cmd += ("cd {}; md5sum -c {}").format(dir_path, + os.path.join(script_path, "checksums.txt")) - env['CM_DLRM_V2_DAY23_FILE_PATH'] = os.path.join(dlrm_data_path, "criteo", "day23", "raw_data") - env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join(dlrm_data_path, "criteo", "day23", "sample_partition.txt") + env['CM_DLRM_V2_DAY23_FILE_PATH'] = os.path.join( + dlrm_data_path, "criteo", "day23", "raw_data") + env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join( + dlrm_data_path, "criteo", "day23", "sample_partition.txt") env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - if env.get('CM_DLRM_DATA_PATH', '') == '' and env.get('DLRM_DATA_PATH', '') == '': + if env.get('CM_DLRM_DATA_PATH', '') == '' and env.get( + 'DLRM_DATA_PATH', '') == '': env['CM_DLRM_DATA_PATH'] = os.getcwd() else: - env['CM_GET_DEPENDENT_CACHED_PATH'] = env.get('CM_DLRM_DATA_PATH', env['DLRM_DATA_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env.get( + 'CM_DLRM_DATA_PATH', env['DLRM_DATA_PATH']) - return {'return':0} + return 
{'return': 0}
diff --git a/script/get-dlrm/customize.py b/script/get-dlrm/customize.py
index 5b4959942a..561545c67d 100644
--- a/script/get-dlrm/customize.py
+++ b/script/get-dlrm/customize.py
@@ -2,12 +2,13 @@
 import os
 import shutil

+
 def preprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     env = i['env']
     meta = i['meta']
@@ -18,13 +19,13 @@ def preprocess(i):
     if 'CM_GIT_RECURSE_SUBMODULES' not in env:
         env['CM_GIT_RECURSE_SUBMODULES'] = ''

-    need_version = env.get('CM_VERSION','')
+    need_version = env.get('CM_VERSION', '')
     versions = meta['versions']

-    if need_version!='' and not need_version in versions:
+    if need_version != '' and not need_version in versions:
         env['CM_GIT_CHECKOUT'] = need_version

-    return {'return':0}
+    return {'return': 0}


 def postprocess(i):
@@ -33,4 +34,4 @@ def postprocess(i):

     env['DLRM_DIR'] = os.path.join(os.getcwd(), "dlrm")

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py
index 322a087baa..a3768abcaf 100644
--- a/script/get-docker/customize.py
+++ b/script/get-docker/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -16,49 +17,55 @@ def preprocess(i):

     if 'CM_DOCKER_BIN_WITH_PATH' not in env:
         r = i['automation'].find_artifact({'file_name': file_name,
-                                           'env': env,
-                                           'os_info':os_info,
-                                           'default_path_env_key': 'PATH',
-                                           'detect_version':True,
-                                           'env_path_key':'CM_DOCKER_BIN_WITH_PATH',
-                                           'run_script_input':i['run_script_input'],
-                                           'recursion_spaces':recursion_spaces})
-        if r['return'] >0 :
+                                           'env': env,
+                                           'os_info': os_info,
+                                           'default_path_env_key': 'PATH',
+                                           'detect_version': True,
+                                           'env_path_key': 'CM_DOCKER_BIN_WITH_PATH',
+                                           'run_script_input': i['run_script_input'],
+                                           'recursion_spaces': recursion_spaces})
+        if r['return'] > 0:
             if r['return'] == 16:
                 run_file_name = "install"
-                r = automation.run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':run_file_name})
-                if r['return'] >0: return r
+                r = automation.run_native_script(
+                    {'run_script_input': i['run_script_input'], 'env': env, 'script_name': run_file_name})
+                if r['return'] > 0:
+                    return r
             else:
                 return r

-    return {'return':0}
+    return {'return': 0}
+

 def detect_version(i):
     r = i['automation'].parse_version({'match_text': r'[Docker|podman] version\s*([\d.]+)',
                                        'group_number': 1,
-                                       'env_key':'CM_DOCKER_VERSION',
-                                       'which_env':i['env']})
-    if r['return'] >0: return r
+                                       'env_key': 'CM_DOCKER_VERSION',
+                                       'which_env': i['env']})
+    if r['return'] > 0:
+        return r

     version = r['version']

-    print (i['recursion_spaces'] + '    Detected version: {}'.format(version))
-    return {'return':0, 'version':version}
+    print(i['recursion_spaces'] + '    Detected version: {}'.format(version))
+    return {'return': 0, 'version': version}
+

 def postprocess(i):

     env = i['env']

     r = detect_version(i)
-    if r['return'] >0: return r
+    if r['return'] > 0:
+        return r

     version = r['version']
     found_file_path = env['CM_DOCKER_BIN_WITH_PATH']

     found_path = os.path.dirname(found_file_path)
     env['CM_DOCKER_INSTALLED_PATH'] = found_path
-    env['+PATH'] = [ found_path ]
+    env['+PATH'] = [found_path]

-    env['CM_DOCKER_CACHE_TAGS'] = 'version-'+version
+    env['CM_DOCKER_CACHE_TAGS'] = 'version-' + version

-    return {'return':0, 'version': version}
+    return {'return': 0, 'version': version}
diff --git 
a/script/get-gcc/customize.py b/script/get-gcc/customize.py index b29f38e13b..9dc79c3ee8 100644 --- a/script/get-gcc/customize.py +++ b/script/get-gcc/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,7 +12,8 @@ def preprocess(i): file_name_c = 'gcc.exe' if os_info['platform'] == 'windows' else 'gcc' if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': - if "12" in env.get('CM_VERSION', '') or "12" in env.get('CM_VERSION_MIN', ''): + if "12" in env.get('CM_VERSION', '') or "12" in env.get( + 'CM_VERSION_MIN', ''): if env.get('CM_TMP_PATH', '') == '': env['CM_TMP_PATH'] = '' env['CM_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin" @@ -20,52 +22,55 @@ def preprocess(i): if 'CM_GCC_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name_c, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_GCC_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : -# if r['return'] == 16: -# if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': -# return r -# -# print (recursion_spaces+' # {}'.format(r['error'])) -# -# # Attempt to run installer -# r = {'return':0, 'skip':True, 'script':{'tags':'install,gcc,src'}} + 'detect_version': True, + 'env_path_key': 'CM_GCC_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + # if r['return'] == 16: + # if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + # return r + # + # print (recursion_spaces+' # {}'.format(r['error'])) + # + # # Attempt to run installer + # r = {'return':0, 'skip':True, 'script':{'tags':'install,gcc,src'}} return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r' \(.*\)\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_GCC_VERSION', - 'which_env':i['env']}) - if r['return'] >0: + 'env_key': 'CM_GCC_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: if 'clang' in r['error']: - return {'return':0, 'version':-1} + return {'return': 0, 'version': -1} return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r env['CM_COMPILER_FAMILY'] = 'GCC' version = r['version'] env['CM_COMPILER_VERSION'] = env['CM_GCC_VERSION'] - env['CM_GCC_CACHE_TAGS'] = 'version-'+version - env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-gcc' + env['CM_GCC_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc' found_file_path = env['CM_GCC_BIN_WITH_PATH'] @@ -75,21 +80,21 @@ def postprocess(i): file_name_c = os.path.basename(found_file_path) # G: changed next line to handle cases like gcc-8 - file_name_cpp = file_name_c.replace('gcc','g++') + file_name_cpp = file_name_c.replace('gcc', 'g++') env['FILE_NAME_CPP'] = file_name_cpp - env['CM_GCC_BIN']=file_name_c + env['CM_GCC_BIN'] = file_name_c # General compiler for general program compilation - env['CM_C_COMPILER_BIN']=file_name_c - env['CM_C_COMPILER_FLAG_OUTPUT']='-o ' - env['CM_C_COMPILER_WITH_PATH']=found_file_path - 
env['CM_C_COMPILER_FLAG_VERSION']='--version' + env['CM_C_COMPILER_BIN'] = file_name_c + env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_C_COMPILER_WITH_PATH'] = found_file_path + env['CM_C_COMPILER_FLAG_VERSION'] = '--version' - env['CM_CXX_COMPILER_BIN']=file_name_cpp - env['CM_CXX_COMPILER_WITH_PATH']=os.path.join(found_path, file_name_cpp) - env['CM_CXX_COMPILER_FLAG_OUTPUT']='-o ' - env['CM_CXX_COMPILER_FLAG_VERSION']='--version' + env['CM_CXX_COMPILER_BIN'] = file_name_cpp + env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version' env['CM_COMPILER_FLAGS_FAST'] = "-O3" env['CM_LINKER_FLAGS_FAST'] = "-O3" @@ -98,5 +103,4 @@ def postprocess(i): env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2" env['CM_LINKER_FLAGS_DEFAULT'] = "-O2" - - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py index ea27ec6603..4b837a79d7 100644 --- a/script/get-generic-python-lib/customize.py +++ b/script/get-generic-python-lib/customize.py @@ -2,6 +2,7 @@ import os import cmind as cm + def preprocess(i): os_info = i['os_info'] @@ -13,21 +14,24 @@ def preprocess(i): package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() if package_name == '': - return automation._available_variations({'meta':meta}) + return automation._available_variations({'meta': meta}) if package_name == "onnxruntime_gpu": # https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements # 20240214: ONNXRuntime 1.17.0 now support CUDA 12 so we remove next check - # TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12, we should add a check to fail ... - cuda_version = env.get('CM_CUDA_VERSION','').strip() + # TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12, + # we should add a check to fail ... 
+ cuda_version = env.get('CM_CUDA_VERSION', '').strip() # if cuda_version!='': # cuda_version_split = cuda_version.split('.') # if int(cuda_version_split[0]) >= 12: # # env['CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes" -# return {'return': 1, 'error':'at this moment, PIP package "onnxruntime_gpu" needs CUDA < 12'} +# return {'return': 1, 'error':'at this moment, PIP package +# "onnxruntime_gpu" needs CUDA < 12'} - extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA','') - if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and ('--break-system-packages' not in extra): + extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA', '') + if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and ( + '--break-system-packages' not in extra): extra += ' --break-system-packages ' env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" @@ -38,111 +42,135 @@ def preprocess(i): env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --user" if env.get('CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '': - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'uninstall_deps'}) - if r['return']>0: return r + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'uninstall_deps'}) + if r['return'] > 0: + return r prepare_env_key = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') for x in ["-", "[", "]"]: - prepare_env_key = prepare_env_key.replace(x,"_") + prepare_env_key = prepare_env_key.replace(x, "_") env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper() recursion_spaces = i['recursion_spaces'] r = automation.detect_version_using_script({ - 'env': env, - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - - force_install = (env.get('CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL', '') in ['yes', 'true', 'True', True]) - - if r['return'] >0 or force_install: + 'env': env, + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + + force_install = ( + env.get( + 'CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL', + '') in [ + 'yes', + 'true', + 'True', + True]) + + if r['return'] > 0 or force_install: if r['return'] == 16 or force_install: # Clean detected version env if exists otherwise takes detected version # for example, when we reinstall generic python lib package - env_version_key = 'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' - if env.get(env_version_key,'')!='': - del(env[env_version_key]) + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + if env.get(env_version_key, '') != '': + del (env[env_version_key]) # Check if upgrade if force_install: - extra+=' --upgrade --no-deps --force-reinstall' + extra += ' --upgrade --no-deps --force-reinstall' # Check index URL - index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL','').strip() + index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL', '').strip() if index_url != '': # Check special cases if '${CM_TORCH_CUDA}' in index_url: - index_url=index_url.replace('${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + index_url = index_url.replace( + '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) - extra += ' --index-url '+index_url + extra += ' --index-url ' + index_url # Check extra index URL - extra_index_url = env.get('CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL','').strip() + extra_index_url = env.get( + 'CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip() if extra_index_url != '': # Check special cases if '${CM_TORCH_CUDA}' in extra_index_url: - 
extra_index_url=extra_index_url.replace('${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + extra_index_url = extra_index_url.replace( + '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) - extra += ' --extra-index-url '+extra_index_url + extra += ' --extra-index-url ' + extra_index_url # Check update - if env.get('CM_GENERIC_PYTHON_PIP_UPDATE','') in [True,'true','yes','on']: - extra +=' -U' + if env.get('CM_GENERIC_PYTHON_PIP_UPDATE', '') in [ + True, 'true', 'yes', 'on']: + extra += ' -U' - print ('') - print (recursion_spaces + ' Extra PIP CMD: ' + extra) - print ('') + print('') + print(recursion_spaces + ' Extra PIP CMD: ' + extra) + print('') env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + + if r['return'] > 0: + return r - if r['return']>0: return r + return {'return': 0} - return {'return':0} def detect_version(i): env = i['env'] - env_version_key = 'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' r = i['automation'].parse_version({'match_text': r'\s*([\d.a-z\-]+)', 'group_number': 1, - 'env_key':env_version_key, - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': env_version_key, + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] current_detected_version = version - if env.get('CM_TMP_SILENT','')!='yes': - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + if env.get('CM_TMP_SILENT', '') != 'yes': + print( + i['recursion_spaces'] + + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + return {'return': 0, 'version': version} def postprocess(i): env = i['env'] - env_version_key = 'CM_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper()+'_VERSION' + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' - if env.get(env_version_key,'')!='': + if env.get(env_version_key, '') != '': version = env[env_version_key] else: r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] - env['CM_PYTHONLIB_'+env['CM_TMP_PYTHON_PACKAGE_NAME_ENV']+'_CACHE_TAGS'] = 'version-'+version + env['CM_PYTHONLIB_' + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] + + '_CACHE_TAGS'] = 'version-' + version import pkgutil package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() - package=pkgutil.get_loader(package_name) + package = pkgutil.get_loader(package_name) if package: installed_file_path = package.get_filename() env['CM_GET_DEPENDENT_CACHED_PATH'] = installed_file_path @@ -151,4 +179,4 @@ def postprocess(i): if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-generic-python-lib/detect-version.py b/script/get-generic-python-lib/detect-version.py index b80edf858d..001c39b372 100644 --- a/script/get-generic-python-lib/detect-version.py +++ b/script/get-generic-python-lib/detect-version.py @@ -1,7 +1,7 @@ import os import sys -package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME','') +package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') filename = 'tmp-ver.out' @@ -25,11 +25,12 @@ version = 
pkg_resources.get_distribution(package_name).version error = '' except Exception as e: - if error!='': error += '\n' + if error != '': + error += '\n' error += format(e) # We generally skip error since it usually means that # package is not installed with open(filename, 'w') as file: - file.write(str(version)+'\n') + file.write(str(version) + '\n') diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py index 54c84cacb3..6b69e52d85 100644 --- a/script/get-generic-sys-util/customize.py +++ b/script/get-generic-sys-util/customize.py @@ -2,6 +2,7 @@ import os import re + def preprocess(i): os_info = i['os_info'] @@ -10,26 +11,29 @@ def preprocess(i): state = i['state'] automation = i['automation'] - #Use VERSION_CMD and CHECK_CMD if no CHECK_CMD is set - if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' and env.get('CM_SYS_UTIL_CHECK_CMD', '') == '': + # Use VERSION_CMD and CHECK_CMD if no CHECK_CMD is set + if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' and env.get( + 'CM_SYS_UTIL_CHECK_CMD', '') == '': env['CM_SYS_UTIL_CHECK_CMD'] = env['CM_SYS_UTIL_VERSION_CMD'] if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "install": i['run_script_input']['script_name'] = "install" if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect": - if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '': - r = automation.run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':'detect'}) - if r['return'] != 0: #detection failed, do install via prehook_deps + if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get( + 'CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '': + r = automation.run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'}) + if r['return'] != 0: # detection failed, do install via prehook_deps print("detection failed, going for installation") env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes" return {'return': 0} - else: #detection is successful, no need to install - #print("detection success") + else: # detection is successful, no need to install + # print("detection success") env['CM_SYS_UTIL_INSTALL_CMD'] = "" return {'return': 0} - else: #No detction command available, just install - #print("No detection possible, going for installation") + else: # No detection command available, just install + # print("No detection possible, going for installation") env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes" return {'return': 0} @@ -37,7 +41,8 @@ def preprocess(i): pm = env.get('CM_HOST_OS_PACKAGE_MANAGER') util = env.get('CM_SYS_UTIL_NAME', '') if util == '': - return {'return': 1, 'error': 'Please select a variation specifying the sys util name'} + return { + 'return': 1, 'error': 'Please select a variation specifying the sys util name'} package = state.get(util) package_name = None @@ -45,30 +50,34 @@ def preprocess(i): package_name = package.get(pm) if os_info['platform'] == 'windows' and not package_name: - print ('') - print ('WARNING: for now skipping get-generic-sys-util on Windows ...') - print ('') + print('') + print('WARNING: for now skipping get-generic-sys-util on Windows ...') + print('') - return {'return':0} + return {'return': 0} if not pm: return {'return': 1, 'error': 'Package manager not detected for the given OS'} - if not package: - return {'return': 1, 'error': f'No package name specified for {util} in the meta'} + return {'return': 1, + 'error': f'No package name specified for {util} in the meta'} if not
package_name: - if str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '')).lower() in [ "1", "true", "yes" ]: - print(f"WARNING: No package name specified for {pm} and util name {util}. Ignoring it...") + if str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '') + ).lower() in ["1", "true", "yes"]: + print( + f"WARNING: No package name specified for {pm} and util name {util}. Ignoring it...") env['CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes' return {'return': 0} else: - return {'return': 1, 'error': f'No package name specified for {pm} and util name {util}'} + return { + 'return': 1, 'error': f'No package name specified for {pm} and util name {util}'} if util == "libffi": if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu": - if env.get("CM_HOST_OS_VERSION", "") in [ "20.04", "20.10", "21.04", "21.10" ]: + if env.get("CM_HOST_OS_VERSION", "") in [ + "20.04", "20.10", "21.04", "21.10"]: package_name = "libffi7" else: package_name = "libffi8" @@ -77,36 +86,39 @@ def preprocess(i): tmp_values = re.findall(r'<<<(.*?)>>>', str(package_name)) for tmp_value in tmp_values: if tmp_value not in env: - return {'return':1, 'error':'variable {} is not in env'.format(tmp_value)} + return {'return': 1, + 'error': 'variable {} is not in env'.format(tmp_value)} if tmp_value in env: - if type(package_name) == str: - package_name = package_name.replace("<<<"+tmp_value+">>>", str(env[tmp_value])) + if isinstance(package_name, str): + package_name = package_name.replace("<<<" + tmp_value + ">>>", str(env[tmp_value])) install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') if not install_cmd: - return {'return': 1, 'error': 'Package manager installation command not detected for the given OS'} + return { + 'return': 1, 'error': 'Package manager installation command not detected for the given OS'} if pm == "brew": sudo = '' else: sudo = env.get('CM_SUDO', '') - env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + ' ' +install_cmd + ' ' + package_name + env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + \ + ' ' + install_cmd + ' ' + package_name env['+PATH'] = [] if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': if env['CM_SYS_UTIL_NAME'] == "g++12": - env['+PATH'] = [ "/opt/rh/gcc-toolset-12/root/usr/bin" ] + env['+PATH'] = ["/opt/rh/gcc-toolset-12/root/usr/bin"] - if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [ "9.1", "9.2", "9.3" ]: + if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [ + "9.1", "9.2", "9.3"]: env['CM_SYS_UTIL_INSTALL_CMD'] = '' - if env.get('CM_SYS_UTIL_CHECK_CMD', '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '': + if env.get('CM_SYS_UTIL_CHECK_CMD', + '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '': env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}""" - return {'return':0} - - + return {'return': 0} def detect_version(i): @@ -120,14 +132,17 @@ def detect_version(i): version = "undetected" else: r = i['automation'].parse_version({'match_text': version_check_re, - 'group_number': group_number, - 'env_key': version_env_key, - 'which_env': env}) + 'group_number': group_number, + 'env_key': version_env_key, + 'which_env': env}) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] - print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print( + i['recursion_spaces'] + + ' Detected version: {}'.format(version)) return {'return': 0, 'version': version} @@ -137,26 +152,30 @@ def postprocess(i): version_env_key = 
f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION" - if (env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get('CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes': + if (env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get( + 'CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes': automation = i['automation'] - r = automation.run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':'detect'}) - if r['return'] > 0 and str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in [ "1", "yes", "true" ]: + r = automation.run_native_script({'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'}) + if r['return'] > 0 and str(env.get( + 'CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]: return {'return': 1, 'error': 'Version detection failed after installation. Please check the provided version command or use env.CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'} elif r['return'] == 0: r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] env[version_env_key] = version - #Not used now - env['CM_GENERIC_SYS_UTIL_'+env['CM_SYS_UTIL_NAME'].upper()+'_CACHE_TAGS'] = 'version-'+version + # Not used now + env['CM_GENERIC_SYS_UTIL_' + env['CM_SYS_UTIL_NAME'].upper() + \ + '_CACHE_TAGS'] = 'version-' + version if env.get(version_env_key, '') == '': env[version_env_key] = "undetected" - return {'return':0, 'version': env[version_env_key]} + return {'return': 0, 'version': env[version_env_key]} diff --git a/script/get-gh-actions-runner/customize.py b/script/get-gh-actions-runner/customize.py index 6c3b91da4b..ea87909aec 100644 --- a/script/get-gh-actions-runner/customize.py +++ b/script/get-gh-actions-runner/customize.py @@ -2,6 +2,7 @@ import os import cmind as cm + def preprocess(i): os_info = i['os_info'] @@ -24,19 +25,21 @@ def preprocess(i): elif cmd == "uninstall": run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall" cache_rm_tags = "gh,runner,_install" - r = cm.access({'action': 'rm', 'automation': 'cache', 'tags': cache_rm_tags, 'f': True}) + r = cm.access({'action': 'rm', 'automation': 'cache', + 'tags': cache_rm_tags, 'f': True}) print(r) - if r['return'] != 0 and r['return'] != 16: ## ignore missing ones + if r['return'] != 0 and r['return'] != 16: # ignore missing ones return r elif cmd == "start": run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start" env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py index 0a1d7b0729..a4bf817208 100644 --- a/script/get-git-repo/customize.py +++ b/script/get-git-repo/customize.py @@ -2,24 +2,29 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] # if os_info['platform'] == 'windows': -# return {'return':1, 'error': 'Windows is not supported in this script yet'} +# return {'return':1, 'error': 
'Windows is not supported in this script +# yet'} env = i['env'] meta = i['meta'] - env_key = get_env_key(env) cm_git_url = env['CM_GIT_URL'] - if 'CM_GIT_REPO_NAME' not in env: - update_env(env, 'CM_GIT_REPO{}_NAME', env_key, os.path.basename(env['CM_GIT_URL'])) + update_env( + env, + 'CM_GIT_REPO{}_NAME', + env_key, + os.path.basename( + env['CM_GIT_URL'])) if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' @@ -28,37 +33,48 @@ def preprocess(i): env['CM_GIT_RECURSE_SUBMODULES'] = '' if env.get('CM_GIT_CHECKOUT', '') == '': - env['CM_GIT_CHECKOUT'] = env.get('CM_GIT_SHA', env.get('CM_GIT_BRANCH', '')) + env['CM_GIT_CHECKOUT'] = env.get( + 'CM_GIT_SHA', env.get( + 'CM_GIT_BRANCH', '')) - git_checkout_string = " -b "+ env['CM_GIT_BRANCH'] if ("CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else "" + git_checkout_string = " -b " + env['CM_GIT_BRANCH'] if ( + "CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else "" - git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + env['CM_GIT_URL'] + " " + env.get('CM_GIT_DEPTH','') + ' ' + env['CM_GIT_CHECKOUT_FOLDER'] + git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \ + env['CM_GIT_URL'] + " " + \ + env.get('CM_GIT_DEPTH', '') + ' ' + env['CM_GIT_CHECKOUT_FOLDER'] env['CM_GIT_CLONE_CMD'] = git_clone_cmd - env['CM_TMP_GIT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone") + env['CM_TMP_GIT_PATH'] = os.path.join( + os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone") - return {'return':0} + return {'return': 0} def postprocess(i): env = i['env'] state = i['state'] - env['CM_GIT_CHECKOUT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER']) + env['CM_GIT_CHECKOUT_PATH'] = os.path.join( + os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER']) git_checkout_path = env['CM_GIT_CHECKOUT_PATH'] env_key = get_env_key(env) - # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't contaminate the env of the parent script - update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH', env_key, env['CM_GIT_CHECKOUT_PATH']) + # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't + # contaminate the env of the parent script + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH', + env_key, env['CM_GIT_CHECKOUT_PATH']) update_env(env, 'CM_GIT_REPO{}_URL', env_key, env['CM_GIT_URL']) update_env(env, 'CM_GIT_REPO{}_CHECKOUT', env_key, env['CM_GIT_CHECKOUT']) update_env(env, 'CM_GIT_REPO{}_DEPTH', env_key, env['CM_GIT_DEPTH']) - update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER', env_key, env['CM_GIT_CHECKOUT_FOLDER']) + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER', + env_key, env['CM_GIT_CHECKOUT_FOLDER']) update_env(env, 'CM_GIT_REPO{}_PATCH', env_key, env['CM_GIT_PATCH']) - update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES', env_key, env['CM_GIT_RECURSE_SUBMODULES']) + update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES', + env_key, env['CM_GIT_RECURSE_SUBMODULES']) - if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME','') != ''): + if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''): env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path env['CM_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path @@ -68,22 +84,24 @@ def postprocess(i): git_hash = f.readline().strip() env['CM_GIT_REPO_CURRENT_HASH'] = git_hash - return {'return':0} + return {'return': 0} + def get_env_key(env): - env_key = env.get('CM_GIT_ENV_KEY','') + env_key = env.get('CM_GIT_ENV_KEY', '') - if env_key!='' and not env_key.startswith('_'): + if 
env_key != '' and not env_key.startswith('_'): env_key = '_' + env_key return env_key + def update_env(env, key, env_key, var): env[key.format('')] = var - if env_key!='': + if env_key != '': env[key.format(env_key)] = var return diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py index 9c15d17d72..b69878bf0e 100644 --- a/script/get-github-cli/customize.py +++ b/script/get-github-cli/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,27 +15,31 @@ def preprocess(i): # Will check env['CM_TMP_PATH'] if comes from installation script r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_GITHUBCLI_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_GITHUBCLI_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: - if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': return r - print (recursion_spaces+' # {}'.format(r['error'])) + print(recursion_spaces + ' # {}'.format(r['error'])) # Attempt to run installer - r = {'return':0, 'skip':True, 'script':{'tags':'install,github-cli'}} + r = { + 'return': 0, + 'skip': True, + 'script': { + 'tags': 'install,github-cli'}} return r found_path = r['found_path'] - return {'return':0} + return {'return': 0} def postprocess(i): @@ -42,13 +47,13 @@ def postprocess(i): r = i['automation'].parse_version({'match_text': r'gh\s*version\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_GITHUBCLI_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_GITHUBCLI_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + return {'return': 0, 'version': version} diff --git a/script/get-go/customize.py b/script/get-go/customize.py index 72f0874c8c..c343442f71 100644 --- a/script/get-go/customize.py +++ b/script/get-go/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,39 +15,43 @@ def preprocess(i): if 'CM_GO_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_GO_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_GO_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'\s+go([\d.]+)', 'group_number': 1, - 'env_key':'CM_GO_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_GO_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print 
(i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_GO_BIN_WITH_PATH'] @@ -54,6 +59,6 @@ def postprocess(i): found_path = os.path.dirname(found_file_path) env['CM_GO_INSTALLED_PATH'] = found_path - env['CM_GO_CACHE_TAGS'] = 'version-'+version + env['CM_GO_CACHE_TAGS'] = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-google-saxml/customize.py b/script/get-google-saxml/customize.py index cc9342a50a..983d681f4a 100644 --- a/script/get-google-saxml/customize.py +++ b/script/get-google-saxml/customize.py @@ -1,13 +1,15 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] # TBD - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -17,6 +19,4 @@ def postprocess(i): # TBD cur_dir = os.getcwd() - - - return {'return':0} + return {'return': 0} diff --git a/script/get-google-test/customize.py b/script/get-google-test/customize.py index 5d7427929d..67cbf331de 100644 --- a/script/get-google-test/customize.py +++ b/script/get-google-test/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,10 +12,11 @@ def preprocess(i): automation = i['automation'] - env['CM_GIT_CHECKOUT'] = "v"+env['CM_VERSION'] + env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION'] quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -30,4 +32,4 @@ def postprocess(i): env['+C_INCLUDE_PATH'].append(os.path.join(gtest_install_path, "include")) env['+LD_LIBRARY_PATH'].append(os.path.join(gtest_install_path, "lib")) - return {'return':0} + return {'return': 0} diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py index 42824f8916..3fb3504f64 100644 --- a/script/get-ipol-src/customize.py +++ b/script/get-ipol-src/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -20,26 +21,32 @@ def preprocess(i): year = env.get('CM_IPOL_YEAR', '') number = env.get('CM_IPOL_NUMBER', '') - url = url.replace('{{CM_IPOL_YEAR}}', year).replace('{{CM_IPOL_NUMBER}}', number) + url = url.replace( + '{{CM_IPOL_YEAR}}', + year).replace( + '{{CM_IPOL_NUMBER}}', + number) - print ('Downloading from {}'.format(url)) + print('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + print('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', - 'filename':filename}) - if r['return']>0: return r + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename}) + if r['return'] > 0: + return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + print('Removing file {}'.format(filename)) os.remove(filename) # Get sub-directory from filename @@ -47,12 +54,13 @@ def 
preprocess(i): subdir = ff[0] - env['CM_IPOL_PATH']=os.path.join(path, subdir) + env['CM_IPOL_PATH'] = os.path.join(path, subdir) # Applying patch - cmd = 'patch -p0 < {}'.format(os.path.join(script_path, 'patch', '20240127.patch')) + cmd = 'patch -p0 < {}'.format(os.path.join(script_path, + 'patch', '20240127.patch')) - print ('Patching code: {}'.format(cmd)) + print('Patching code: {}'.format(cmd)) os.system(cmd) - return {'return':0} + return {'return': 0} diff --git a/script/get-java/customize.py b/script/get-java/customize.py index 3e28a58d85..e103ac4ffb 100644 --- a/script/get-java/customize.py +++ b/script/get-java/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -20,7 +21,7 @@ def preprocess(i): meta = i['meta'] found = False - install = env.get('CM_JAVA_PREBUILT_INSTALL','') in ['on', 'True', True] + install = env.get('CM_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True] env_path_key = 'CM_JAVA_BIN_WITH_PATH' @@ -28,14 +29,14 @@ def preprocess(i): if not install: rr = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':env_path_key, - 'run_script_input':i['run_script_input'], - 'hook': skip_path, - 'recursion_spaces':recursion_spaces}) - if rr['return'] == 0 : + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': env_path_key, + 'run_script_input': i['run_script_input'], + 'hook': skip_path, + 'recursion_spaces': recursion_spaces}) + if rr['return'] == 0: found = True elif rr['return'] != 16: return rr @@ -44,11 +45,11 @@ def preprocess(i): if not found or install: if os_info['platform'] == 'windows': - env['CM_JAVA_PREBUILT_HOST_OS']='windows' - env['CM_JAVA_PREBUILT_EXT']='.zip' + env['CM_JAVA_PREBUILT_HOST_OS'] = 'windows' + env['CM_JAVA_PREBUILT_EXT'] = '.zip' else: - env['CM_JAVA_PREBUILT_HOST_OS']='linux' - env['CM_JAVA_PREBUILT_EXT']='.tar.gz' + env['CM_JAVA_PREBUILT_HOST_OS'] = 'linux' + env['CM_JAVA_PREBUILT_EXT'] = '.tar.gz' url = env['CM_JAVA_PREBUILT_URL'] filename = env['CM_JAVA_PREBUILT_FILENAME'] @@ -60,33 +61,45 @@ def preprocess(i): 'CM_JAVA_PREBUILT_BUILD', 'CM_JAVA_PREBUILT_HOST_OS', 'CM_JAVA_PREBUILT_EXT']: - url = url.replace('${'+key+'}', env[key]) - filename = filename.replace('${'+key+'}', env[key]) + url = url.replace('${' + key + '}', env[key]) + filename = filename.replace('${' + key + '}', env[key]) env['CM_JAVA_PREBUILT_URL'] = url env['CM_JAVA_PREBUILT_FILENAME'] = filename - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) + print('') + print( + recursion_spaces + + ' Downloading and installing prebuilt Java from {} ...'.format( + url + + filename)) - rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) - if rr['return']>0: return rr + rr = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install-prebuilt'}) + if rr['return'] > 0: + return rr - target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') + target_path = os.path.join( + cur_dir, 'jdk-' + java_prebuilt_version, 'bin') target_file = os.path.join(target_path, file_name) if not os.path.isfile(target_file): - return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} + return {'return': 1, + 'error': 'can\'t find target file {}'.format(target_file)} - print ('') 
- print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + print('') + print( + recursion_spaces + + ' Registering file {} ...'.format(target_file)) env[env_path_key] = target_file - if '+PATH' not in env: env['+PATH'] = [] + if '+PATH' not in env: + env['+PATH'] = [] env['+PATH'].append(target_path) - return {'return':0} + return {'return': 0} + def skip_path(i): @@ -98,31 +111,35 @@ def skip_path(i): if 'javapath' in path: skip = True - return {'return':0, 'skip':skip} + return {'return': 0, 'skip': skip} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'\s*"(.*?)"', 'group_number': 1, - 'env_key':'CM_JAVA_VERSION', - 'which_env':i['env'], - 'debug':True}) - if r['return'] >0: return r + 'env_key': 'CM_JAVA_VERSION', + 'which_env': i['env'], + 'debug': True}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = env['CM_JAVA_VERSION'] - env['CM_JAVA_CACHE_TAGS'] = 'version-'+version + env['CM_JAVA_CACHE_TAGS'] = 'version-' + version found_file_path = env['CM_JAVA_BIN_WITH_PATH'] file_name = os.path.basename(found_file_path) @@ -134,4 +151,4 @@ def postprocess(i): env['JAVA_HOME'] = java_home_path - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py index 2f9481c4bf..a5a97c62dc 100644 --- a/script/get-javac/customize.py +++ b/script/get-javac/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -20,7 +21,7 @@ def preprocess(i): meta = i['meta'] found = False - install = env.get('CM_JAVAC_PREBUILT_INSTALL','') in ['on', 'True', True] + install = env.get('CM_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True] env_path_key = 'CM_JAVAC_BIN_WITH_PATH' @@ -28,14 +29,14 @@ def preprocess(i): if not install: rr = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':env_path_key, - 'run_script_input':i['run_script_input'], - 'hook': skip_path, - 'recursion_spaces':recursion_spaces}) - if rr['return'] == 0 : + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': env_path_key, + 'run_script_input': i['run_script_input'], + 'hook': skip_path, + 'recursion_spaces': recursion_spaces}) + if rr['return'] == 0: found = True elif rr['return'] != 16: return rr @@ -44,11 +45,11 @@ def preprocess(i): if not found or install: if os_info['platform'] == 'windows': - env['CM_JAVAC_PREBUILT_HOST_OS']='windows' - env['CM_JAVAC_PREBUILT_EXT']='.zip' + env['CM_JAVAC_PREBUILT_HOST_OS'] = 'windows' + env['CM_JAVAC_PREBUILT_EXT'] = '.zip' else: - env['CM_JAVAC_PREBUILT_HOST_OS']='linux' - env['CM_JAVAC_PREBUILT_EXT']='.tar.gz' + env['CM_JAVAC_PREBUILT_HOST_OS'] = 'linux' + env['CM_JAVAC_PREBUILT_EXT'] = '.tar.gz' url = env['CM_JAVAC_PREBUILT_URL'] filename = env['CM_JAVAC_PREBUILT_FILENAME'] @@ -60,34 +61,45 @@ def preprocess(i): 'CM_JAVAC_PREBUILT_BUILD', 'CM_JAVAC_PREBUILT_HOST_OS', 'CM_JAVAC_PREBUILT_EXT']: - url = url.replace('${'+key+'}', env[key]) - filename = filename.replace('${'+key+'}', 
env[key]) + url = url.replace('${' + key + '}', env[key]) + filename = filename.replace('${' + key + '}', env[key]) env['CM_JAVAC_PREBUILT_URL'] = url env['CM_JAVAC_PREBUILT_FILENAME'] = filename - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) - + print('') + print( + recursion_spaces + + ' Downloading and installing prebuilt Java from {} ...'.format( + url + + filename)) - rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) - if rr['return']>0: return rr + rr = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install-prebuilt'}) + if rr['return'] > 0: + return rr - target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') + target_path = os.path.join( + cur_dir, 'jdk-' + java_prebuilt_version, 'bin') target_file = os.path.join(target_path, file_name) if not os.path.isfile(target_file): - return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} + return {'return': 1, + 'error': 'can\'t find target file {}'.format(target_file)} - print ('') - print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + print('') + print( + recursion_spaces + + ' Registering file {} ...'.format(target_file)) env[env_path_key] = target_file - if '+PATH' not in env: env['+PATH'] = [] + if '+PATH' not in env: + env['+PATH'] = [] env['+PATH'].append(target_path) - return {'return':0} + return {'return': 0} + def skip_path(i): @@ -99,22 +111,25 @@ def skip_path(i): if 'javapath' in path: skip = True - return {'return':0, 'skip':skip} + return {'return': 0, 'skip': skip} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'javac\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_JAVAC_VERSION', - 'which_env':i['env'], - 'debug':True}) - if r['return'] >0: return r + 'env_key': 'CM_JAVAC_VERSION', + 'which_env': i['env'], + 'debug': True}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): @@ -122,10 +137,11 @@ def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = env['CM_JAVAC_VERSION'] - env['CM_JAVAC_CACHE_TAGS'] = 'version-'+version + env['CM_JAVAC_CACHE_TAGS'] = 'version-' + version found_file_path = env['CM_JAVAC_BIN_WITH_PATH'] file_name = os.path.basename(found_file_path) @@ -145,4 +161,4 @@ def postprocess(i): env['JAVA_HOME'] = javac_home_path - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py index 6476cc7894..4fe934f3b6 100644 --- a/script/get-lib-armnn/customize.py +++ b/script/get-lib-armnn/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] version = env['CM_LIB_ARMNN_VERSION'] - if env.get('CM_HOST_PLATFORM_FLAVOR','') == 'x86_64': + if env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'x86_64': url = 
f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz" - elif env.get('CM_HOST_PLATFORM_FLAVOR','') == 'aarch64': + elif env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'aarch64': url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz" env['CM_LIB_ARMNN_PREBUILT_BINARY_URL'] = url @@ -18,18 +19,19 @@ def preprocess(i): env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] paths = [ - "+C_INCLUDE_PATH", - "+CPLUS_INCLUDE_PATH", - "+LD_LIBRARY_PATH", - "+DYLD_FALLBACK_LIBRARY_PATH" - ] + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] for key in paths: env[key] = [] @@ -48,4 +50,4 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0} + return {'return': 0} diff --git a/script/get-lib-dnnl/customize.py b/script/get-lib-dnnl/customize.py index 0d03fd4de0..8b834eb29e 100644 --- a/script/get-lib-dnnl/customize.py +++ b/script/get-lib-dnnl/customize.py @@ -1,28 +1,33 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] env['CM_LIB_DNNL_INSTALL_DIR'] = os.getcwd() - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] - env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) - env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) lib_path = os.path.join(os.getcwd(), 'install', 'lib') env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0} + return {'return': 0} diff --git a/script/get-lib-protobuf/customize.py b/script/get-lib-protobuf/customize.py index c9e641eb44..dc9a423c24 100644 --- a/script/get-lib-protobuf/customize.py +++ b/script/get-lib-protobuf/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,10 +12,11 @@ def preprocess(i): automation = i['automation'] - env['CM_GIT_CHECKOUT'] = "v"+env['CM_VERSION'] + env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION'] quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -26,14 +28,21 @@ def postprocess(i): protobuf_install_path = os.path.join(os.getcwd(), "install") env['CM_GOOGLE_PROTOBUF_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] env['CM_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path - env['+C_INCLUDE_PATH'].append(os.path.join(protobuf_install_path, "include")) - env['+CPLUS_INCLUDE_PATH'].append(os.path.join(protobuf_install_path, 
"include")) + env['+C_INCLUDE_PATH'].append( + os.path.join( + protobuf_install_path, + "include")) + env['+CPLUS_INCLUDE_PATH'].append( + os.path.join(protobuf_install_path, "include")) if os.path.exists(os.path.join(protobuf_install_path, "lib")): - env['+LD_LIBRARY_PATH'].append(os.path.join(protobuf_install_path, "lib")) + env['+LD_LIBRARY_PATH'].append( + os.path.join(protobuf_install_path, "lib")) elif os.path.exists(os.path.join(protobuf_install_path, "lib64")): - env['+LD_LIBRARY_PATH'].append(os.path.join(protobuf_install_path, "lib64")) + env['+LD_LIBRARY_PATH'].append( + os.path.join(protobuf_install_path, "lib64")) else: - return {'return':1, 'error': f'Protobuf library path not found in {protobuf_install_path}'} + return { + 'return': 1, 'error': f'Protobuf library path not found in {protobuf_install_path}'} - return {'return':0} + return {'return': 0} diff --git a/script/get-lib-qaic-api/customize.py b/script/get-lib-qaic-api/customize.py index 1c95c558cd..4d9b738658 100644 --- a/script/get-lib-qaic-api/customize.py +++ b/script/get-lib-qaic-api/customize.py @@ -1,39 +1,43 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - #env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + # env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] paths = [ - "+C_INCLUDE_PATH", - "+CPLUS_INCLUDE_PATH", - "+LD_LIBRARY_PATH", - "+DYLD_FALLBACK_LIBRARY_PATH" - ] + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] for key in paths: env[key] = [] - include_paths = [ env['CM_TMP_CURRENT_SCRIPT_PATH'] ] + include_paths = [env['CM_TMP_CURRENT_SCRIPT_PATH']] for inc_path in include_paths: env['+C_INCLUDE_PATH'].append(inc_path) env['+CPLUS_INCLUDE_PATH'].append(inc_path) version = "master" - env['CM_QAIC_API_SRC_FILE'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") - env['CM_QAIC_API_INC_FILE'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") + env['CM_QAIC_API_SRC_FILE'] = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") + env['CM_QAIC_API_INC_FILE'] = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") - return {'return':0} + return {'return': 0} diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py index dd0d69eec4..9b2ff3d5f0 100644 --- a/script/get-llvm/customize.py +++ b/script/get-llvm/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,44 +17,48 @@ def preprocess(i): if 'CM_LLVM_CLANG_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name_c, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_LLVM_CLANG_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_LLVM_CLANG_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return 
{'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'clang version\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_LLVM_CLANG_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_LLVM_CLANG_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = env['CM_LLVM_CLANG_VERSION'] - env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-'+version - env['CM_COMPILER_CACHE_TAGS'] = 'version-'+version+',family-llvm' + env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm' env['CM_COMPILER_FAMILY'] = 'LLVM' env['CM_COMPILER_VERSION'] = env['CM_LLVM_CLANG_VERSION'] @@ -64,23 +69,24 @@ def postprocess(i): file_name_c = os.path.basename(found_file_path) file_name_cpp = file_name_c.replace("clang", "clang++") - env['CM_LLVM_CLANG_BIN']=file_name_c + env['CM_LLVM_CLANG_BIN'] = file_name_c # General compiler for general program compilation - env['CM_C_COMPILER_BIN']=file_name_c - env['CM_C_COMPILER_WITH_PATH']=found_file_path - env['CM_C_COMPILER_FLAG_OUTPUT']='-o ' - env['CM_C_COMPILER_FLAG_VERSION']='--version' - env['CM_C_COMPILER_FLAG_INCLUDE']='-I' - - env['CM_CXX_COMPILER_BIN']=file_name_cpp - env['CM_CXX_COMPILER_WITH_PATH']=os.path.join(found_path, file_name_cpp) - env['CM_CXX_COMPILER_FLAG_OUTPUT']='-o ' - env['CM_CXX_COMPILER_FLAG_VERSION']='--version' - env['CM_CXX_COMPILER_FLAG_INCLUDE']='-I' + env['CM_C_COMPILER_BIN'] = file_name_c + env['CM_C_COMPILER_WITH_PATH'] = found_file_path + env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_C_COMPILER_FLAG_VERSION'] = '--version' + env['CM_C_COMPILER_FLAG_INCLUDE'] = '-I' + + env['CM_CXX_COMPILER_BIN'] = file_name_cpp + env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version' + env['CM_CXX_COMPILER_FLAG_INCLUDE'] = '-I' env['CM_COMPILER_FLAGS_FAST'] = "-O4" - env['CM_LINKER_FLAGS_FAST'] = "-O4" # "-flto" - this flag is not always available (requires LLVMgold.so) + # "-flto" - this flag is not always available (requires LLVMgold.so) + env['CM_LINKER_FLAGS_FAST'] = "-O4" env['CM_COMPILER_FLAGS_DEBUG'] = "-O0" env['CM_LINKER_FLAGS_DEBUG'] = "-O0" env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2" @@ -88,4 +94,4 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-microtvm/customize.py b/script/get-microtvm/customize.py index 327760590a..db61f8d735 100644 --- a/script/get-microtvm/customize.py +++ b/script/get-microtvm/customize.py @@ -2,19 +2,21 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' if 'CM_GIT_RECURSE_SUBMODULES' not in env: env['CM_GIT_RECURSE_SUBMODULES'] = '' - return {'return':0} + return 
{'return': 0} + def postprocess(i): @@ -23,4 +25,4 @@ def postprocess(i): env['CM_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm') - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py index e874c46d65..dc3d4c605c 100644 --- a/script/get-ml-model-3d-unet-kits19/customize.py +++ b/script/get-ml-model-3d-unet-kits19/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -19,6 +20,6 @@ def preprocess(i): else: env['CM_ML_MODEL_PATH'] = path - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-bert-large-squad/customize.py b/script/get-ml-model-bert-large-squad/customize.py index 6960f00027..c584484557 100644 --- a/script/get-ml-model-bert-large-squad/customize.py +++ b/script/get-ml-model-bert-large-squad/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,18 +9,23 @@ def preprocess(i): env = i['env'] if env.get('CM_ML_MODEL_BERT_PACKED', '') == 'yes': i['run_script_input']['script_name'] = "run-packed" - env['CM_BERT_CONFIG_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") + env['CM_BERT_CONFIG_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd() - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), "model.onnx") - env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join(os.getcwd(), "model.onnx") + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), "model.onnx") + env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join( + os.getcwd(), "model.onnx") + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) if env.get('CM_ML_MODEL_PRECISION', '') == "fp32": env['CM_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] @@ -28,4 +34,4 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py index 6aaac4cfaf..c0d8dd73cd 100644 --- a/script/get-ml-model-efficientnet-lite/customize.py +++ b/script/get-ml-model-efficientnet-lite/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,37 +17,40 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url - print ('Downloading from {}'.format(url)) + print('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r filename = r['filename'] if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": if env.get('CM_UNZIP') == "yes": - cmd="unzip " + cmd = "unzip " elif env.get('CM_UNTAR') == "yes": - cmd="tar -xvzf " - os.system(cmd+filename) + cmd = "tar -xvzf " + os.system(cmd + filename) filename = env['CM_ML_MODEL_FILE'] 
extract_folder = env.get('CM_EXTRACT_FOLDER', '') if extract_folder: - env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, extract_folder, filename) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + path, extract_folder, filename) else: - env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: - env['CM_ML_MODEL_FILE']=filename - env['CM_ML_MODEL_FILE_WITH_PATH']=r['path'] + env['CM_ML_MODEL_FILE'] = filename + env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH']=path + env['CM_ML_MODEL_PATH'] = path if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): - return {'return':1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"} + return { + 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} does not exist. The model name {env['CM_ML_MODEL_FILE']} in the model meta is probably wrong"} - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-gptj/convert_gptj_ckpt.py b/script/get-ml-model-gptj/convert_gptj_ckpt.py index 26fc7b02da..544caac2bf 100644 --- a/script/get-ml-model-gptj/convert_gptj_ckpt.py +++ b/script/get-ml-model-gptj/convert_gptj_ckpt.py @@ -145,20 +145,25 @@ def convert(base_model_path, pax_model_path): ].data.numpy(), }, } - jax_weights['lm']['transformer']['x_layers_%d' % layer_idx] = layer_weight + jax_weights['lm']['transformer']['x_layers_%d' % + layer_idx] = layer_weight print(f'Saving the pax model to {pax_model_path}') jax_states = train_states.TrainState( step=0, mdl_vars={'params': jax_weights}, opt_states={} ) device_mesh = py_utils.create_device_mesh([1, 1, num_gpus]) - global_mesh = jax.sharding.Mesh(device_mesh, ['replica', 'data_mdl2', 'mdl']) + global_mesh = jax.sharding.Mesh( + device_mesh, ['replica', 'data_mdl2', 'mdl']) # Identity pjit is needed to output a GDA model_states.
def identity(x): return x - pjitted_identity = pjit.pjit(identity, in_shardings=None, out_shardings=None) + pjitted_identity = pjit.pjit( + identity, + in_shardings=None, + out_shardings=None) with global_mesh: jax_states_gda = pjitted_identity(jax_states) diff --git a/script/get-ml-model-gptj/customize.py b/script/get-ml-model-gptj/customize.py index 639efcde87..90343764df 100644 --- a/script/get-ml-model-gptj/customize.py +++ b/script/get-ml-model-gptj/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,10 +9,17 @@ def preprocess(i): if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes': i['run_script_input']['script_name'] = 'run-intel' - harness_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', 'gptj-99', 'pytorch-cpu') + harness_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'code', + 'gptj-99', + 'pytorch-cpu') print(f"Harness Root: {harness_root}") env['CM_HARNESS_CODE_ROOT'] = harness_root - env['CM_CALIBRATION_CODE_ROOT'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') + env['CM_CALIBRATION_CODE_ROOT'] = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] @@ -24,14 +32,18 @@ def preprocess(i): elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': i['run_script_input']['script_name'] = 'run-nvidia' - if str(env.get('CM_DOCKER_DETACHED_MODE','')).lower() in ['yes', 'true', "1"]: + if str(env.get('CM_DOCKER_DETACHED_MODE', '') + ).lower() in ['yes', 'true', "1"]: env['DOCKER_RUN_OPTS'] = "--rm --ipc=host --ulimit memlock=-1 --ulimit stack=67108864" - gpu_arch = int(float(env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * 10) + gpu_arch = int( + float( + env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * + 10) env['CM_GPU_ARCH'] = gpu_arch env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' else: - is_saxml = env.get('CM_TMP_MODEL_SAXML','') + is_saxml = env.get('CM_TMP_MODEL_SAXML', '') if is_saxml == "fp32": i['run_script_input']['script_name'] = 'run-saxml' elif is_saxml == "int8": @@ -42,35 +54,46 @@ def preprocess(i): if path == '' or not os.path.exists(path): env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - if os.path.exists(os.path.join(env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final")): - env['GPTJ_CHECKPOINT_PATH'] = os.path.join(env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final") + if os.path.exists(os.path.join( + env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final")): + env['GPTJ_CHECKPOINT_PATH'] = os.path.join( + env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final") - is_saxml = env.get('CM_TMP_MODEL_SAXML','') + is_saxml = env.get('CM_TMP_MODEL_SAXML', '') if is_saxml == "fp32": if os.path.exists("pax_gptj_checkpoint"): - env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join(os.getcwd(), "pax_gptj_checkpoint") + env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join( + os.getcwd(), "pax_gptj_checkpoint") env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH'] else: return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} elif is_saxml == "int8": if os.path.exists("int8_ckpt"): - env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join(os.getcwd(), "int8_ckpt") + env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join( + os.getcwd(), "int8_ckpt") env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] else: return {'return': 
1, 'error': 'pax_gptj_checkpoint generation failed'} elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], 'models', 'GPTJ-6B', 'fp8-quantized-ammo', 'GPTJ-FP8-quantized') + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'fp8-quantized-ammo', + 'GPTJ-FP8-quantized') else: env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH'] - env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-huggingface-zoo/customize.py b/script/get-ml-model-huggingface-zoo/customize.py index 8770e5bcb4..cd0b5a5c07 100644 --- a/script/get-ml-model-huggingface-zoo/customize.py +++ b/script/get-ml-model-huggingface-zoo/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -18,13 +19,15 @@ def preprocess(i): path = os.getcwd() if env.get('CM_GIT_CLONE_REPO', '') != 'yes': - run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + os.path.join(script_path, 'download_model.py') + run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + \ + os.path.join(script_path, 'download_model.py') else: run_cmd = '' env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -32,19 +35,19 @@ def postprocess(i): env_key = env.get('CM_MODEL_ZOO_ENV_KEY', '') - path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH','') - if path_file!='': + path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH', '') + if path_file != '': path_dir = os.path.dirname(path_file) env['CM_ML_MODEL_PATH'] = path_dir - if env_key!='': - env['CM_ML_MODEL_'+env_key+'_PATH'] = path_dir + if env_key != '': + env['CM_ML_MODEL_' + env_key + '_PATH'] = path_dir else: path_dir = env['CM_ML_MODEL_PATH'] - if env_key!='': - env['CM_ML_MODEL_'+env_key+'_FILE_WITH_PATH'] = path_dir + if env_key != '': + env['CM_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py index c355c11ee7..2f35842783 100644 --- a/script/get-ml-model-huggingface-zoo/download_model.py +++ b/script/get-ml-model-huggingface-zoo/download_model.py @@ -4,7 +4,7 @@ model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') model_task = os.environ.get('CM_MODEL_TASK', '') -revision = os.environ.get('CM_HF_REVISION','') +revision = os.environ.get('CM_HF_REVISION', '') if model_task == "prune": print("Downloading model: " + model_stub) @@ -26,27 +26,28 @@ if model_filename == '': model_filename = 'model.onnx' - model_filenames = model_filename.split(',') if ',' in model_filename else [model_filename] + model_filenames = model_filename.split( + ',') if ',' in model_filename else [model_filename] base_model_filepath = None files = [] - if full_subfolder!='': + if full_subfolder != '': from huggingface_hub import HfFileSystem fs = HfFileSystem() # List all files in a directory - path = model_stub+'/'+full_subfolder + path = model_stub + '/' + full_subfolder - print ('') - print ('Listing files in {} ...'.format(path)) + print('') + print('Listing files in {} ...'.format(path)) def list_hf_files(path): all_files = [] xrevision = None if revision == '' else 
revision - files=fs.ls(path, revision=xrevision) #, detail=False) + files = fs.ls(path, revision=xrevision) # , detail=False) for f in files: fname = f['name'] @@ -59,50 +60,46 @@ def list_hf_files(path): return all_files + files = list_hf_files(path) - files=list_hf_files(path) - - print ('') - print ('Found {} files'.format(len(files))) + print('') + print('Found {} files'.format(len(files))) for f in files: - remove = len(model_stub)+1 + remove = len(model_stub) + 1 - if revision!='': - remove+=len(revision)+1 + if revision != '': + remove += len(revision) + 1 ff = f[remove:] if ff not in model_filenames: model_filenames.append(ff) - - print ('') + print('') for model_filename in model_filenames: print("Downloading file {} / {} ...".format(model_stub, model_filename)) extra_dir = os.path.dirname(model_filename) - if extra_dir!='' and not os.path.exists(extra_dir): + if extra_dir != '' and not os.path.exists(extra_dir): os.makedirs(extra_dir) - xrevision = None if revision == '' else revision xsubfolder = None if subfolder == '' else subfolder downloaded_path = hf_hub_download(repo_id=model_stub, - subfolder=xsubfolder, - filename=model_filename, - revision=xrevision, - cache_dir=os.getcwd()) + subfolder=xsubfolder, + filename=model_filename, + revision=xrevision, + cache_dir=os.getcwd()) print(downloaded_path) if not base_model_filepath: base_model_filepath = downloaded_path - - print ('') + print('') with open('tmp-run-env.out', 'w') as f: f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") diff --git a/script/get-ml-model-llama2/customize.py b/script/get-ml-model-llama2/customize.py index a65e47f828..1f59493125 100644 --- a/script/get-ml-model-llama2/customize.py +++ b/script/get-ml-model-llama2/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,7 +9,10 @@ def preprocess(i): if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': i['run_script_input']['script_name'] = 'run-nvidia' - gpu_arch = int(float(env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * 10) + gpu_arch = int( + float( + env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * + 10) env['CM_GPU_ARCH'] = gpu_arch env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' else: @@ -17,24 +21,26 @@ def preprocess(i): if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'amd': env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' i['run_script_input']['script_name'] = 'run-amd' - env['AMD_CODE_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code') + env['AMD_CODE_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code') env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd() - env['CM_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join(env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors") + env['CM_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join( + env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors") else: if path == '' or not os.path.exists(path): env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - if env.get('LLAMA2_CHECKPOINT_PATH', '' ) == '': + if env.get('LLAMA2_CHECKPOINT_PATH', '') == '': env['LLAMA2_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] else: env['CM_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH'] env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-mixtral/customize.py 
b/script/get-ml-model-mixtral/customize.py index 957a87b047..a210368833 100644 --- a/script/get-ml-model-mixtral/customize.py +++ b/script/get-ml-model-mixtral/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,7 +12,8 @@ def preprocess(i): if path == '' or not os.path.exists(path): env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -23,4 +25,4 @@ def postprocess(i): env['CM_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH'] env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py index 6aaac4cfaf..c0d8dd73cd 100644 --- a/script/get-ml-model-mobilenet/customize.py +++ b/script/get-ml-model-mobilenet/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,37 +17,40 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url - print ('Downloading from {}'.format(url)) + print('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r filename = r['filename'] if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": if env.get('CM_UNZIP') == "yes": - cmd="unzip " + cmd = "unzip " elif env.get('CM_UNTAR') == "yes": - cmd="tar -xvzf " - os.system(cmd+filename) + cmd = "tar -xvzf " + os.system(cmd + filename) filename = env['CM_ML_MODEL_FILE'] extract_folder = env.get('CM_EXTRACT_FOLDER', '') if extract_folder: - env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, extract_folder, filename) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + path, extract_folder, filename) else: - env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: - env['CM_ML_MODEL_FILE']=filename - env['CM_ML_MODEL_FILE_WITH_PATH']=r['path'] + env['CM_ML_MODEL_FILE'] = filename + env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH']=path + env['CM_ML_MODEL_PATH'] = path if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): - return {'return':1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} not existing. Probably the model name {env['CM_ML_MODEL_FILE']} in model meta is wrong"} + return { + 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} does not exist. The model name {env['CM_ML_MODEL_FILE']} in the model meta is probably wrong"}
- return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-neuralmagic-zoo/customize.py b/script/get-ml-model-neuralmagic-zoo/customize.py index 4e912f00e8..797932c14b 100644 --- a/script/get-ml-model-neuralmagic-zoo/customize.py +++ b/script/get-ml-model-neuralmagic-zoo/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,16 +17,18 @@ def preprocess(i): model_stub = env.get('CM_MODEL_ZOO_STUB', '') if model_stub == '': - variations = list(i.get('meta', {}).get('variations',{}).keys()) + variations = list(i.get('meta', {}).get('variations', {}).keys()) variation_models = [] for v in variations: if '#' not in v: variation_models.append(v) - return {'return':1, 'error':'ENV CM_MODEL_ZOO_STUB is not set. Please select variation from {}'.format(str(variation_models))} + return {'return': 1, 'error': 'ENV CM_MODEL_ZOO_STUB is not set. Please select a variation from {}'.format( + str(variation_models))} + + return {'return': 0} - return {'return':0} def postprocess(i): @@ -40,4 +43,4 @@ def postprocess(i): if os.path.exists(onnx_path): env['CM_MLPERF_CUSTOM_MODEL_PATH'] = onnx_path - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-neuralmagic-zoo/download_sparse.py b/script/get-ml-model-neuralmagic-zoo/download_sparse.py index 1da36774bd..b2c9de6075 100644 --- a/script/get-ml-model-neuralmagic-zoo/download_sparse.py +++ b/script/get-ml-model-neuralmagic-zoo/download_sparse.py @@ -1,7 +1,7 @@ from sparsezoo import Model import os -model_stub= os.environ.get('CM_MODEL_ZOO_STUB', '') +model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') print(f"Downloading model {model_stub}") stub = f"{model_stub}" model = Model(stub) diff --git a/script/get-ml-model-resnet50/customize.py b/script/get-ml-model-resnet50/customize.py index 4f30e94181..3aced77567 100644 --- a/script/get-ml-model-resnet50/customize.py +++ b/script/get-ml-model-resnet50/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -10,7 +11,8 @@ def preprocess(i): if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": i['run_script_input']['script_name'] = "run-fix-input" - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -18,11 +20,14 @@ def postprocess(i): if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": env['CM_ML_MODEL_STARTING_FILE_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), "resnet50_v1.pb") + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), "resnet50_v1.pb") - env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - env['CM_DOWNLOAD_PATH'] = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_DOWNLOAD_PATH'] = os.path.dirname( + env['CM_ML_MODEL_FILE_WITH_PATH']) - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-retinanet-nvidia/customize.py b/script/get-ml-model-retinanet-nvidia/customize.py index 6da132316e..52fce9129a 100644 --- a/script/get-ml-model-retinanet-nvidia/customize.py +++ b/script/get-ml-model-retinanet-nvidia/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -8,16 +9,30 @@ def preprocess(i): env = i['env'] if
'+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] - env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "single_stage_detector", "ssd")) - env['CM_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join(os.getcwd(), "retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + "single_stage_detector", + "ssd")) + env['CM_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join( + os.getcwd(), "retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") if "CM_NVIDIA_EFFICIENT_NMS" in env: - env['CM_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join(os.getcwd(), "fpn_efficientnms_concatall.onnx") - env['CM_ML_MODEL_ANCHOR_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'], "code", "retinanet", "tensorrt", "onnx_generator", "retinanet_anchor_xywh_1x1.npy") - return {'return':0} + env['CM_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join( + os.getcwd(), "fpn_efficientnms_concatall.onnx") + env['CM_ML_MODEL_ANCHOR_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'], + "code", + "retinanet", + "tensorrt", + "onnx_generator", + "retinanet_anchor_xywh_1x1.npy") + return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join(os.getcwd(), "test_fpn_efficientnms_concatall.onnx") + env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join( + os.getcwd(), "test_fpn_efficientnms_concatall.onnx") if "CM_NVIDIA_EFFICIENT_NMS" in env: env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = env['CM_NVIDIA_MODEL_PATCHED_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py index 32a61ec375..d445ef01ca 100644 --- a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py +++ b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py @@ -24,10 +24,16 @@ # in_onnx = "/work/code/retinanet/tensorrt/onnx_retina/ref_fpn_transreshapeconcat.onnx" -in_onnx = os.environ.get("CM_ML_MODEL_DYN_BATCHSIZE_PATH", "build/models/retinanet-resnext50-32x4d/new/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") -out_onnx = os.environ.get("CM_NVIDIA_MODEL_PATCHED_PATH", "/work/code/retinanet/tensorrt/onnx_generator/test_fpn_efficientnms_concatall.onnx") +in_onnx = os.environ.get( + "CM_ML_MODEL_DYN_BATCHSIZE_PATH", + "build/models/retinanet-resnext50-32x4d/new/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") +out_onnx = os.environ.get( + "CM_NVIDIA_MODEL_PATCHED_PATH", + "/work/code/retinanet/tensorrt/onnx_generator/test_fpn_efficientnms_concatall.onnx") # Anchor at [1, 1] -anchor_xywh_1x1_npy = os.environ.get("CM_ML_MODEL_ANCHOR_PATH", "/work/code/retinanet/tensorrt/onnx_generator/retinanet_anchor_xywh_1x1.npy") +anchor_xywh_1x1_npy = os.environ.get( + "CM_ML_MODEL_ANCHOR_PATH", + "/work/code/retinanet/tensorrt/onnx_generator/retinanet_anchor_xywh_1x1.npy") graph = gs.import_onnx(onnx.load(in_onnx)) @@ -43,11 +49,11 @@ node_attrs = { "background_class": -1, - "score_threshold" : 0.05, - "iou_threshold" : 0.5, - "max_output_boxes" : 1000, - "score_activation" : True, - "box_coding" : 1, + "score_threshold": 0.05, + "iou_threshold": 0.5, + "max_output_boxes": 1000, + "score_activation": True, + "box_coding": 1, } attrs = { "plugin_version": "1", @@ -67,20 +73,24 @@ # Add EfficientNMS layer # output tensors 
num_detections = gs.Variable(name="num_detections", - dtype=np.int32, - shape=["batch_size", 1]) + dtype=np.int32, + shape=["batch_size", 1]) detection_boxes = gs.Variable(name="detection_boxes", - dtype=np.float32, - shape=["batch_size", 1000, 4]) + dtype=np.float32, + shape=["batch_size", 1000, 4]) detection_scores = gs.Variable(name="detection_scores", - dtype=np.float32, - shape=["batch_size", 1000]) + dtype=np.float32, + shape=["batch_size", 1000]) detection_classes = gs.Variable(name="detection_classes", - dtype=np.int32, - shape=["batch_size", 1000]) + dtype=np.int32, + shape=["batch_size", 1000]) nms_inputs = [tensors["bbox_regression"], tensors["cls_logits"], anchor_tensor] -nms_outputs = [num_detections, detection_boxes, detection_scores, detection_classes] +nms_outputs = [ + num_detections, + detection_boxes, + detection_scores, + detection_classes] graph.layer(op="EfficientNMS_TRT", name="EfficientNMS", @@ -98,7 +108,11 @@ } graph.layer(op="RetinanetConcatNmsOutputsPlugin", name="RetinanetConcatNmsOutputsPlugin", - inputs=[num_detections, detection_boxes, detection_scores, detection_classes], + inputs=[ + num_detections, + detection_boxes, + detection_scores, + detection_classes], outputs=[concat_final_output], attrs=attrs) diff --git a/script/get-ml-model-retinanet/customize.py b/script/get-ml-model-retinanet/customize.py index 0c588e1ae5..63a42a72ec 100644 --- a/script/get-ml-model-retinanet/customize.py +++ b/script/get-ml-model-retinanet/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -9,21 +10,25 @@ def preprocess(i): if env.get('CM_TMP_ML_MODEL_RETINANET_NO_NMS', '') == 'yes': i['run_script_input']['script_name'] = "run-no-nms" - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), "retinanet.onnx") + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), "retinanet.onnx") + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) if env.get('CM_ENV_NAME_ML_MODEL_FILE', '') != '': env[env['CM_ENV_NAME_ML_MODEL_FILE']] = env['CM_ML_MODEL_FILE_WITH_PATH'] if env.get("CM_QAIC_PRINT_NODE_PRECISION_INFO", '') == 'yes': - env['CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join(os.getcwd(), 'node-precision-info.yaml') + env['CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join( + os.getcwd(), 'node-precision-info.yaml') env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-retinanet/node-precision-info.py b/script/get-ml-model-retinanet/node-precision-info.py index 3e4c0066ac..15d5213b9d 100644 --- a/script/get-ml-model-retinanet/node-precision-info.py +++ b/script/get-ml-model-retinanet/node-precision-info.py @@ -4,14 +4,24 @@ import argparse import yaml + def parse_args(add_help=True): - parser = argparse.ArgumentParser(description='Print node precision info for the onnx file', add_help=add_help) - parser.add_argument('--input', default="retinanet.onnx", help='input onnx file') - parser.add_argument('--output', default="node-precision.yaml", help='output node precision file') + parser = argparse.ArgumentParser( + description='Print node precision info for the onnx file', + add_help=add_help) + parser.add_argument( + '--input', + default="retinanet.onnx", + help='input onnx 
file') + parser.add_argument( + '--output', + default="node-precision.yaml", + help='output node precision file') args = parser.parse_args() return args + def main(args): onnx_model = onnx.load(args.input) @@ -32,11 +42,11 @@ def main(args): "1625", ] - #check which list of node names is valid + # check which list of node names is valid node_names = [] valid_list = None - #for n in enumerate_model_node_outputs(onnx_model): + # for n in enumerate_model_node_outputs(onnx_model): for n in onnx_model.graph.node: node_names.append(n.output[0]) @@ -52,9 +62,18 @@ def main(args): node_precision_info['FP16NodeInstanceNames'] = [] fp16nodes = valid_list - fp16nodes += [ "boxes_1", "boxes_2", "boxes_3", "boxes_4", "boxes_5", "scores_1", "scores_2", "scores_3", "scores_4", "scores_5"] + fp16nodes += ["boxes_1", + "boxes_2", + "boxes_3", + "boxes_4", + "boxes_5", + "scores_1", + "scores_2", + "scores_3", + "scores_4", + "scores_5"] - #node_precision_info['FP16NodeInstanceNames'] = "["+", ".join(fp16nodes)+"]" + # node_precision_info['FP16NodeInstanceNames'] = "["+", ".join(fp16nodes)+"]" node_precision_info['FP16NodeInstanceNames'] = fp16nodes yaml_output = yaml.safe_dump(node_precision_info, default_style=None) diff --git a/script/get-ml-model-rnnt/customize.py b/script/get-ml-model-rnnt/customize.py index 57a8d34e5c..f41646dba8 100644 --- a/script/get-ml-model-rnnt/customize.py +++ b/script/get-ml-model-rnnt/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,24 +16,25 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] - print ('Downloading from {}'.format(url)) + print('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r filename = r['filename'] if env.get('CM_UNZIP') == "yes": - os.system("unzip "+filename) + os.system("unzip " + filename) filename = env['CM_ML_MODEL_FILE'] - env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) else: # Add to path - env['CM_ML_MODEL_FILE']=filename - env['CM_ML_MODEL_FILE_WITH_PATH']=r['path'] + env['CM_ML_MODEL_FILE'] = filename + env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] - env['CM_ML_MODEL_PATH']=path + env['CM_ML_MODEL_PATH'] = path - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-stable-diffusion/customize.py b/script/get-ml-model-stable-diffusion/customize.py index 0f313e49ae..cdaf6b90c2 100644 --- a/script/get-ml-model-stable-diffusion/customize.py +++ b/script/get-ml-model-stable-diffusion/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,7 +12,8 @@ def preprocess(i): if path == '' or not os.path.exists(path): env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -24,4 +26,4 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['SDXL_CHECKPOINT_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-tiny-resnet/customize.py b/script/get-ml-model-tiny-resnet/customize.py index 32bf59c7d0..d936088ba4 100644 --- a/script/get-ml-model-tiny-resnet/customize.py +++ b/script/get-ml-model-tiny-resnet/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = 
i['os_info'] @@ -9,16 +10,21 @@ def preprocess(i): if env.get("CM_TMP_ML_MODEL_TF2ONNX", "") == "yes": outputfile = env.get('CM_ML_MODEL_OUTFILE', 'model_quant.onnx') - env['CM_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + env['CM_ML_MODEL_FILE_WITH_PATH'] + " --output " + outputfile + " --inputs-as-nchw \"input_1_int8\"" - env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(os.getcwd(), outputfile) + env['CM_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + \ + env['CM_ML_MODEL_FILE_WITH_PATH'] + " --output " + \ + outputfile + " --inputs-as-nchw \"input_1_int8\"" + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), outputfile) + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - env['CM_ML_MODEL_FILE'] = os.path.basename(env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-ml-model-using-imagenet-from-model-zoo/customize.py b/script/get-ml-model-using-imagenet-from-model-zoo/customize.py index 4fba39521b..884be01707 100644 --- a/script/get-ml-model-using-imagenet-from-model-zoo/customize.py +++ b/script/get-ml-model-using-imagenet-from-model-zoo/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,7 +12,8 @@ def preprocess(i): cm = automation.cmind - return {'return':0} + return {'return': 0} + def postprocess(i): diff --git a/script/get-mlperf-inference-intel-scratch-space/customize.py b/script/get-mlperf-inference-intel-scratch-space/customize.py index 37d9f4a5ed..b88245660a 100644 --- a/script/get-mlperf-inference-intel-scratch-space/customize.py +++ b/script/get-mlperf-inference-intel-scratch-space/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,12 +17,13 @@ def preprocess(i): if env.get('CM_INTEL_MLPERF_SCRATCH_PATH', '') == '': env['CM_INTEL_MLPERF_SCRATCH_PATH'] = os.getcwd() - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_INTEL_MLPERF_SCRATCH_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_INTEL_MLPERF_SCRATCH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py index 077a6fae2e..524521e42a 100644 --- a/script/get-mlperf-inference-loadgen/customize.py +++ b/script/get-mlperf-inference-loadgen/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -9,7 +10,8 @@ def preprocess(i): if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes': i['run_script_input']['script_name'] = "donotrun" - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -17,12 +19,12 @@ def postprocess(i): env = i['env'] if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes': - return {'return':0} - + return {'return': 0} - for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', 
'+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] # On Windows installs directly into Python distro for simplicity @@ -52,4 +54,4 @@ def postprocess(i): env['+PYTHONPATH'].append(python_path) env['CM_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH'] = python_path - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-nvidia-common-code/customize.py b/script/get-mlperf-inference-nvidia-common-code/customize.py index 57e0ea43f4..4edd4cbde6 100644 --- a/script/get-mlperf-inference-nvidia-common-code/customize.py +++ b/script/get-mlperf-inference-nvidia-common-code/customize.py @@ -1,19 +1,21 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} def postprocess(i): env = i['env'] - env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA") - env['+PYTHONPATH'] = [ env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] ] + env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA") + env['+PYTHONPATH'] = [env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-nvidia-scratch-space/customize.py b/script/get-mlperf-inference-nvidia-scratch-space/customize.py index 1bfa6c9580..5335363893 100644 --- a/script/get-mlperf-inference-nvidia-scratch-space/customize.py +++ b/script/get-mlperf-inference-nvidia-scratch-space/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,18 +15,19 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') if env.get('CM_NVIDIA_MLPERF_SCRATCH_PATH', '') == '': - if env.get('MLPERF_SCRATCH_PATH','') != '': + if env.get('MLPERF_SCRATCH_PATH', '') != '': env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = env['MLPERF_SCRATCH_PATH'] else: env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = os.getcwd() - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-results-dir/customize.py b/script/get-mlperf-inference-results-dir/customize.py index 8f013816a1..f1beabcaa9 100644 --- a/script/get-mlperf-inference-results-dir/customize.py +++ b/script/get-mlperf-inference-results-dir/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,15 +14,16 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR','') == '': + if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') == '': env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = os.getcwd() - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-results/customize.py b/script/get-mlperf-inference-results/customize.py index 747d99e52d..36f71224de 100644 --- 
a/script/get-mlperf-inference-results/customize.py +++ b/script/get-mlperf-inference-results/customize.py @@ -2,12 +2,13 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] meta = i['meta'] @@ -16,7 +17,8 @@ def preprocess(i): env['CM_GIT_URL'] = "https://github.com/GATEOverflow/nvidia-inference-code.git" if 'GITHUB_REPO_OWNER' in env and '<<>>' in env['CM_GIT_URL']: - env['CM_GIT_URL'] = env['CM_GIT_URL'].replace('<<>>', env['GITHUB_REPO_OWNER']) + env['CM_GIT_URL'] = env['CM_GIT_URL'].replace( + '<<>>', env['GITHUB_REPO_OWNER']) if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' @@ -24,13 +26,13 @@ def preprocess(i): if 'CM_GIT_RECURSE_SUBMODULES' not in env: env['CM_GIT_RECURSE_SUBMODULES'] = '' - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') versions = meta['versions'] - if need_version!='' and not need_version in versions: + if need_version != '' and need_version not in versions: env['CM_GIT_CHECKOUT'] = need_version - return {'return':0} + return {'return': 0} def postprocess(i): @@ -39,8 +41,8 @@ def postprocess(i): state = i['state'] if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': - env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH'] + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] # env['CM_MLPERF_INFERENCE_RESULTS_PATH'] = os.path.join(os.getcwd(), "inference_results_"+env['CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME']) - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index 75186cdf44..c9aad1ee14 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -2,44 +2,53 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] # if os_info['platform'] == 'windows': -# return {'return':1, 'error': 'Windows is not supported in this script yet'} +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} env = i['env'] meta = i['meta'] script_path = i['run_script_input']['path'] - if env.get('CM_GIT_CHECKOUT', '' ) == '' and env.get('CM_GIT_URL', '' ) == '' and env.get('CM_VERSION','') == '': - # if custom checkout and url parameters are not set and CM_VERSION is not specified + if env.get('CM_GIT_CHECKOUT', '') == '' and env.get( + 'CM_GIT_URL', '') == '' and env.get('CM_VERSION', '') == '': + # if custom checkout and url parameters are not set and CM_VERSION is + # not specified env['CM_VERSION'] = "master" env["CM_GIT_CHECKOUT"] = "master" env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" - elif env.get('CM_GIT_CHECKOUT', '' ) != '' and env.get('CM_TMP_GIT_CHECKOUT', '' ) != '' and env.get('CM_GIT_CHECKOUT', '' )!=env.get('CM_TMP_GIT_CHECKOUT', '' ): - # if checkout branch is assigned inside version and custom branch is also specified - return {"return":1, "error":"Conflicting branches between version assigned and user specified."} - elif env.get('CM_GIT_URL', '' ) != '' and env.get('CM_TMP_GIT_URL', '' ) != '' and env.get('CM_GIT_URL', '' )!=env.get('CM_TMP_GIT_URL', '' ): - # if GIT URL is assigned inside version and custom branch is also specified - return {"return":1, "error":"Conflicting URL's between version assigned and user specified."} - - if env.get('CM_VERSION','') == '': + elif
env.get('CM_GIT_CHECKOUT', '') != '' and env.get('CM_TMP_GIT_CHECKOUT', '') != '' and env.get('CM_GIT_CHECKOUT', '') != env.get('CM_TMP_GIT_CHECKOUT', ''): + # if checkout branch is assigned inside version and custom branch is + # also specified + return { + "return": 1, "error": "Conflicting branches between version assigned and user specified."} + elif env.get('CM_GIT_URL', '') != '' and env.get('CM_TMP_GIT_URL', '') != '' and env.get('CM_GIT_URL', '') != env.get('CM_TMP_GIT_URL', ''): + # if GIT URL is assigned inside version and custom branch is also + # specified + return { + "return": 1, "error": "Conflicting URLs between version assigned and user specified."} + + if env.get('CM_VERSION', '') == '': env['CM_VERSION'] = "custom" # check whether branch and url is specified, # if not try to assign the values specified in version parameters, - # if version parameters does not have the value to a parameter, set the default one + # if version parameters does not have the value to a parameter, set the + # default one - if env.get('CM_GIT_CHECKOUT', '' ) == '': - if env.get('CM_TMP_GIT_CHECKOUT', '' ) != '': + if env.get('CM_GIT_CHECKOUT', '') == '': + if env.get('CM_TMP_GIT_CHECKOUT', '') != '': env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"] else: env["CM_GIT_CHECKOUT"] = "master" - if env.get('CM_GIT_URL', '' ) == '': - if env.get('CM_TMP_GIT_URL', '' ) != '': + if env.get('CM_GIT_URL', '') == '': + if env.get('CM_TMP_GIT_URL', '') != '': env["CM_GIT_URL"] = env["CM_TMP_GIT_URL"] else: env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" @@ -54,29 +63,30 @@ def preprocess(i): env['CM_GIT_RECURSE_SUBMODULES'] = '' submodules = [] possible_submodules = { - "gn": "third_party/gn", - "pybind": "third_party/pybind", - "deeplearningexamples":"language/bert/DeepLearningExamples", - "3d-unet":"vision/medical_imaging/3d-unet-brats19/nnUnet" - } + "gn": "third_party/gn", + "pybind": "third_party/pybind", + "deeplearningexamples": "language/bert/DeepLearningExamples", + "3d-unet": "vision/medical_imaging/3d-unet-brats19/nnUnet" + } for submodule in possible_submodules: - env_name = submodule.upper().replace("-","_") - if env.get("CM_SUBMODULE_"+env_name) == "yes": + env_name = submodule.upper().replace("-", "_") + if env.get("CM_SUBMODULE_" + env_name) == "yes": submodules.append(possible_submodules[submodule]) env['CM_GIT_SUBMODULES'] = ",".join(submodules) if env.get('CM_GIT_PATCH_FILENAME', '') != '': patch_file_name = env['CM_GIT_PATCH_FILENAME'] - env['CM_GIT_PATCH_FILEPATHS'] = os.path.join(script_path, 'patch', patch_file_name) + env['CM_GIT_PATCH_FILEPATHS'] = os.path.join( + script_path, 'patch', patch_file_name) - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') versions = meta['versions'] - if need_version!='' and not need_version in versions: + if need_version != '' and need_version not in versions: env['CM_GIT_CHECKOUT'] = need_version - return {'return':0} + return {'return': 0} def postprocess(i): @@ -85,21 +95,32 @@ def postprocess(i): state = i['state'] inference_root = env['CM_MLPERF_INFERENCE_SOURCE'] - env['CM_MLPERF_INFERENCE_VISION_PATH'] = os.path.join(inference_root, 'vision') - env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join(inference_root, 'vision', 'classification_and_detection') - env['CM_MLPERF_INFERENCE_BERT_PATH'] = os.path.join(inference_root, 'language', 'bert') - env['CM_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join(inference_root, 'language', 'gpt-j') - env['CM_MLPERF_INFERENCE_RNNT_PATH'] =
os.path.join(inference_root, 'speech_recognition', 'rnnt') - env['CM_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join(inference_root, 'recommendation', 'dlrm') - env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join(inference_root, 'recommendation', 'dlrm_v2') - env['CM_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join(inference_root,'vision', 'medical_imaging', '3d-unet-kits19') + env['CM_MLPERF_INFERENCE_VISION_PATH'] = os.path.join( + inference_root, 'vision') + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join( + inference_root, 'vision', 'classification_and_detection') + env['CM_MLPERF_INFERENCE_BERT_PATH'] = os.path.join( + inference_root, 'language', 'bert') + env['CM_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join( + inference_root, 'language', 'gpt-j') + env['CM_MLPERF_INFERENCE_RNNT_PATH'] = os.path.join( + inference_root, 'speech_recognition', 'rnnt') + env['CM_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join( + inference_root, 'recommendation', 'dlrm') + env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join( + inference_root, 'recommendation', 'dlrm_v2') + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join( + inference_root, 'vision', 'medical_imaging', '3d-unet-kits19') env['CM_GET_DEPENDENT_CACHED_PATH'] = inference_root # 20221024: we save and restore env in the main script and can clean env here for determinism # if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] - env['+PYTHONPATH']=[] - env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) + env['+PYTHONPATH'] = [] + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], + 'python')) if os.path.exists(os.path.join(inference_root, "loadgen", "VERSION.txt")): with open(os.path.join(inference_root, "loadgen", "VERSION.txt")) as f: @@ -107,20 +128,26 @@ def postprocess(i): env['CM_MLPERF_INFERENCE_SOURCE_VERSION'] = version_info if env.get('CM_GET_MLPERF_IMPLEMENTATION_ONLY', '') == "yes": - return {'return':0} - - env['CM_MLPERF_INFERENCE_CONF_PATH'] = os.path.join(inference_root, 'mlperf.conf') - env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) + return {'return': 0} - valid_models = get_valid_models(env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE']) + env['CM_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( + inference_root, 'mlperf.conf') + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'tools', + 'submission')) + valid_models = get_valid_models( + env['CM_MLPERF_LAST_RELEASE'], + env['CM_MLPERF_INFERENCE_SOURCE']) state['CM_MLPERF_INFERENCE_MODELS'] = valid_models if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': - env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH'] + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] - return {'return':0, 'version': env['CM_VERSION']} + return {'return': 0, 'version': env['CM_VERSION']} def get_valid_models(mlperf_version, mlperf_path): @@ -131,9 +158,10 @@ def get_valid_models(mlperf_version, mlperf_path): sys.path.append(submission_checker_dir) - if not os.path.exists(os.path.join(submission_checker_dir, "submission_checker.py")): - shutil.copy(os.path.join(submission_checker_dir,"submission-checker.py"), os.path.join(submission_checker_dir, - "submission_checker.py")) + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), 
os.path.join(submission_checker_dir, + "submission_checker.py")) import submission_checker as checker diff --git a/script/get-mlperf-inference-submission-dir/customize.py b/script/get-mlperf-inference-submission-dir/customize.py index 92fb3735ce..f0f95f62fd 100644 --- a/script/get-mlperf-inference-submission-dir/customize.py +++ b/script/get-mlperf-inference-submission-dir/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,17 +14,19 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR','') == '': + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': if not os.path.exists("mlperf-inference-submission"): os.mkdir("mlperf-inference-submission") - env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(os.getcwd(), "mlperf-inference-submission") + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( + os.getcwd(), "mlperf-inference-submission") + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-sut-configs/customize.py b/script/get-mlperf-inference-sut-configs/customize.py index 27461d7b24..8adbdc3b8c 100644 --- a/script/get-mlperf-inference-sut-configs/customize.py +++ b/script/get-mlperf-inference-sut-configs/customize.py @@ -3,19 +3,21 @@ import yaml import shutil + def postprocess(i): env = i['env'] state = i['state'] if env.get('CM_HW_NAME', '') == '': - host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") env['CM_HW_NAME'] = host_name device = env.get('CM_MLPERF_DEVICE', 'cpu') backend = env.get('CM_MLPERF_BACKEND', 'default') if env.get('CM_MLPERF_BACKEND_VERSION', '') != '': - backend_version = "v" + env.get('CM_MLPERF_BACKEND_VERSION') if not env.get('CM_MLPERF_BACKEND_VERSION').startswith("v") else env.get('CM_MLPERF_BACKEND_VERSION') + backend_version = "v" + env.get('CM_MLPERF_BACKEND_VERSION') if not env.get( + 'CM_MLPERF_BACKEND_VERSION').startswith("v") else env.get('CM_MLPERF_BACKEND_VERSION') else: backend_version = 'vdefault' @@ -24,37 +26,94 @@ def postprocess(i): if 'CM_SUT_CONFIG_PATH' not in state: state['CM_SUT_CONFIG_PATH'] = {} - implementation_string = env['CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get('CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get('CM_MLPERF_IMPLEMENTATION', 'default') + implementation_string = env['CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get( + 'CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get( + 'CM_MLPERF_IMPLEMENTATION', 'default') run_config = [] - for i in range(1,6): + for i in range(1, 6): if env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}', '') != '': - run_config.append(env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) + run_config.append( + env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) - run_config_string = "_".join(run_config) if run_config else 'default_config' + run_config_string = "_".join( + run_config) if run_config else 'default_config' env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG'] = run_config_string if env.get('CM_SUT_NAME', '') == '': - env['CM_SUT_NAME'] = env['CM_HW_NAME'] + "-" + implementation_string + "-" + device + "-" + backend + "-" + backend_version + 
"-" + run_config_string + env['CM_SUT_NAME'] = env['CM_HW_NAME'] + "-" + implementation_string + "-" + \ + device + "-" + backend + "-" + backend_version + "-" + run_config_string - if env.get('CM_SUT_CONFIGS_PATH','') != '': + if env.get('CM_SUT_CONFIGS_PATH', '') != '': path = env['CM_SUT_CONFIGS_PATH'] elif env.get('CM_SUT_USE_EXTERNAL_CONFIG_REPO', '') == "yes": path = env.get('CM_GIT_CHECKOUT_PATH') else: path = os.path.join(os.getcwd(), "configs") - config_path = os.path.join(path, env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "framework-version-"+backend_version, run_config_string + "-config.yaml") + config_path = os.path.join( + path, + env['CM_HW_NAME'], + implementation_string + + "-implementation", + device + + "-device", + backend + + "-framework", + "framework-version-" + + backend_version, + run_config_string + + "-config.yaml") if not os.path.exists(config_path): os.makedirs(os.path.dirname(config_path), exist_ok=True) - config_path_default = os.path.join(path, env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "default-config.yaml") + config_path_default = os.path.join( + path, + env['CM_HW_NAME'], + implementation_string + + "-implementation", + device + + "-device", + backend + + "-framework", + "default-config.yaml") if os.path.exists(config_path_default): shutil.copy(config_path_default, config_path) else: - src_config_full = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "framework-version-"+backend_version, run_config_string + "-config.yaml") - src_config_partial1 = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "framework-version-"+backend_version, "default-config.yaml") - src_config_partial2 = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "framework-version-default", "default-config.yaml") - src_config_partial3 = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", env['CM_HW_NAME'], implementation_string+"-implementation", device+"-device", backend+"-framework", "default-config.yaml") + src_config_full = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-" + backend_version, + run_config_string + "-config.yaml") + src_config_partial1 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-" + backend_version, + "default-config.yaml") + src_config_partial2 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-default", + "default-config.yaml") + src_config_partial3 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "default-config.yaml") if os.path.exists(src_config_full): shutil.copy(src_config_full, config_path) elif os.path.exists(src_config_partial1): @@ -64,14 
+123,22 @@ def postprocess(i): elif os.path.exists(src_config_partial3): shutil.copy(src_config_partial3, config_path) else: - print(f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}, backend: '{backend}', copying from default") - src_config = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "default", "config.yaml") + print( + f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}', backend: '{backend}', copying from default") + src_config = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + "default", + "config.yaml") shutil.copy(src_config, config_path) - os.makedirs(os.path.dirname(config_path_default), exist_ok=True) + os.makedirs( + os.path.dirname(config_path_default), + exist_ok=True) shutil.copy(src_config, config_path_default) - state['CM_SUT_CONFIG'][env['CM_SUT_NAME']] = yaml.load(open(config_path), Loader=yaml.SafeLoader) + state['CM_SUT_CONFIG'][env['CM_SUT_NAME']] = yaml.load( + open(config_path), Loader=yaml.SafeLoader) state['CM_SUT_CONFIG_NAME'] = env['CM_SUT_NAME'] state['CM_SUT_CONFIG_PATH'][env['CM_SUT_NAME']] = config_path - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py index d0a0fed54a..6145d96cbd 100644 --- a/script/get-mlperf-inference-sut-description/customize.py +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -3,6 +3,7 @@ import json import shutil + def preprocess(i): env = i['env'] state = i['state'] @@ -12,7 +13,7 @@ def preprocess(i): auto_detected_hw_name = False if env.get('CM_HW_NAME', '') == '': - host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") env['CM_HW_NAME'] = host_name auto_detected_hw_name = True @@ -33,7 +34,7 @@ def preprocess(i): sut = hw_name + sut_suffix script_path = i['run_script_input']['path'] - sut_desc_path=env['CM_MLPERF_INFERENCE_SUT_DESC_PATH'] + sut_desc_path = env['CM_MLPERF_INFERENCE_SUT_DESC_PATH'] sut_path = os.path.join(sut_desc_path, "suts", sut + ".json") if os.path.exists(sut_path) and env.get('CM_SUT_DESC_CACHE', '') == "yes": @@ -48,8 +49,10 @@ def preprocess(i): if not os.path.exists(os.path.dirname(hw_path)): os.makedirs(os.path.dirname(hw_path)) if not os.path.exists(hw_path): - default_hw_path = os.path.join(script_path, "hardware", "default.json") - print("HW description file for " + hw_name + " not found. Copying from default!!!") + default_hw_path = os.path.join( + script_path, "hardware", "default.json") + print("HW description file for " + hw_name + + " not found. 
Copying from default!!!") shutil.copy(default_hw_path, hw_path) state['CM_HW_META'] = json.load(open(hw_path)) @@ -76,50 +79,55 @@ def preprocess(i): # If Windows and os_name_string is empty, rebuild it: - if os_name_string=='' and os_info['platform'] == 'windows': + if os_name_string == '' and os_info['platform'] == 'windows': import platform os_name_string = str(platform.platform()) state['CM_SUT_META']['operating_system'] = os_name_string - state['CM_SUT_META']['other_software_stack'] = "Python: " + python_version + ", " + compiler + "-" + compiler_version + state['CM_SUT_META']['other_software_stack'] = "Python: " + \ + python_version + ", " + compiler + "-" + compiler_version - if state['CM_SUT_META'].get('system_name','') == '': + if state['CM_SUT_META'].get('system_name', '') == '': system_name = env.get('CM_MLPERF_SYSTEM_NAME') if not system_name: system_name = env.get('CM_HW_NAME') if system_name: if auto_detected_hw_name: - system_name+=" (auto detected)" + system_name += " (auto detected)" else: system_name = " (generic)" state['CM_SUT_META']['system_name'] = system_name # Add GPU info - if env.get('CM_MLPERF_DEVICE','') == "gpu" or env.get('CM_MLPERF_DEVICE','') == "cuda": - if env.get('CM_CUDA_VERSION','') != '': - cuda_version = " , CUDA " + env['CM_CUDA_VERSION'] + if env.get('CM_MLPERF_DEVICE', '') == "gpu" or env.get( + 'CM_MLPERF_DEVICE', '') == "cuda": + if env.get('CM_CUDA_VERSION', '') != '': + cuda_version = " , CUDA " + env['CM_CUDA_VERSION'] state['CM_SUT_META']['other_software_stack'] += cuda_version if 'cm_cuda_device_prop' in state: state['CM_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] - state['CM_SUT_META']['accelerator_memory_capacity'] = str(int(state['cm_cuda_device_prop']['Global memory'])/(1024*1024.0*1024)) + " GB" + state['CM_SUT_META']['accelerator_memory_capacity'] = str(int( + state['cm_cuda_device_prop']['Global memory']) / (1024 * 1024.0 * 1024)) + " GB" state['CM_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] num_accelerators = env.get('CM_CUDA_NUM_DEVICES', "1") state['CM_SUT_META']['accelerators_per_node'] = num_accelerators if state['CM_SUT_META'].get('host_processor_core_count', '') == '': - physical_cores_per_node = env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') + physical_cores_per_node = env.get( + 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') - if physical_cores_per_node == None or physical_cores_per_node == '': + if physical_cores_per_node is None or physical_cores_per_node == '': if os_info['platform'] == 'windows': physical_cores_per_node = '1' state['CM_SUT_META']['host_processor_core_count'] = physical_cores_per_node if state['CM_SUT_META'].get('host_processor_model_name', '') == '': - state['CM_SUT_META']['host_processor_model_name'] = env.get('CM_HOST_CPU_MODEL_NAME', 'undefined') + state['CM_SUT_META']['host_processor_model_name'] = env.get( + 'CM_HOST_CPU_MODEL_NAME', 'undefined') if state['CM_SUT_META'].get('host_processors_per_node', '') == '': x = env.get('CM_HOST_CPU_SOCKETS', '') if x == '' and os_info['platform'] == 'windows': @@ -128,40 +136,45 @@ def preprocess(i): if state['CM_SUT_META'].get('host_processor_caches', '') == '': state['CM_SUT_META']['host_processor_caches'] = "L1d cache: " + env.get('CM_HOST_CPU_L1D_CACHE_SIZE', ' ') + \ - ", L1i cache: " + env.get('CM_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ - env.get('CM_HOST_CPU_L2_CACHE_SIZE', ' ') + \ - ", L3 cache: " + env.get('CM_HOST_CPU_L3_CACHE_SIZE', ' ') + ", L1i cache: " + 
env.get('CM_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ + env.get('CM_HOST_CPU_L2_CACHE_SIZE', ' ') + \ + ", L3 cache: " + env.get('CM_HOST_CPU_L3_CACHE_SIZE', ' ') if state['CM_SUT_META'].get('host_processor_frequency', '') == '': - state['CM_SUT_META']['host_processor_frequency'] = env.get('CM_HOST_CPU_MAX_MHZ') if env.get('CM_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' + state['CM_SUT_META']['host_processor_frequency'] = env.get( + 'CM_HOST_CPU_MAX_MHZ') if env.get('CM_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' if state['CM_SUT_META'].get('host_memory_capacity', '') == '': - state['CM_SUT_META']['host_memory_capacity'] = env.get('CM_HOST_MEMORY_CAPACITY') if env.get('CM_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' + state['CM_SUT_META']['host_memory_capacity'] = env.get( + 'CM_HOST_MEMORY_CAPACITY') if env.get('CM_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' if state['CM_SUT_META'].get('host_storage_capacity', '') == '': - state['CM_SUT_META']['host_storage_capacity'] = env.get('CM_HOST_DISK_CAPACITY') if env.get('CM_HOST_DISK_CAPACITY', '') != '' else 'undefined' + state['CM_SUT_META']['host_storage_capacity'] = env.get( + 'CM_HOST_DISK_CAPACITY') if env.get('CM_HOST_DISK_CAPACITY', '') != '' else 'undefined' if 'CM_SUT_SW_NOTES' in env: sw_notes = env['CM_SUT_SW_NOTES'] else: sw_notes = '' state['CM_SUT_META']['sw_notes'] = sw_notes - if env.get('CM_SUDO_USER', '') == "yes" and env.get('CM_HOST_OS_TYPE', 'linux'): - r = i['automation'].run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':'detect_memory'}) - if r['return']>0: + if env.get('CM_SUDO_USER', '') == "yes" and env.get( + 'CM_HOST_OS_TYPE', 'linux'): + r = i['automation'].run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect_memory'}) + if r['return'] > 0: return r if env.get('CM_HOST_MEM_INFO', '') != '': state['CM_SUT_META']['host_memory_configuration'] = env['CM_HOST_MEM_INFO'] - state['CM_SUT_META'] = dict(sorted(state['CM_SUT_META'].items())) sut_file = open(sut_path, "w") - json.dump(state['CM_SUT_META'], sut_file, indent = 4) + json.dump(state['CM_SUT_META'], sut_file, indent=4) sut_file.close() - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-sut-description/get_memory_info.py b/script/get-mlperf-inference-sut-description/get_memory_info.py index 29e2058000..27d0f870ad 100644 --- a/script/get-mlperf-inference-sut-description/get_memory_info.py +++ b/script/get-mlperf-inference-sut-description/get_memory_info.py @@ -10,7 +10,7 @@ parsedObj = json.loads(str(parser)) memory = [] - ind = 0; + ind = 0 needed_global_keys = ['Speed', 'Configured Memory Speed', 'Type'] added_global_keys = [] needed_keys = ['Size', 'Rank'] @@ -20,7 +20,7 @@ ecc_value = item['props']['Error Correction Type']['values'][0] if not ecc_value or 'None' in ecc_value: ecc_value = "No ECC" - memory.append({"info": ['Error Correction Type: ' + ecc_value ]}) + memory.append({"info": ['Error Correction Type: ' + ecc_value]}) ind += 1 continue if item['name'] != 'Memory Device': @@ -44,16 +44,18 @@ for key in item['props']: if key in needed_global_keys and key not in added_global_keys: - memory[0]['info'].append(f'{key}: {";".join(item["props"][key]["values"])}') + memory[0]['info'].append( + f'{key}: {";".join(item["props"][key]["values"])}') added_global_keys.append(key) elif key in needed_keys: - memory[ind]['info'].append(f'{key}: 
{";".join(item["props"][key]["values"])}') - ind+=1 + memory[ind]['info'].append( + f'{key}: {";".join(item["props"][key]["values"])}') + ind += 1 meminfo = [] for item in memory: - meminfo.append( "; ".join(item['info'])) + meminfo.append("; ".join(item['info'])) - meminfo_string =", ".join(meminfo) + meminfo_string = ", ".join(meminfo) with open("tmp-run-env.out", "w") as f: f.write(f"CM_HOST_MEM_INFO={meminfo_string}") diff --git a/script/get-mlperf-inference-utils/customize.py b/script/get-mlperf-inference-utils/customize.py index 8c4bbbbbf4..6f7f0a49b8 100644 --- a/script/get-mlperf-inference-utils/customize.py +++ b/script/get-mlperf-inference-utils/customize.py @@ -2,6 +2,7 @@ import os import sys + def preprocess(i): os_info = i['os_info'] @@ -16,17 +17,19 @@ def preprocess(i): utils_path = i['run_script_input']['path'] - env['+PYTHONPATH'] = [ utils_path ] + env['+PYTHONPATH'] = [utils_path] - submission_checker_dir = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission") + submission_checker_dir = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission") sys.path.append(submission_checker_dir) sys.path.append(utils_path) - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-inference-utils/mlperf_utils.py b/script/get-mlperf-inference-utils/mlperf_utils.py index 071dd26f8f..1e60cafc15 100644 --- a/script/get-mlperf-inference-utils/mlperf_utils.py +++ b/script/get-mlperf-inference-utils/mlperf_utils.py @@ -4,7 +4,8 @@ from log_parser import MLPerfLog -def get_result_from_log(version, model, scenario, result_path, mode, inference_src_version = None): +def get_result_from_log(version, model, scenario, + result_path, mode, inference_src_version=None): config = checker.Config( version, @@ -13,7 +14,7 @@ def get_result_from_log(version, model, scenario, result_path, mode, inference_s skip_power_check=False, ) mlperf_model = config.get_mlperf_model(model) - #scenario = checker.SCENARIO_MAPPING[scenario] + # scenario = checker.SCENARIO_MAPPING[scenario] result = '' power_result = None @@ -24,11 +25,16 @@ def get_result_from_log(version, model, scenario, result_path, mode, inference_s if inference_src_version: version_tuple = tuple(map(int, inference_src_version.split('.'))) - if version_tuple and version_tuple >= (4,1,22): - result_ = checker.get_performance_metric(config, mlperf_model, result_path, scenario) + if version_tuple and version_tuple >= (4, 1, 22): + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario) else: - result_ = checker.get_performance_metric(config, mlperf_model, result_path, scenario, None, None, has_power) - mlperf_log = MLPerfLog(os.path.join(result_path, "mlperf_log_detail.txt")) + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario, None, None, has_power) + mlperf_log = MLPerfLog( + os.path.join( + result_path, + "mlperf_log_detail.txt")) if ( "result_validity" not in mlperf_log.get_keys() or mlperf_log["result_validity"] != "VALID" @@ -38,20 +44,21 @@ def get_result_from_log(version, model, scenario, result_path, mode, inference_s valid['performance'] = True if "stream" in scenario.lower(): - result = result_ / 1000000 #convert to milliseconds + result = result_ / 1000000 # convert to milliseconds else: result = result_ result = str(round(result, 3)) if has_power: - power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric(config, 
scenario, result_path, True, result_) + power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + config, scenario, result_path, True, result_) power_result = f"{round(power_metric,3)},{round(avg_power_efficiency,3)}" valid['power'] = power_valid - elif mode == "accuracy" and os.path.exists(os.path.join(result_path, 'accuracy.txt')): - acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric(config, mlperf_model, result_path) + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, result_path) valid['accuracy'] = acc_valid if len(acc_results) == 1: @@ -66,6 +73,7 @@ def get_result_from_log(version, model, scenario, result_path, mode, inference_s return result, valid, power_result + def get_accuracy_metric(config, model, path): import re @@ -77,25 +85,24 @@ def get_accuracy_metric(config, model, path): acc_upper_limit = config.get_accuracy_upper_limit(model) patterns = [] acc_targets = [] - acc_limits = [None] * (len(target)//2) - up_patterns = [None] * (len(target)//2) + acc_limits = [None] * (len(target) // 2) + up_patterns = [None] * (len(target) // 2) acc_types = [] if acc_upper_limit is not None: acc_limit_check = True for ii in range(0, len(target), 2): - acc_type1,tmp = target[ii:ii+2] + acc_type1, tmp = target[ii:ii + 2] for i in range(0, len(acc_upper_limit), 2): - acc_type, acc_target = acc_upper_limit[i:i+2] + acc_type, acc_target = acc_upper_limit[i:i + 2] if acc_type != acc_type1: continue - acc_limits[ii//2] = acc_target - up_patterns[ii//2] = checker.ACC_PATTERN[acc_type] - + acc_limits[ii // 2] = acc_target + up_patterns[ii // 2] = checker.ACC_PATTERN[acc_type] for i in range(0, len(target), 2): - acc_type, acc_target = target[i:i+2] + acc_type, acc_target = target[i:i + 2] acc_types.append(acc_type) patterns.append(checker.ACC_PATTERN[acc_type]) acc_targets.append(acc_target) @@ -104,7 +111,8 @@ def get_accuracy_metric(config, model, path): acc_results = {} with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: for line in f: - for i, (pattern, acc_target, acc_type) in enumerate(zip(patterns, acc_targets, acc_types)): + for i, (pattern, acc_target, acc_type) in enumerate( + zip(patterns, acc_targets, acc_types)): m = re.match(pattern, line) if m: acc = m.group(1) @@ -116,31 +124,34 @@ def get_accuracy_metric(config, model, path): acc_seen[i] = True elif acc is not None: all_accuracy_valid = False - #log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) + # log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) if i == 0 and acc: result_acc = acc acc = None if acc_upper_limit is not None: - for i, (pattern, acc_limit) in enumerate(zip(up_patterns, acc_limits)): + for i, (pattern, acc_limit) in enumerate( + zip(up_patterns, acc_limits)): if not pattern: continue m = re.match(pattern, line) if m: acc = m.group(1) - if acc is not None and acc_upper_limit is not None and float(acc) > acc_limit: + if acc is not None and acc_upper_limit is not None and float( + acc) > acc_limit: acc_limit_check = False - #log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) + # log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) acc = None if all(acc_seen): - break; + break is_valid = all_accuracy_valid & all(acc_seen) if acc_upper_limit is not None: is_valid &= acc_limit_check - return is_valid, acc_results, acc_targets, acc_limits -def get_result_string(version, model, scenario, 
result_path, has_power, sub_res, division="open", system_json=None, model_precision="fp32", inference_src_version = None): + +def get_result_string(version, model, scenario, result_path, has_power, sub_res, + division="open", system_json=None, model_precision="fp32", inference_src_version=None): config = checker.Config( version, @@ -163,21 +174,25 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, if inference_src_version: version_tuple = tuple(map(int, inference_src_version.split('.'))) - if version_tuple and version_tuple >= (4,1,22): - performance_result = checker.get_performance_metric(config, mlperf_model, performance_path, scenario) + if version_tuple and version_tuple >= (4, 1, 22): + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario) else: - performance_result = checker.get_performance_metric(config, mlperf_model, performance_path, scenario, None, None) + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario, None, None) if "stream" in scenario.lower(): - performance_result_ = performance_result / 1000000 #convert to milliseconds + performance_result_ = performance_result / 1000000 # convert to milliseconds else: performance_result_ = performance_result result['performance'] = round(performance_result_, 3) if scenario != effective_scenario: - inferred, inferred_result = checker.get_inferred_result(scenario, effective_scenario, performance_result, mlperf_log, config, False) + inferred, inferred_result = checker.get_inferred_result( + scenario, effective_scenario, performance_result, mlperf_log, config, False) if has_power: - is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric(config, scenario, performance_path, True, performance_result) + is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + config, scenario, performance_path, True, performance_result) if "stream" in scenario.lower(): power_metric_unit = "milliJoules" else: @@ -189,22 +204,25 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, result['power'] = power_result result['power_efficiency'] = power_efficiency_result - compliance_list = [ "TEST01", "TEST04", "TEST06" ] + compliance_list = ["TEST01", "TEST04", "TEST06"] if division == "closed": for test in compliance_list: test_path = os.path.join(result_path, test) - if os.path.exists(test_path): #We dont consider missing test folders now - submission checker will do that - #test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) - test_pass = checker.check_compliance_perf_dir(test_path) if test != "TEST06" else True - if test_pass and test in [ "TEST01", "TEST06" ]: - #test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) - pass # accuracy truncation script is done after submission generation. We assume here that it'll pass + if os.path.exists( + test_path): # We dont consider missing test folders now - submission checker will do that + # test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) + test_pass = checker.check_compliance_perf_dir( + test_path) if test != "TEST06" else True + if test_pass and test in ["TEST01", "TEST06"]: + # test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) + pass # accuracy truncation script is done after submission generation. 
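The `get_accuracy_metric` hunk above walks `accuracy.txt` line by line, trying one regex per metric and reading the measured value out of the first capture group before comparing it against the target (and, when present, an upper limit). A minimal runnable sketch of that scan, using an assumed stand-in pattern rather than the real `checker.ACC_PATTERN` table:

```python
import re

# Assumed stand-in for one entry of checker.ACC_PATTERN (illustrative only).
ACC_PATTERN = {'acc': r"accuracy=([\d.]+)"}
targets = [('acc', 75.0)]          # (metric, required target), as in the hunk

line = "accuracy=76.123, good=3840, total=5000"
for acc_type, acc_target in targets:
    m = re.match(ACC_PATTERN[acc_type], line)
    if m:
        acc = float(m.group(1))    # first capture group holds the value
        print(acc_type, acc, acc >= acc_target)   # acc 76.123 True
```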
We assume here that it'll pass if test_pass: result[test] = "passed" else: result[test] = "failed" - acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric(config, mlperf_model, accuracy_path) + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, accuracy_path) result_field = checker.RESULT_FIELD[effective_scenario] @@ -227,7 +245,7 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, if len(accuracy_results) == 1: accuracy_result = accuracy_results[0] else: - accuracy_result = "(" + ", ".join(accuracy_results)+")" + accuracy_result = "(" + ", ".join(accuracy_results) + ")" result['accuracy'] = accuracy_result result_string = f"\n\n## Results\n" @@ -238,13 +256,20 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, if has_power: result_string += "\n### Power Results \n" + power_result_string - return result_string, result -def get_result_table(results): +def get_result_table(results): - headers = ["Model", "Scenario", "Accuracy", "Throughput", "Latency (in ms)", "Power Efficiency (in samples/J)", "TEST01", "TEST04"] + headers = [ + "Model", + "Scenario", + "Accuracy", + "Throughput", + "Latency (in ms)", + "Power Efficiency (in samples/J)", + "TEST01", + "TEST04"] table = [] for model in results: for scenario in results[model]: @@ -254,7 +279,7 @@ def get_result_table(results): if results[model][scenario].get('accuracy'): val = str(results[model][scenario]['accuracy']) if not results[model][scenario].get('accuracy_valid', True): - val = "X "+val + val = "X " + val row.append(val) else: row.append("-") @@ -265,38 +290,52 @@ def get_result_table(results): if float(results[model][scenario]['performance']) == 0: row.append("-") elif scenario.lower() == "singlestream": - val_qps = str(round(1000/float(results[model][scenario]['performance']), 3)) - if not results[model][scenario].get('performance_valid', True): # we explicitly mark invalid results - val_qps = "X "+val_qps + val_qps = str( + round( + 1000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): # we explicitly mark invalid results + val_qps = "X " + val_qps row.append(val_qps) elif scenario.lower() == "multistream": - val_qps = str(round(8000/float(results[model][scenario]['performance']), 3)) - if not results[model][scenario].get('performance_valid', True): - val_qps = "X "+val_qps + val_qps = str( + round( + 8000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): + val_qps = "X " + val_qps row.append(val_qps) val = str(results[model][scenario]['performance']) - if not results[model][scenario].get('performance_valid', True): - val = "X "+val + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val row.append(val) else: val = str(results[model][scenario]['performance']) - if not results[model][scenario].get('performance_valid', True): - val = "X "+val + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val row.append(val) row.append("-") val1 = results[model][scenario].get('TEST01') - #val2 = results[model][scenario].get('TEST05') + # val2 = results[model][scenario].get('TEST05') val3 = results[model][scenario].get('TEST04') - #if results[model][scenario].get('power','') != '': + # if results[model][scenario].get('power','') != '': # row.append(results[model][scenario]['power']) - if 
results[model][scenario].get('power_efficiency','') != '': + if results[model][scenario].get('power_efficiency', '') != '': val = str(results[model][scenario]['power_efficiency']) if not results[model][scenario].get('power_valid', True): - val = "X "+val + val = "X " + val row.append(val) - elif val1 or val3: #Don't output unless there are any further column data + elif val1 or val3: # Don't output unless there are any further column data row.append(None) if val1: diff --git a/script/get-mlperf-logging/customize.py b/script/get-mlperf-logging/customize.py index ac1a2a641e..fea70fc389 100644 --- a/script/get-mlperf-logging/customize.py +++ b/script/get-mlperf-logging/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] @@ -11,11 +12,12 @@ def preprocess(i): env['CM_MLPERF_LOGGING_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - env['+PYTHONPATH'] = [ env['CM_MLPERF_LOGGING_SRC_PATH'] ] + env['+PYTHONPATH'] = [env['CM_MLPERF_LOGGING_SRC_PATH']] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-power-dev/customize.py b/script/get-mlperf-power-dev/customize.py index 50afb3ba49..d091365bb7 100644 --- a/script/get-mlperf-power-dev/customize.py +++ b/script/get-mlperf-power-dev/customize.py @@ -2,11 +2,12 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] - return {'return':0} + return {'return': 0} def postprocess(i): @@ -16,6 +17,6 @@ def postprocess(i): env['CM_VERSION'] = "master" if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': - env['CM_VERSION'] += "-git-"+env['CM_GIT_REPO_CURRENT_HASH'] + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] - return {'return':0, 'version': env['CM_VERSION']} + return {'return': 0, 'version': env['CM_VERSION']} diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py index 93a162b980..a2e5243154 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py @@ -2,12 +2,14 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] # if os_info['platform'] == 'windows': -# return {'return':1, 'error': 'Windows is not supported in this script yet'} +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} env = i['env'] meta = i['meta'] @@ -15,7 +17,7 @@ def preprocess(i): if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' - return {'return':0} + return {'return': 0} def postprocess(i): @@ -28,31 +30,37 @@ def postprocess(i): env['CM_EEMBC_ENERGY_RUNNER_SRC_DATASETS'] = datasets_src_path # Get user directory for EEMBC runner path - home_directory = os.path.expanduser( '~' ) + home_directory = os.path.expanduser('~') sessions_path = os.path.join(home_directory, 'eembc', 'runner', 'sessions') - print ('') - print ('Path to EEMBC runner sessions: {}'.format(sessions_path)) + print('') + print('Path to EEMBC runner sessions: {}'.format(sessions_path)) env['CM_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path if not os.path.isdir(sessions_path): os.makedirs(sessions_path) - datasets_path = os.path.join(home_directory, 'eembc', 'runner', 'benchmarks', 'ulp-mlperf', 'datasets') + datasets_path = os.path.join( + home_directory, + 'eembc', + 'runner', + 'benchmarks', + 'ulp-mlperf', + 'datasets') - print ('') - print ('Path to EEMBC runner datasets: {}'.format(datasets_path)) + print('') + print('Path 
to EEMBC runner datasets: {}'.format(datasets_path)) if not os.path.isdir(datasets_path): os.makedirs(datasets_path) env['CM_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path - print ('') - print ('Copying datasets to EEMBC user space ...') + print('') + print('Copying datasets to EEMBC user space ...') shutil.copytree(datasets_src_path, datasets_path, dirs_exist_ok=True) - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-tiny-src/customize.py b/script/get-mlperf-tiny-src/customize.py index f361f009f8..f07ee475db 100644 --- a/script/get-mlperf-tiny-src/customize.py +++ b/script/get-mlperf-tiny-src/customize.py @@ -2,12 +2,14 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] # if os_info['platform'] == 'windows': -# return {'return':1, 'error': 'Windows is not supported in this script yet'} +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} env = i['env'] meta = i['meta'] @@ -15,7 +17,7 @@ def preprocess(i): if 'CM_GIT_DEPTH' not in env: env['CM_GIT_DEPTH'] = '' - return {'return':0} + return {'return': 0} def postprocess(i): @@ -24,18 +26,30 @@ def postprocess(i): state = i['state'] env['CM_MLPERF_TINY_SRC'] = os.path.join(os.getcwd(), 'src') - env['CM_MLPERF_TINY_BENCHMARK'] = os.path.join(os.getcwd(), 'src', 'benchmark') - env['CM_MLPERF_TINY_DATASETS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets') - env['CM_MLPERF_TINY_DATASETS_AD'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ad01') - env['CM_MLPERF_TINY_DATASETS_IC'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ic01') - env['CM_MLPERF_TINY_DATASETS_KWS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01') - env['CM_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01-open') - env['CM_MLPERF_TINY_DATASETS_VWW'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'vww01') - env['CM_MLPERF_TINY_TRAINING'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training') - env['CM_MLPERF_TINY_TRAINING_AD'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'anomaly_detection') - env['CM_MLPERF_TINY_TRAINING_IC'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'image_classification') - env['CM_MLPERF_TINY_TRAINING_KWS'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'keyword_spotting') - env['CM_MLPERF_TINY_TRAINING_VWW'] = os.path.join(os.getcwd(), 'src', 'benchmark', 'training', 'visual_wake_words') + env['CM_MLPERF_TINY_BENCHMARK'] = os.path.join( + os.getcwd(), 'src', 'benchmark') + env['CM_MLPERF_TINY_DATASETS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets') + env['CM_MLPERF_TINY_DATASETS_AD'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ad01') + env['CM_MLPERF_TINY_DATASETS_IC'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ic01') + env['CM_MLPERF_TINY_DATASETS_KWS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01') + env['CM_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01-open') + env['CM_MLPERF_TINY_DATASETS_VWW'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'vww01') + env['CM_MLPERF_TINY_TRAINING'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training') + env['CM_MLPERF_TINY_TRAINING_AD'] = 
os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'anomaly_detection') + env['CM_MLPERF_TINY_TRAINING_IC'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'image_classification') + env['CM_MLPERF_TINY_TRAINING_KWS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'keyword_spotting') + env['CM_MLPERF_TINY_TRAINING_VWW'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'visual_wake_words') # 20221024: we save and restore env in the main script and can clean env here for determinism # if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] @@ -43,4 +57,4 @@ def postprocess(i): # env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) # env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-training-nvidia-code/customize.py b/script/get-mlperf-training-nvidia-code/customize.py index a58acfbad3..4a2d1f33d0 100644 --- a/script/get-mlperf-training-nvidia-code/customize.py +++ b/script/get-mlperf-training-nvidia-code/customize.py @@ -1,22 +1,25 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} def postprocess(i): env = i['env'] - env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join(env['CM_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA") + env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join( + env['CM_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA") if not os.path.exists(env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']): - return {'return': 1, 'error': f'Nvidia code path not found in the repository{env["CM_MLPERF_TRAINING_RESULTS_PATH"]}'} + return { + 'return': 1, 'error': f'Nvidia code path not found in the repository{env["CM_MLPERF_TRAINING_RESULTS_PATH"]}'} env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-mlperf-training-src/customize.py b/script/get-mlperf-training-src/customize.py index d2de607ed2..1af8336a89 100644 --- a/script/get-mlperf-training-src/customize.py +++ b/script/get-mlperf-training-src/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): env = i['env'] @@ -12,11 +13,12 @@ def preprocess(i): patch_files = env['CM_GIT_PATCH_FILENAMES'].split(",") patch_files_full_paths = [] for patch_file in patch_files: - patch_file_full_path = os.path.join(script_path, "patch", patch_file) + patch_file_full_path = os.path.join( + script_path, "patch", patch_file) patch_files_full_paths.append(patch_file_full_path) env['CM_GIT_PATCH_FILEPATHS'] = ",".join(patch_files_full_paths) - return {'return':0} + return {'return': 0} def postprocess(i): @@ -24,4 +26,4 @@ def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/get-nvidia-mitten/customize.py b/script/get-nvidia-mitten/customize.py index cc9342a50a..983d681f4a 100644 --- a/script/get-nvidia-mitten/customize.py +++ b/script/get-nvidia-mitten/customize.py @@ -1,13 +1,15 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] # TBD - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -17,6 +19,4 @@ def postprocess(i): # TBD cur_dir = os.getcwd() - - - return {'return':0} + return {'return': 0} diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py index 
5fb09c9218..ebb13a72fb 100644 --- a/script/get-onnxruntime-prebuilt/customize.py +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -1,25 +1,30 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - machine = env.get('CM_HOST_OS_MACHINE','') - if machine == '': machine = 'x86_64' - if machine == 'x86_64': machine = 'x64' + machine = env.get('CM_HOST_OS_MACHINE', '') + if machine == '': + machine = 'x86_64' + if machine == 'x86_64': + machine = 'x64' - hostos=env['CM_HOST_OS_TYPE'] + hostos = env['CM_HOST_OS_TYPE'] ext = '.tgz' - if hostos =='darwin': hostos='osx' - elif hostos =='windows': - hostos='win' + if hostos == 'darwin': + hostos = 'osx' + elif hostos == 'windows': + hostos = 'win' ext = '.zip' - device=env.get('CM_ONNXRUNTIME_DEVICE','') - if device!='': machine+='-'+device + device = env.get('CM_ONNXRUNTIME_DEVICE', '') + if device != '': + machine += '-' + device version = env['CM_VERSION'] @@ -27,34 +32,39 @@ def preprocess(i): FILENAME = FOLDER + ext - URL = 'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format(version, FILENAME) + URL = 'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format( + version, FILENAME) - print ('') - print ('Downloading from {}'.format(URL)) - print ('') + print('') + print('Downloading from {}'.format(URL)) + print('') env['FOLDER'] = FOLDER env['FILENAME'] = FILENAME env['URL'] = URL + return {'return': 0} - return {'return':0} - def postprocess(i): env = i['env'] - hostos=env['CM_HOST_OS_TYPE'] + hostos = env['CM_HOST_OS_TYPE'] install_folder = env['CM_TMP_INSTALL_FOLDER'] - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] - include_path = os.path.join(os.getcwd(), 'install', install_folder, 'include') + include_path = os.path.join( + os.getcwd(), + 'install', + install_folder, + 'include') env['+C_INCLUDE_PATH'].append(include_path) env['+CPLUS_INCLUDE_PATH'].append(include_path) @@ -64,11 +74,11 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - if hostos =='windows': + if hostos == 'windows': # For dynamic libraries env['+PATH'] = [lib_path] env['CM_ONNXRUNTIME_LIB_PATH'] = lib_path env['CM_ONNXRUNTIME_INCLUDE_PATH'] = include_path - return {'return':0} + return {'return': 0} diff --git a/script/get-openssl/customize.py b/script/get-openssl/customize.py index 9d126fd79e..2ac5ac73b8 100644 --- a/script/get-openssl/customize.py +++ b/script/get-openssl/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,45 +14,49 @@ def preprocess(i): if 'CM_OPENSSL_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_OPENSSL_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':i['recursion_spaces']}) - if r['return']>0: + 'detect_version': True, + 'env_path_key': 'CM_OPENSSL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 
'recursion_spaces': i['recursion_spaces']}) + if r['return'] > 0: if r['return'] == 16 and os_info['platform'] != 'windows': env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'OpenSSL\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_OPENSSL_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_OPENSSL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_OPENSSL_BIN_WITH_PATH'] found_path = os.path.dirname(found_file_path) env['CM_OPENSSL_INSTALLED_PATH'] = found_path - # Save tags that can be used to specialize further dependencies (such as python packages) - tags = 'version-'+version + # Save tags that can be used to specialize further dependencies (such as + # python packages) + tags = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-platform-details/customize.py b/script/get-platform-details/customize.py index faeaa802f5..485e5cee62 100644 --- a/script/get-platform-details/customize.py +++ b/script/get-platform-details/customize.py @@ -2,21 +2,25 @@ import os import subprocess + def check_installation(command, os_info): if os_info['platform'] == "windows": - return subprocess.call([command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 + return subprocess.call( + [command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 elif os_info['platform'] == "linux": - return subprocess.call(['which',command], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 #0 means the package is there + return subprocess.call(['which', command], stdout=subprocess.PIPE, + stderr=subprocess.PIPE) == 0 # 0 means the package is there + def preprocess(i): os_info = i['os_info'] env = i['env'] - if not check_installation("numactl",os_info): + if not check_installation("numactl", os_info): env['CM_INSTALL_NUMACTL'] = 'True' - #if not check_installation("cpupower",os_info): + # if not check_installation("cpupower",os_info): env['CM_INSTALL_CPUPOWER'] = 'True' if env.get('CM_PLATFORM_DETAILS_FILE_PATH', '') == '': @@ -24,9 +28,10 @@ def preprocess(i): env['CM_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() if env.get('CM_PLATFORM_DETAILS_FILE_NAME', '') == '': env['CM_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" - env['CM_PLATFORM_DETAILS_FILE_PATH'] = os.path.join(env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) + env['CM_PLATFORM_DETAILS_FILE_PATH'] = os.path.join( + env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) - return {'return':0} + return {'return': 0} def postprocess(i): @@ -39,4 +44,4 @@ def postprocess(i): automation = i['automation'] - return {'return':0} + return {'return': 0} diff --git a/script/get-preprocessed-dataset-criteo/customize.py b/script/get-preprocessed-dataset-criteo/customize.py index 3fc23050d0..bb60894f6d 100644 --- a/script/get-preprocessed-dataset-criteo/customize.py +++ 
b/script/get-preprocessed-dataset-criteo/customize.py @@ -2,6 +2,7 @@ import os import shutil + def preprocess(i): env = i['env'] @@ -12,24 +13,33 @@ def preprocess(i): Path with preprocessed dataset given as input ''' skip_preprocessing = True - print("Using preprocessed criteo dataset from '" + env['CM_DATASET_PREPROCESSED_PATH'] +"'") + print("Using preprocessed criteo dataset from '" + + env['CM_DATASET_PREPROCESSED_PATH'] + "'") - if not skip_preprocessing and env.get('CM_DATASET_PREPROCESSED_OUTPUT_PATH','') != '': + if not skip_preprocessing and env.get( + 'CM_DATASET_PREPROCESSED_OUTPUT_PATH', '') != '': env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() - if not skip_preprocessing and env.get('CM_DATASET_CRITEO_MULTIHOT', '') == 'yes': + if not skip_preprocessing and env.get( + 'CM_DATASET_CRITEO_MULTIHOT', '') == 'yes': i['run_script_input']['script_name'] = "run-multihot" - #${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py + # ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py output_dir = env['CM_DATASET_PREPROCESSED_PATH'] dataset_path = env['CM_DATASET_PATH'] tmp_dir = os.path.join(output_dir, "tmp") - run_dir = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "recommendation_v2", "torchrec_dlrm", "scripts") + run_dir = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + "recommendation_v2", + "torchrec_dlrm", + "scripts") env['CM_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} ' - print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + print("Using MLCommons Training source from '" + + env['CM_MLPERF_TRAINING_SOURCE'] + "'") return {'return': 0} + def postprocess(i): env = i['env'] diff --git a/script/get-preprocessed-dataset-criteo/preprocess.py b/script/get-preprocessed-dataset-criteo/preprocess.py index bd2e5f5430..5a5c429c62 100644 --- a/script/get-preprocessed-dataset-criteo/preprocess.py +++ b/script/get-preprocessed-dataset-criteo/preprocess.py @@ -1,32 +1,35 @@ +import dataset +import criteo import os import sys mlperf_dlrm_path = os.environ['CM_MLPERF_INFERENCE_DLRM_PATH'] python_path = os.path.join(mlperf_dlrm_path, "pytorch", "python") sys.path.insert(0, python_path) -import criteo -import dataset dataset_name = os.environ['CM_DATASET'] dataset_path = os.environ['CM_DATASET_PATH'] dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) -samples_to_aggregate_fix = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_FIX', None) -samples_to_aggregate_min = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_MIN', None) -samples_to_aggregate_max = os.environ.get('CM_DATASET_SAMPLES_TO_AGGREGATE_MAX', None) +samples_to_aggregate_fix = os.environ.get( + 'CM_DATASET_SAMPLES_TO_AGGREGATE_FIX', None) +samples_to_aggregate_min = os.environ.get( + 'CM_DATASET_SAMPLES_TO_AGGREGATE_MIN', None) +samples_to_aggregate_max = os.environ.get( + 'CM_DATASET_SAMPLES_TO_AGGREGATE_MAX', None) count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None -max_ind_range = os.environ.get('CM_DATASET_MAX_IND_RANGE',-1) +max_ind_range = os.environ.get('CM_DATASET_MAX_IND_RANGE', -1) threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) criteo.Criteo(data_path=dataset_path, - name=dataset_name, - pre_process = criteo.pre_process_criteo_dlrm, - use_cache=True, - samples_to_aggregate_fix=samples_to_aggregate_fix, - samples_to_aggregate_min=samples_to_aggregate_min, - 
samples_to_aggregate_max=samples_to_aggregate_max, - max_ind_range=max_ind_range, - count=count, - mlperf_bin_loader=False, - test_num_workers=threads - ) + name=dataset_name, + pre_process=criteo.pre_process_criteo_dlrm, + use_cache=True, + samples_to_aggregate_fix=samples_to_aggregate_fix, + samples_to_aggregate_min=samples_to_aggregate_min, + samples_to_aggregate_max=samples_to_aggregate_max, + max_ind_range=max_ind_range, + count=count, + mlperf_bin_loader=False, + test_num_workers=threads + ) diff --git a/script/get-preprocessed-dataset-generic/customize.py b/script/get-preprocessed-dataset-generic/customize.py index b882aaf93d..c470734b90 100644 --- a/script/get-preprocessed-dataset-generic/customize.py +++ b/script/get-preprocessed-dataset-generic/customize.py @@ -1,10 +1,11 @@ from cmind import utils import os + def preprocess(i): env = i['env'] path = i['run_script_input']['path'] - env['+PYTHONPATH'] = [ os.path.join(path, "src") ] + env['+PYTHONPATH'] = [os.path.join(path, "src")] return {'return': 0} diff --git a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py index 68aa28997e..05d65cdd29 100644 --- a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py +++ b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py @@ -1,36 +1,39 @@ #!/usr/bin/env python3 +import numpy as np +import cv2 +import os supported_extensions = ['jpeg', 'jpg', 'gif', 'png'] -import os -import cv2 -import numpy as np # Load and preprocess image + def load_image(image_path, # Full path to processing image target_size, # Desired size of resulting image - intermediate_size = 0, # Scale to this size then crop to target size - crop_percentage = 87.5,# Crop to this percentage then scale to target size - data_type = 'uint8', # Data type to store - data_layout = 'nhwc', # Data layout to store - convert_to_bgr = False,# Swap image channel RGB -> BGR - interpolation_method = cv2.INTER_LINEAR # Interpolation method. + intermediate_size=0, # Scale to this size then crop to target size + crop_percentage=87.5, # Crop to this percentage then scale to target size + data_type='uint8', # Data type to store + data_layout='nhwc', # Data layout to store + convert_to_bgr=False, # Swap image channel RGB -> BGR + interpolation_method=cv2.INTER_LINEAR # Interpolation method. ): out_height = target_size - out_width = target_size + out_width = target_size def resize_with_aspectratio(img): height, width, _ = img.shape - new_height = int(100. * out_height / crop_percentage) # intermediate oversized image from which to crop - new_width = int(100. * out_width / crop_percentage) # ---------------------- ,, --------------------- + # intermediate oversized image from which to crop + new_height = int(100. * out_height / crop_percentage) + # ---------------------- ,, --------------------- + new_width = int(100. 
* out_width / crop_percentage) if height > width: w = new_width h = int(new_height * height / width) else: h = new_height w = int(new_width * width / height) - img = cv2.resize(img, (w, h), interpolation = interpolation_method) + img = cv2.resize(img, (w, h), interpolation=interpolation_method) return img def center_crop(img): @@ -42,7 +45,6 @@ def center_crop(img): img = img[top:bottom, left:right] return img - img = cv2.imread(image_path) if len(img.shape) < 3 or img.shape[2] != 3: @@ -62,7 +64,7 @@ def center_crop(img): def preprocess_files(selected_filenames, source_dir, destination_dir, crop_percentage, square_side, inter_size, convert_to_bgr, - data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method): + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method): "Go through the selected_filenames and preprocess all the files (optionally normalize and subtract mean)" output_filenames = [] @@ -72,19 +74,19 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, crop_perce full_input_path = os.path.join(source_dir, input_filename) - image_data = load_image(image_path = full_input_path, - target_size = square_side, - intermediate_size = inter_size, - crop_percentage = crop_percentage, - data_type = data_type, - convert_to_bgr = convert_to_bgr, - interpolation_method = interpolation_method) + image_data = load_image(image_path=full_input_path, + target_size=square_side, + intermediate_size=inter_size, + crop_percentage=crop_percentage, + data_type=data_type, + convert_to_bgr=convert_to_bgr, + interpolation_method=interpolation_method) image_data = np.asarray(image_data, dtype=data_type) # Normalize. if normalize_data: - image_data = image_data/127.5 - 1.0 + image_data = image_data / 127.5 - 1.0 # Subtract mean value. if subtract_mean: @@ -99,82 +101,88 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, crop_perce # NHWC -> NCHW. if data_layout == 'nchw': - image_data = image_data[:,:,0:3].transpose(2, 0, 1) + image_data = image_data[:, :, 0:3].transpose(2, 0, 1) # Value 1 for quantization to int8 if quantize == 1: - image_data = quantize_to_int8(image_data, quant_scale, quant_offset) + image_data = quantize_to_int8( + image_data, quant_scale, quant_offset) # Value 1 to convert from int8 to uint8 if convert_to_unsigned == 1: image_data = int8_to_uint8(image_data) - output_filename = input_filename.rsplit('.', 1)[0] + '.' + new_file_extension if new_file_extension else input_filename + output_filename = input_filename.rsplit( + '.', 1)[0] + '.' 
+ new_file_extension if new_file_extension else input_filename full_output_path = os.path.join(destination_dir, output_filename) image_data.tofile(full_output_path) - print("[{}]: Stored {}".format(current_idx+1, full_output_path) ) + print("[{}]: Stored {}".format(current_idx + 1, full_output_path)) output_filenames.append(output_filename) return output_filenames + def quantize_to_int8(image, scale, offset): - quant_image = (image/scale + offset).astype(np.float32) + quant_image = (image / scale + offset).astype(np.float32) output = np.copy(quant_image) gtZero = (quant_image > 0).astype(int) gtZero = gtZero * 0.5 - output=output+gtZero + output = output + gtZero ltZero = (quant_image < 0).astype(int) ltZero = ltZero * (-0.5) - output=output+ltZero + output = output + ltZero return output.astype(np.int8) def int8_to_uint8(image): - image = (image+128).astype(np.uint8) + image = (image + 128).astype(np.uint8) return image + def preprocess(): import sys - source_dir = os.environ['CM_DATASET_PATH'] - destination_dir = os.environ['CM_DATASET_PREPROCESSED_PATH'] - - square_side = int( os.environ['CM_DATASET_INPUT_SQUARE_SIDE'] ) - crop_percentage = float( os.environ['CM_DATASET_CROP_FACTOR'] ) - inter_size = int( os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0) ) - convert_to_bgr = int( os.getenv('CM_DATASET_CONVERT_TO_BGR', 0) ) - offset = int( os.getenv('CM_DATASET_SUBSET_OFFSET', 0) ) - volume = int( os.environ['CM_DATASET_SIZE'] ) - fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') - data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') - data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() - new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') - normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) - subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) - given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') - given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') - quant_scale = float( os.environ['CM_DATASET_QUANT_SCALE'] ) - quant_offset = float( os.environ['CM_DATASET_QUANT_OFFSET'] ) - quantize = int( os.environ['CM_DATASET_QUANTIZE'] ) #1 for quantize to int8 - convert_to_unsigned = int( os.environ['CM_DATASET_CONVERT_TO_UNSIGNED'] ) #1 for int8 to uint8 + source_dir = os.environ['CM_DATASET_PATH'] + destination_dir = os.environ['CM_DATASET_PREPROCESSED_PATH'] + + square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE']) + crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR']) + inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0)) + convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0)) + offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0)) + volume = int(os.environ['CM_DATASET_SIZE']) + fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') + data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() + new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') + normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) + subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) + given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE']) + quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET']) + quantize = int(os.environ['CM_DATASET_QUANTIZE']) # 1 for quantize to int8 + convert_to_unsigned = int( + 
os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 images_list = os.getenv('CM_DATASET_IMAGES_LIST') if given_channel_means: - given_channel_means = [ float(x) for x in given_channel_means.split(' ') ] + given_channel_means = [float(x) + for x in given_channel_means.split(' ')] if given_channel_stds: - given_channel_stds = [ float(x) for x in given_channel_stds.split(' ') ] + given_channel_stds = [float(x) for x in given_channel_stds.split(' ')] - interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') + interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') - print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {},"+ - " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format( + print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {}," + + " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format( source_dir, destination_dir, square_side, crop_percentage, inter_size, convert_to_bgr, offset, volume, fof_name, - data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method) ) + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method)) if interpolation_method == 'INTER_AREA': # Used for ResNet in pre_process_vgg. @@ -190,16 +198,16 @@ def preprocess(): else: filenames = sorted(os.listdir(source_dir)) - if os.path.isdir(source_dir): - sorted_filenames = [filename for filename in filenames if any(filename.lower().endswith(extension) for extension in supported_extensions) and not filename.startswith(".") ] + sorted_filenames = [filename for filename in filenames if any(filename.lower().endswith( + extension) for extension in supported_extensions) and not filename.startswith(".")] total_volume = len(sorted_filenames) - if offset<0: # support offsets "from the right" + if offset < 0: # support offsets "from the right" offset += total_volume - selected_filenames = sorted_filenames[offset:offset+volume] + selected_filenames = sorted_filenames[offset:offset + volume] assert len(selected_filenames) == volume diff --git a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py index 8821bc0d5f..4c6a31dc67 100644 --- a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py +++ b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py @@ -9,6 +9,7 @@ SUPPORTED_EXTENSIONS = ['jpeg', 'jpg', 'gif', 'png'] + def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False, normalize_data=False, normalize_lower=-1, normalize_upper=1, subtract_mean=False, given_channel_means='', given_channel_stds='', @@ -21,14 +22,17 @@ def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False, tensor_image = torchvision.transforms.functional.to_tensor(image) mean = torch.as_tensor(given_channel_means) std = torch.as_tensor(given_channel_stds) - normalized_image = (tensor_image - mean[:, None, None]) / std[:, None, None] 
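The `load_image` hunk here only re-wraps an unchanged computation: a 1-D `(C,)` mean/std tensor is broadcast over height and width with `[:, None, None]`, then the normalized image is resized bilinearly to the target square side. A small sketch of those two steps with illustrative shapes and assumed ImageNet-style statistics (the script actually reads the means/stds from the `CM_DATASET_GIVEN_CHANNEL_*` environment variables):

```python
import torch

# Per-channel normalization: (C,) mean/std broadcast against a (C, H, W) image.
image = torch.rand(3, 4, 4)                      # illustrative CHW tensor
mean = torch.as_tensor([0.485, 0.456, 0.406])    # assumed ImageNet-style stats
std = torch.as_tensor([0.229, 0.224, 0.225])
normalized = (image - mean[:, None, None]) / std[:, None, None]

# Bilinear resize to the target square side; interpolate() needs a 4-D input,
# hence the [None] to add a batch dimension and [0] to drop it again.
resized = torch.nn.functional.interpolate(normalized[None], size=(8, 8),
                                          mode='bilinear')[0]
print(resized.shape)  # torch.Size([3, 8, 8])
```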
+ normalized_image = ( + tensor_image - mean[:, None, None]) / std[:, None, None] resized_image = torch.nn.functional.interpolate(normalized_image[None], - size=(target_size, target_size), + size=(target_size, + target_size), mode='bilinear')[0].numpy() if quantize == 1: - resized_image = quantize_to_uint8(resized_image, quant_scale, quant_offset) + resized_image = quantize_to_uint8( + resized_image, quant_scale, quant_offset) original_height, original_width, _ = resized_image.shape batch_shape = (1, target_size, target_size, 3) @@ -36,12 +40,19 @@ def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False, return batch_data, original_width, original_height + def quantize_to_uint8(image, scale, offset): - quantized_image = (image.astype(np.float64) / scale + offset).astype(np.float64) + quantized_image = ( + image.astype( + np.float64) / + scale + + offset).astype( + np.float64) output = np.round(quantized_image) output = np.clip(output, 0, 255) return output.astype(np.uint8) + def preprocess_files(selected_filenames, source_dir, destination_dir, square_side, data_type, convert_to_bgr, normalize_data, normalize_lower, normalize_upper, subtract_mean, given_channel_means, @@ -73,15 +84,18 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, square_sid image_data.tofile(full_output_path) print(f"[{current_idx+1}]: Stored {full_output_path}") - output_signatures.append(f'{output_filename};{original_width};{original_height}') + output_signatures.append( + f'{output_filename};{original_width};{original_height}') return output_signatures + def preprocess(): source_directory = os.environ['CM_DATASET_PATH'] destination_directory = os.environ['CM_DATASET_PREPROCESSED_PATH'] - intermediate_data_type = os.environ.get('CM_DATASET_INTERMEDIATE_DATA_TYPE', np.float32) + intermediate_data_type = os.environ.get( + 'CM_DATASET_INTERMEDIATE_DATA_TYPE', np.float32) square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE']) crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR']) inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0)) @@ -100,7 +114,8 @@ def preprocess(): quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE']) quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET']) quantize = int(os.environ['CM_DATASET_QUANTIZE']) # 1 for quantize to int8 - convert_to_unsigned = int(os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 + convert_to_unsigned = int( + os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 images_list = os.getenv('CM_DATASET_IMAGES_LIST') interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') @@ -113,21 +128,26 @@ def preprocess(): normalize_upper = float(os.getenv('CM_DATASET_NORMALIZE_UPPER', 1.0)) if given_channel_means: - given_channel_means = np.fromstring(given_channel_means, dtype=np.float32, sep=' ').astype(intermediate_data_type) + given_channel_means = np.fromstring( + given_channel_means, + dtype=np.float32, + sep=' ').astype(intermediate_data_type) if convert_to_bgr: given_channel_means = given_channel_means[::-1] given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') if given_channel_stds: - given_channel_stds = np.fromstring(given_channel_stds, dtype=np.float32, sep=' ').astype(intermediate_data_type) + given_channel_stds = np.fromstring( + given_channel_stds, + dtype=np.float32, + sep=' ').astype(intermediate_data_type) if convert_to_bgr: given_channel_stds = given_channel_stds[::-1] print(f"From: {source_directory}, To: {destination_directory}, 
Size: {square_side}, Crop: {crop_percentage}, InterSize: {inter_size}, 2BGR: {convert_to_bgr}, " + - f"OFF: {offset}, VOL: '{volume}', FOF: {fof_name}, DTYPE: {data_type}, DLAYOUT: {data_layout}, EXT: {new_file_extension}, " + - f"NORM: {normalize_data}, SMEAN: {subtract_mean}, GCM: {given_channel_means}, GSTD: {given_channel_stds}, QUANTIZE: {quantize}, QUANT_SCALE: {quant_scale}, " + - f"QUANT_OFFSET: {quant_offset}, CONV_UNSIGNED: {convert_to_unsigned}, INTER: {interpolation_method}") - + f"OFF: {offset}, VOL: '{volume}', FOF: {fof_name}, DTYPE: {data_type}, DLAYOUT: {data_layout}, EXT: {new_file_extension}, " + + f"NORM: {normalize_data}, SMEAN: {subtract_mean}, GCM: {given_channel_means}, GSTD: {given_channel_stds}, QUANTIZE: {quantize}, QUANT_SCALE: {quant_scale}, " + + f"QUANT_OFFSET: {quant_offset}, CONV_UNSIGNED: {convert_to_unsigned}, INTER: {interpolation_method}") if image_file: source_directory = os.path.dirname(image_file) @@ -136,11 +156,17 @@ def preprocess(): if annotations_filepath and not is_calibration: with open(annotations_filepath, "r") as annotations_fh: annotations_struct = json.load(annotations_fh) - ordered_filenames = [image_entry['file_name'] for image_entry in annotations_struct['images']] + ordered_filenames = [image_entry['file_name'] + for image_entry in annotations_struct['images']] elif os.path.isdir(source_directory): - ordered_filenames = [filename for filename in sorted(os.listdir(source_directory)) if any(filename.lower().endswith(extension) for extension in SUPPORTED_EXTENSIONS)] + ordered_filenames = [ + filename for filename in sorted( + os.listdir(source_directory)) if any( + filename.lower().endswith(extension) for extension in SUPPORTED_EXTENSIONS)] else: - raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source_directory) + raise FileNotFoundError( + errno.ENOENT, os.strerror( + errno.ENOENT), source_directory) total_volume = len(ordered_filenames) @@ -162,5 +188,6 @@ def preprocess(): for filename in output_signatures: fof_file.write(f'{filename}\n') + if __name__ == "__main__": preprocess() diff --git a/script/get-preprocessed-dataset-imagenet/customize.py b/script/get-preprocessed-dataset-imagenet/customize.py index 6a7a992556..60fd02198b 100644 --- a/script/get-preprocessed-dataset-imagenet/customize.py +++ b/script/get-preprocessed-dataset-imagenet/customize.py @@ -4,27 +4,36 @@ import shutil import glob + def preprocess(i): env = i['env'] if 'CM_IMAGENET_PREPROCESSED_PATH' in env: - files = glob.glob(env['CM_IMAGENET_PREPROCESSED_PATH']+"/**/"+env['CM_IMAGENET_PREPROCESSED_FILENAME'], recursive = True) + files = glob.glob( + env['CM_IMAGENET_PREPROCESSED_PATH'] + + "/**/" + + env['CM_IMAGENET_PREPROCESSED_FILENAME'], + recursive=True) if files: env['CM_DATASET_PREPROCESSED_PATH'] = env['CM_IMAGENET_PREPROCESSED_PATH'] else: - return {'return': 1, 'error': 'No preprocessed images found in '+env['CM_IMAGENET_PREPROCESSED_PATH']} + return {'return': 1, 'error': 'No preprocessed images found in ' + + env['CM_IMAGENET_PREPROCESSED_PATH']} else: - if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1": + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() - if env['CM_DATASET_TYPE'] == "validation" and not exists(os.path.join(env['CM_DATASET_PATH'], "val_map.txt")): + if 
env['CM_DATASET_TYPE'] == "validation" and not exists( + os.path.join(env['CM_DATASET_PATH'], "val_map.txt")): shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(env['CM_DATASET_PATH'], - "val_map.txt")) + "val_map.txt")) preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] - if env.get('CM_DATASET_TYPE', '') == "validation" and not exists(os.path.join(preprocessed_path, "val_map.txt")): + if env.get('CM_DATASET_TYPE', '') == "validation" and not exists( + os.path.join(preprocessed_path, "val_map.txt")): shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(preprocessed_path, "val_map.txt")) @@ -37,6 +46,7 @@ def preprocess(i): return {'return': 0} + def postprocess(i): env = i['env'] @@ -46,7 +56,7 @@ def postprocess(i): preprocessed_images_list = [] preprocessed_imagenames_list = [] - match_text = "/*."+env.get("CM_DATASET_PREPROCESSED_EXTENSION","*") + match_text = "/*." + env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*") for filename in sorted(glob.glob(preprocessed_path + match_text)): preprocessed_images_list.append(filename) preprocessed_imagenames_list.append(os.path.basename(filename)) @@ -55,7 +65,9 @@ def postprocess(i): with open("preprocessed_filenames.txt", "w") as f: f.write("\n".join(preprocessed_imagenames_list)) - env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(os.getcwd(), "preprocessed_files.txt") - env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(os.getcwd(), "preprocessed_filenames.txt") + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_filenames.txt") - return {'return':0} + return {'return': 0} diff --git a/script/get-preprocessed-dataset-imagenet/preprocess.py b/script/get-preprocessed-dataset-imagenet/preprocess.py index 6acb29a739..beefd1dcae 100644 --- a/script/get-preprocessed-dataset-imagenet/preprocess.py +++ b/script/get-preprocessed-dataset-imagenet/preprocess.py @@ -16,7 +16,8 @@ dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC') count = int(os.environ.get('CM_DATASET_SIZE', 1)) - preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd()) + preprocessed_dir = os.environ.get( + 'CM_DATASET_PREPROCESSED_PATH', os.getcwd()) threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) threads = int(os.environ.get('CM_NUM_PREPROCESS_THREADS', threads)) @@ -30,11 +31,11 @@ pre_process = dataset.pre_process_vgg imagenet.Imagenet(data_path=dataset_path, - image_list=dataset_list, - name="imagenet", - image_format=img_format, - pre_process = pre_process, - use_cache=True, - count=count, - threads=threads, - preprocessed_dir=preprocessed_dir) + image_list=dataset_list, + name="imagenet", + image_format=img_format, + pre_process=pre_process, + use_cache=True, + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) diff --git a/script/get-preprocessed-dataset-kits19/customize.py b/script/get-preprocessed-dataset-kits19/customize.py index c8a0914d24..3b108f88a8 100644 --- a/script/get-preprocessed-dataset-kits19/customize.py +++ b/script/get-preprocessed-dataset-kits19/customize.py @@ -2,17 +2,25 @@ import os import shutil + def preprocess(i): env = i['env'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") - preprocess_src = os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], 'preprocess.py') - cmd = 
'cd '+ env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + env['CM_DATASET_PATH'] + ' --results_dir ' + os.getcwd() + ' --mode preprocess' + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + preprocess_src = os.path.join( + env['CM_MLPERF_INFERENCE_3DUNET_PATH'], + 'preprocess.py') + cmd = 'cd ' + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + \ + ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + \ + env['CM_DATASET_PATH'] + ' --results_dir ' + \ + os.getcwd() + ' --mode preprocess' env['CM_TMP_CMD'] = cmd return {'return': 0} + def postprocess(i): env = i['env'] if 'CM_DATASET_PREPROCESSED_PATH' not in env: diff --git a/script/get-preprocessed-dataset-librispeech/customize.py b/script/get-preprocessed-dataset-librispeech/customize.py index e5a8a12e2b..7a3f6c73f0 100644 --- a/script/get-preprocessed-dataset-librispeech/customize.py +++ b/script/get-preprocessed-dataset-librispeech/customize.py @@ -2,20 +2,31 @@ import os import shutil + def preprocess(i): env = i['env'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") - preprocess_src = os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], 'pytorch', 'utils', 'convert_librispeech.py') - cmd = 'cd '+ env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json') + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + preprocess_src = os.path.join( + env['CM_MLPERF_INFERENCE_RNNT_PATH'], + 'pytorch', + 'utils', + 'convert_librispeech.py') + cmd = 'cd ' + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + \ + ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + \ + ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json') env['CM_TMP_CMD'] = cmd return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(os.getcwd(), 'dev-clean-wav') - env['CM_DATASET_PREPROCESSED_JSON'] = os.path.join(os.getcwd(), 'dev-clean-wav.json') + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join( + os.getcwd(), 'dev-clean-wav') + env['CM_DATASET_PREPROCESSED_JSON'] = os.path.join( + os.getcwd(), 'dev-clean-wav.json') return {'return': 0} diff --git a/script/get-preprocessed-dataset-openimages/customize.py b/script/get-preprocessed-dataset-openimages/customize.py index fd2adcb5f6..cec47ecea5 100644 --- a/script/get-preprocessed-dataset-openimages/customize.py +++ b/script/get-preprocessed-dataset-openimages/customize.py @@ -3,6 +3,7 @@ import shutil import glob + def preprocess(i): env = i['env'] @@ -10,8 +11,9 @@ def preprocess(i): if 'CM_DATASET_PREPROCESSED_PATH' not in env: env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() - if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1": + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") if env.get('CM_ML_MODEL_NAME', '') == 'retinanet': if env.get('CM_DATASET_QUANTIZE', '') == '1': @@ -22,20 +24,23 @@ def preprocess(i): return {'return': 0} + def 
postprocess(i): env = i['env'] if env["CM_DATASET_TYPE"] == "validation": - env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "annotations") - env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join(env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( + env['CM_DATASET_PREPROCESSED_PATH'], "annotations") + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( + env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") # finalize path preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] preprocessed_images_list = [] preprocessed_imagenames_list = [] - match_text = "/*."+env.get("CM_DATASET_PREPROCESSED_EXTENSION","*") + match_text = "/*." + env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*") for filename in sorted(glob.glob(preprocessed_path + match_text)): preprocessed_images_list.append(filename) preprocessed_imagenames_list.append(os.path.basename(filename)) @@ -44,7 +49,9 @@ def postprocess(i): with open("preprocessed_filenames.txt", "w") as f: f.write("\n".join(preprocessed_imagenames_list)) - env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join(os.getcwd(), "preprocessed_files.txt") - env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join(os.getcwd(), "preprocessed_filenames.txt") + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_filenames.txt") return {'return': 0} diff --git a/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py index 423bfe7594..cdafac1231 100644 --- a/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py +++ b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py @@ -29,7 +29,8 @@ from code.common.image_preprocessor import ImagePreprocessor, center_crop, resize_with_aspectratio -def preprocess_openimage_for_retinanet(data_dir, preprocessed_data_dir, formats, overwrite=False, cal_only=False, val_only=False): +def preprocess_openimage_for_retinanet( + data_dir, preprocessed_data_dir, formats, overwrite=False, cal_only=False, val_only=False): def loader(fpath): loaded_tensor = F.to_tensor(Image.open(fpath).convert("RGB")) dtype = torch.float32 @@ -45,7 +46,6 @@ def loader(fpath): img = img_resize.numpy() return img - def quantizer(image): # Dynamic range of image is [-2.64064, 2.64064] based on calibration cache. # Calculated by: @@ -54,23 +54,33 @@ def quantizer(image): image_int8 = image.clip(-max_abs, max_abs) / max_abs * 127.0 return image_int8.astype(dtype=np.int8, order='C') - preprocessor = ImagePreprocessor(loader, quantizer) if not val_only: - # Preprocess calibration set. FP32 only because calibrator always takes FP32 input. + # Preprocess calibration set. FP32 only because calibrator always takes + # FP32 input. preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "calibration", "train", "data"), - os.path.join(preprocessed_data_dir, "open-images-v6-mlperf", "calibration", "Retinanet"), - "data_maps/open-images-v6-mlperf/cal_map.txt", ["fp32"], overwrite) + os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf", + "calibration", + "Retinanet"), + "data_maps/open-images-v6-mlperf/cal_map.txt", ["fp32"], overwrite) if not cal_only: # Preprocess validation set. 
preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "validation", "data"), - os.path.join(preprocessed_data_dir, "open-images-v6-mlperf", "validation", "Retinanet"), - "data_maps/open-images-v6-mlperf/val_map.txt", formats, overwrite) + os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf", + "validation", + "Retinanet"), + "data_maps/open-images-v6-mlperf/val_map.txt", formats, overwrite) def copy_openimage_annotations(data_dir, preprocessed_data_dir): src_dir = os.path.join(data_dir, "open-images-v6-mlperf/annotations") - dst_dir = os.path.join(preprocessed_data_dir, "open-images-v6-mlperf/annotations") + dst_dir = os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf/annotations") if not os.path.exists(dst_dir): shutil.copytree(src_dir, dst_dir) @@ -135,10 +145,17 @@ def main(): default_formats = ["int8_linear"] # Now, actually preprocess the input images - logging.info("Loading and preprocessing images. This might take a while...") + logging.info( + "Loading and preprocessing images. This might take a while...") if args.formats == "default": formats = default_formats - preprocess_openimage_for_retinanet(data_dir, preprocessed_data_dir, formats, overwrite, cal_only, val_only) + preprocess_openimage_for_retinanet( + data_dir, + preprocessed_data_dir, + formats, + overwrite, + cal_only, + val_only) # Copy annotations from data_dir to preprocessed_data_dir. copy_openimage_annotations(data_dir, preprocessed_data_dir) diff --git a/script/get-preprocessed-dataset-openimages/preprocess.py b/script/get-preprocessed-dataset-openimages/preprocess.py index b2b05fe1dc..c5af0ff041 100644 --- a/script/get-preprocessed-dataset-openimages/preprocess.py +++ b/script/get-preprocessed-dataset-openimages/preprocess.py @@ -1,3 +1,6 @@ +import shutil +import dataset +import openimages import os import sys import os.path @@ -6,17 +9,13 @@ python_path = os.path.join(mlperf_src_path, "python") sys.path.insert(0, python_path) -import openimages -import dataset -import shutil - dataset_path = os.environ['CM_DATASET_PATH'] preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd()) if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0": - #import generic_preprocess - #generic_preprocess.preprocess() + # import generic_preprocess + # generic_preprocess.preprocess() import preprocess_object_detection_dataset as pp pp.preprocess() else: @@ -26,22 +25,26 @@ image_width = int(os.environ.get('CM_DATASET_OPENIMAGES_RESIZE', 800)) threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) - name="openimages-" + str(image_width) + "-retinanet" + name = "openimages-" + str(image_width) + "-retinanet" openimages.OpenImages(data_path=dataset_path, - image_list=dataset_list, - name=name, - image_format=img_format, - pre_process = dataset.pre_process_openimages_retinanet, - use_cache=True, - image_size=[image_width, image_width, 3], - count=count, - threads=threads, - preprocessed_dir=preprocessed_dir) + image_list=dataset_list, + name=name, + image_format=img_format, + pre_process=dataset.pre_process_openimages_retinanet, + use_cache=True, + image_size=[image_width, image_width, 3], + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) if os.environ["CM_DATASET_TYPE"] == "validation": - src_path=os.environ.get('CM_DATASET_ANNOTATIONS_DIR_PATH', os.path.join(dataset_path, "annotations")) - dest_path=os.path.join(preprocessed_dir, "annotations") + src_path = os.environ.get( + 
'CM_DATASET_ANNOTATIONS_DIR_PATH', + os.path.join( + dataset_path, + "annotations")) + dest_path = os.path.join(preprocessed_dir, "annotations") if not os.path.exists(dest_path): shutil.copytree(src_path, dest_path) diff --git a/script/get-preprocessed-dataset-openorca/customize.py b/script/get-preprocessed-dataset-openorca/customize.py index 25b3acdc9a..7c042c839b 100644 --- a/script/get-preprocessed-dataset-openorca/customize.py +++ b/script/get-preprocessed-dataset-openorca/customize.py @@ -2,21 +2,27 @@ import os import shutil + def preprocess(i): env = i['env'] - if str(env.get('CM_DATASET_PREPROCESSED_BY_MLC','')).lower() in [ "yes", "1", "true" ]: + if str(env.get('CM_DATASET_PREPROCESSED_BY_MLC', '') + ).lower() in ["yes", "1", "true"]: run_dir = os.getcwd() if env.get('CM_DATASET_CALIBRATION', '') == "yes": - env['CM_DATASET_CALIBRATION_PATH'] = os.path.join(env['CM_OPENORCA_PREPROCESSED_ROOT'], "open_orca_gpt4_tokenized_llama.calibration_1000.pkl.gz") + env['CM_DATASET_CALIBRATION_PATH'] = os.path.join( + env['CM_OPENORCA_PREPROCESSED_ROOT'], + "open_orca_gpt4_tokenized_llama.calibration_1000.pkl.gz") env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_CALIBRATION_PATH'] env['CM_DATASET_OPENORCA_CALIBRATION_PATH'] = env['CM_DATASET_CALIBRATION_PATH'] else: - env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(env['CM_OPENORCA_PREPROCESSED_ROOT'], "open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz") + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join( + env['CM_OPENORCA_PREPROCESSED_ROOT'], + "open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz") env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] env['CM_DATASET_OPENORCA_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] - #run_cmd = f"gunzip -k {env['CM_DATASET_PREPROCESSED_PATH']}" + # run_cmd = f"gunzip -k {env['CM_DATASET_PREPROCESSED_PATH']}" run_cmd = '' else: inference_src = env['CM_MLPERF_INFERENCE_SOURCE'] @@ -25,14 +31,24 @@ def preprocess(i): if env.get('CM_DATASET_CALIBRATION', '') == "yes": return {'return': 1, 'error': 'No raw preprocessing information is available for openorca calibration. 
Please use _mlcommons variation to use the MLCommons shared calibration dataset'} else: - env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(os.path.join(os.getcwd(), "processed-openorca", 'open_orca_gpt4_tokenized_llama.sampled_'+env['CM_DATASET_SIZE']+'.pkl')) - run_cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['CM_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir +' --seqlen_limit=2048 --export_dir=' + os.path.join(os.getcwd(), "processed-openorca") + ' --num_total_samples=' + env['CM_DATASET_SIZE'] + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join( + os.path.join( + os.getcwd(), + "processed-openorca", + 'open_orca_gpt4_tokenized_llama.sampled_' + + env['CM_DATASET_SIZE'] + + '.pkl')) + run_cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['CM_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir + \ + ' --seqlen_limit=2048 --export_dir=' + \ + os.path.join(os.getcwd(), "processed-openorca") + \ + ' --num_total_samples=' + env['CM_DATASET_SIZE'] env['CM_RUN_DIR'] = run_dir env['CM_RUN_CMD'] = run_cmd return {'return': 0} + def postprocess(i): env = i['env'] diff --git a/script/get-preprocessed-dataset-squad/customize.py b/script/get-preprocessed-dataset-squad/customize.py index 8e0ff47db5..1f72f139f6 100644 --- a/script/get-preprocessed-dataset-squad/customize.py +++ b/script/get-preprocessed-dataset-squad/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,10 +15,18 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') if env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "one": - env['DATASET_CALIBRATION_FILE'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'calibration', 'SQuAD-v1.1', 'bert_calibration_features.txt') + env['DATASET_CALIBRATION_FILE'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'calibration', + 'SQuAD-v1.1', + 'bert_calibration_features.txt') env['DATASET_CALIBRATION_ID'] = 1 elif env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "two": - env['DATASET_CALIBRATION_FILE'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'calibration', 'SQuAD-v1.1', 'bert_calibration_qas_ids.txt') + env['DATASET_CALIBRATION_FILE'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'calibration', + 'SQuAD-v1.1', + 'bert_calibration_qas_ids.txt') env['DATASET_CALIBRATION_ID'] = 2 else: env['DATASET_CALIBRATION_FILE'] = "''" @@ -32,7 +41,8 @@ def preprocess(i): env['+PYTHONPATH'].append(env['CM_MLPERF_INFERENCE_BERT_PATH']) - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -42,11 +52,15 @@ def postprocess(i): if env.get('CM_DATASET_SQUAD_PACKED', '') != "yes": env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] = cur if env.get('CM_DATASET_RAW', '') == "yes": - env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_input_ids.raw') - env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_segment_ids.raw') - env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join(cur, 'bert_tokenized_squad_v1_1_input_mask.raw') + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_input_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_segment_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_input_mask.raw') else: - env['CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join(cur, 
'bert_tokenized_squad_v1_1.pickle') + env['CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1.pickle') env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] = env['CM_DATASET_MAX_SEQ_LENGTH'] env['CM_DATASET_SQUAD_TOKENIZED_DOC_STRIDE'] = env['CM_DATASET_DOC_STRIDE'] @@ -56,7 +70,28 @@ def postprocess(i): with open("packed_filenames.txt", "w") as f: for dirname in os.listdir(cur): if os.path.isdir(dirname) and not dirname.startswith("_"): - f.write(os.path.join(cur, dirname, "input_ids.raw") + "," + os.path.join(cur, dirname, "input_mask.raw") + "," + os.path.join(cur, dirname, "segment_ids.raw") + "," + os.path.join(cur, dirname, "input_position_ids.raw")+ "\n") - env['CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join(cur, "packed_filenames.txt") - - return {'return':0} + f.write( + os.path.join( + cur, + dirname, + "input_ids.raw") + + "," + + os.path.join( + cur, + dirname, + "input_mask.raw") + + "," + + os.path.join( + cur, + dirname, + "segment_ids.raw") + + "," + + os.path.join( + cur, + dirname, + "input_position_ids.raw") + + "\n") + env['CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join( + cur, "packed_filenames.txt") + + return {'return': 0} diff --git a/script/get-python3/customize.py b/script/get-python3/customize.py index ab00dc9c99..1f05701a48 100644 --- a/script/get-python3/customize.py +++ b/script/get-python3/customize.py @@ -1,40 +1,46 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - if env.get('CM_PYTHON_CONDA', '') == 'yes' and env.get('CM_CONDA_BIN_PATH', '') != '': - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_BIN_PATH'], "python") + if env.get('CM_PYTHON_CONDA', '') == 'yes' and env.get( + 'CM_CONDA_BIN_PATH', '') != '': + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_BIN_PATH'], "python") recursion_spaces = i['recursion_spaces'] # we need to understand whether this script is called first and CM_PYTHON_BIN_WITH_PATH is empty # then we should search for related artifacts (python in our case) # or this script is called after install-python* and CM_PYTHON_BIN_WITH_PATH is set there - # then we do not search for an artifact (python) but pick it up from the installation + # then we do not search for an artifact (python) but pick it up from the + # installation if 'CM_PYTHON_BIN_WITH_PATH' not in env: - #file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python[0-9|\.]*$' + # file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python[0-9|\.]*$' file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' - extra_paths = {"include" : "+C_INCLUDE_PATH", "lib" : "+LD_LIBRARY_PATH"} + extra_paths = {"include": "+C_INCLUDE_PATH", "lib": "+LD_LIBRARY_PATH"} r = i['automation'].find_artifact({'file_name': file_name, 'default_path_env_key': 'PATH', 'env': env, - 'os_info':os_info, - # this key defines env key with paths where to find an artifact - 'detect_version':True, - # the next key is used in run.sh to detect python version - 'env_path_key':'CM_PYTHON_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':i['recursion_spaces'], + 'os_info': os_info, + # this key defines env key with + # paths where to find an artifact + 'detect_version': True, + # the next key is used in run.sh to + # detect python version + 'env_path_key': 'CM_PYTHON_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': i['recursion_spaces'], 'extra_paths': 
extra_paths }) - if r['return']>0: + if r['return'] > 0: if r['return'] == 16 and os_info['platform'] != 'windows': # If artifact is not found and we are not on windows # we should try to install python from src @@ -45,20 +51,23 @@ def preprocess(i): else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'Python\s*([\d.]+)', 'group_number': 1, - 'env_key':'CM_PYTHON_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_PYTHON_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): @@ -66,7 +75,8 @@ def postprocess(i): os_info = i['os_info'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] @@ -77,12 +87,13 @@ def postprocess(i): env['CM_PYTHON_BIN'] = os.path.basename(found_file_path) env['CM_PYTHON_BIN_PATH'] = os.path.dirname(found_file_path) - # Save tags that can be used to specialize further dependencies (such as python packages) - tags = 'version-'+version + # Save tags that can be used to specialize further dependencies (such as + # python packages) + tags = 'version-' + version add_extra_cache_tags = [] - extra_tags = env.get('CM_EXTRA_CACHE_TAGS','') + extra_tags = env.get('CM_EXTRA_CACHE_TAGS', '') if extra_tags != '': tags += ',' + extra_tags @@ -106,16 +117,17 @@ def postprocess(i): # but keep LD_LIBRARY_PATH and C_INCLUDE_PATH from the native python for k in ['+PATH']: if k in env: - del(env[k]) + del (env[k]) elif os_info['platform'] == 'windows': extra_path = os.path.join(found_path, 'Scripts') - if extra_path not in default_path_list and extra_path+os.sep not in default_path_list: - paths = env.get('+PATH',[]) + if extra_path not in default_path_list and extra_path + \ + os.sep not in default_path_list: + paths = env.get('+PATH', []) if extra_path not in paths: paths.append(extra_path) - env['+PATH']=paths + env['+PATH'] = paths version_split = version.split(".") python_major_version = version_split[0] @@ -127,4 +139,5 @@ def postprocess(i): env['CM_PYTHON_MINOR_VERSION'] = python_minor_version env['CM_PYTHON_PATCH_VERSION'] = python_patch_version - return {'return':0, 'version': version, 'add_extra_cache_tags':add_extra_cache_tags} + return {'return': 0, 'version': version, + 'add_extra_cache_tags': add_extra_cache_tags} diff --git a/script/get-qaic-apps-sdk/customize.py b/script/get-qaic-apps-sdk/customize.py index b84d58b178..5fd343f71c 100644 --- a/script/get-qaic-apps-sdk/customize.py +++ b/script/get-qaic-apps-sdk/customize.py @@ -2,6 +2,7 @@ import os import xml.etree.ElementTree as et + def preprocess(i): os_info = i['os_info'] @@ -14,26 +15,29 @@ def preprocess(i): apps_sdk_path = None - if env.get('CM_INPUT','').strip() != '': + if env.get('CM_INPUT', '').strip() != '': path = env['CM_INPUT'] if os.path.exists(os.path.join(path, "exec", "qaic-exec")): apps_sdk_path = path else: - return {'return':1, 'error': 'exec/qaic-exec not found in the input path (--input)'} + return { + 'return': 1, 'error': 'exec/qaic-exec not found in the input path (--input)'} else: path = "/opt/qti-aic/" if os.path.exists(os.path.join(path, "exec", "qaic-exec")): apps_sdk_path = path if not apps_sdk_path: - return {'return':1, 'error': 
f'qaic-exec not found in the default path: {path}'} + return {'return': 1, + 'error': f'qaic-exec not found in the default path: {path}'} env['CM_QAIC_APPS_SDK_PATH'] = path env['CM_QAIC_EXEC_PATH'] = os.path.join(path, "exec", "qaic-exec") quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def detect_version(i): @@ -54,13 +58,14 @@ def detect_version(i): if child2.tag == "build_id": build_id = child2.text if build_id: - version=version+"."+build_id + version = version + "." + build_id if not version: - return {'return':1, 'error': f'qaic apps sdk version info not found'} + return {'return': 1, 'error': f'qaic apps sdk version info not found'} + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} def postprocess(i): @@ -77,11 +82,11 @@ def postprocess(i): env['+PATH'].append(os.path.dirname(env['CM_QAIC_EXEC_PATH'])) paths = [ - "+C_INCLUDE_PATH", - "+CPLUS_INCLUDE_PATH", - "+LD_LIBRARY_PATH", - "+DYLD_FALLBACK_LIBRARY_PATH" - ] + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] for key in paths: env[key] = [] @@ -97,8 +102,11 @@ def postprocess(i): env['+C_INCLUDE_PATH'].append(inc_path) env['+CPLUS_INCLUDE_PATH'].append(inc_path) - - lib_path = os.path.join(env['CM_QAIC_APPS_SDK_PATH'], "dev", "lib", "x86_64") + lib_path = os.path.join( + env['CM_QAIC_APPS_SDK_PATH'], + "dev", + "lib", + "x86_64") if os.path.exists(lib_path): lib_paths.append(lib_path) @@ -106,4 +114,4 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-qaic-platform-sdk/customize.py b/script/get-qaic-platform-sdk/customize.py index 5a68188bd5..33e9548ba7 100644 --- a/script/get-qaic-platform-sdk/customize.py +++ b/script/get-qaic-platform-sdk/customize.py @@ -2,6 +2,7 @@ import os import xml.etree.ElementTree as et + def preprocess(i): os_info = i['os_info'] @@ -14,19 +15,21 @@ def preprocess(i): platform_sdk_path = None - if env.get('CM_INPUT','').strip() != '': + if env.get('CM_INPUT', '').strip() != '': path = env['CM_INPUT'] if os.path.exists(os.path.join(path, "exec", "qaic-runner")): platform_sdk_path = path else: - return {'return':1, 'error': 'exec/qaic-runner not found in the input path (--input)'} + return { + 'return': 1, 'error': 'exec/qaic-runner not found in the input path (--input)'} else: path = "/opt/qti-aic/" if os.path.exists(os.path.join(path, "exec", "qaic-runner")): platform_sdk_path = path if not platform_sdk_path: - return {'return':1, 'error': f'qaic-runner not found in the default path: {path}'} + return {'return': 1, + 'error': f'qaic-runner not found in the default path: {path}'} env['CM_QAIC_PLATFORM_SDK_PATH'] = path env['CM_QAIC_RUNNER_PATH'] = os.path.join(path, "exec", "qaic-runner") @@ -34,7 +37,8 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def detect_version(i): @@ -55,13 +59,14 @@ def detect_version(i): if child2.tag == "build_id": build_id = child2.text if build_id: - version=version+"."+build_id + version = version + "." 
+ build_id if not version: - return {'return':1, 'error': f'qaic platform sdk version info not found'} + return {'return': 1, 'error': f'qaic platform sdk version info not found'} + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} def postprocess(i): @@ -78,11 +83,11 @@ def postprocess(i): env['+PATH'].append(os.path.dirname(env['CM_QAIC_RUNNER_PATH'])) paths = [ - "+C_INCLUDE_PATH", - "+CPLUS_INCLUDE_PATH", - "+LD_LIBRARY_PATH", - "+DYLD_FALLBACK_LIBRARY_PATH" - ] + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] for key in paths: env[key] = [] @@ -98,8 +103,11 @@ def postprocess(i): env['+C_INCLUDE_PATH'].append(inc_path) env['+CPLUS_INCLUDE_PATH'].append(inc_path) - - lib_path = os.path.join(env['CM_QAIC_PLATFORM_SDK_PATH'], "dev", "lib", env['CM_HOST_PLATFORM_FLAVOR']) + lib_path = os.path.join( + env['CM_QAIC_PLATFORM_SDK_PATH'], + "dev", + "lib", + env['CM_HOST_PLATFORM_FLAVOR']) if os.path.exists(lib_path): lib_paths.append(lib_path) @@ -107,4 +115,4 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-qaic-software-kit/customize.py b/script/get-qaic-software-kit/customize.py index 00aced2dda..77edac7696 100644 --- a/script/get-qaic-software-kit/customize.py +++ b/script/get-qaic-software-kit/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -46,17 +47,20 @@ def preprocess(i): if clang_major_version == 12: env['+ CXXFLAGS'].append("-Wno-error=unknown-warning-option") - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + env['CM_QAIC_RUNNER_PATH'] = os.path.join( + env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: env['+PATH'] = [] env['+PATH'].append(env['CM_QAIC_RUNNER_PATH']) - env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_RUNNER_PATH'], "qaic-runner") + env['CM_QAIC_RUNNER_PATH'] = os.path.join( + env['CM_QAIC_RUNNER_PATH'], "qaic-runner") - return {'return':0} + return {'return': 0} diff --git a/script/get-rclone-config/customize.py b/script/get-rclone-config/customize.py index 576b6b73f8..10893238fe 100644 --- a/script/get-rclone-config/customize.py +++ b/script/get-rclone-config/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,10 +17,11 @@ def preprocess(i): if env.get('CM_RCLONE_CONFIG_CMD', '') != '': env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/get-rclone/customize.py b/script/get-rclone/customize.py index c33fdd7a82..7dec29c71d 100644 --- a/script/get-rclone/customize.py +++ b/script/get-rclone/customize.py @@ -2,6 +2,7 @@ import os import configparser + def preprocess(i): os_info = i['os_info'] @@ -16,25 +17,27 @@ def preprocess(i): run_script_input = i['run_script_input'] automation = i['automation'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') host_os_machine = '' if 
os_info['platform'] != 'windows': - host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI r = automation.detect_version_using_script({ - 'env': env, - 'run_script_input': run_script_input, - 'recursion_spaces':recursion_spaces}) + 'env': env, + 'run_script_input': run_script_input, + 'recursion_spaces': recursion_spaces}) - if r['return'] >0: + if r['return'] > 0: if r['return'] == 16: install_script = 'install' - if os_info['platform'] != 'windows' and env.get('CM_RCLONE_SYSTEM','')=='yes': + if os_info['platform'] != 'windows' and env.get( + 'CM_RCLONE_SYSTEM', '') == 'yes': install_script += '-system' else: if os_info['platform'] != 'windows': - x1 = 'arm64' if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch') else 'amd64' + x1 = 'arm64' if host_os_machine.startswith( + 'arm') or host_os_machine.startswith('aarch') else 'amd64' filebase = 'rclone-v{}-{}-{}' urlbase = 'https://downloads.rclone.org/v{}/{}' @@ -44,11 +47,15 @@ def preprocess(i): elif os_info['platform'] == 'linux': filename = filebase.format(need_version, 'linux', x1) - env['CM_RCLONE_URL'] = urlbase.format(need_version, filename+'.zip') + env['CM_RCLONE_URL'] = urlbase.format( + need_version, filename + '.zip') env['CM_RCLONE_ARCHIVE'] = filename - env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename+'.zip' + env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename + '.zip' - print(recursion_spaces + 'Downloading {}'.format(env['CM_RCLONE_URL'])) + print( + recursion_spaces + + 'Downloading {}'.format( + env['CM_RCLONE_URL'])) cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) @@ -62,28 +69,31 @@ def preprocess(i): env['+PATH'] = [] env['+PATH'].append(cur_dir) - - r = automation.run_native_script({'run_script_input':run_script_input, - 'env':env, - 'script_name':install_script}) - if r['return']>0: return r + r = automation.run_native_script({'run_script_input': run_script_input, + 'env': env, + 'script_name': install_script}) + if r['return'] > 0: + return r else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'rclone v([\d.]+)', 'group_number': 1, - 'env_key':'CM_RCLONE_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_RCLONE_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} - return {'return':0, 'version':version} def postprocess(i): @@ -93,12 +103,14 @@ def postprocess(i): gdrive = env.get('CM_RCLONE_GDRIVE', '') if gdrive == "yes": config = configparser.ConfigParser() - config_file_path = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") + config_file_path = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") config.read(config_file_path) - #config['cm-team']['service_account_file'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") + # config['cm-team']['service_account_file'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") - default_config_path = os.path.join(os.path.expanduser( '~' ), ".config", "rclone", "rclone.conf") + default_config_path = os.path.join( + os.path.expanduser('~'), ".config", "rclone", "rclone.conf") default_config = configparser.ConfigParser() 
default_config.read(default_config_path) @@ -109,24 +121,27 @@ def postprocess(i): with open(default_config_path, 'w') as configfile: default_config.write(configfile) - print({section: dict(default_config[section]) for section in default_config.sections()}) + print({section: dict(default_config[section]) + for section in default_config.sections()}) r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] - env['CM_RCLONE_CACHE_TAGS'] = 'version-'+version + env['CM_RCLONE_CACHE_TAGS'] = 'version-' + version file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' - if os_info['platform'] == 'windows' or env.get('CM_RCLONE_SYSTEM','')!='yes': + if os_info['platform'] == 'windows' or env.get( + 'CM_RCLONE_SYSTEM', '') != 'yes': cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) if os.path.isfile(path_bin): # Was downloaded and extracted by CM env['CM_RCLONE_BIN_WITH_PATH'] = path_bin - env['+PATH']=[cur_dir] + env['+PATH'] = [cur_dir] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-rocm-devices/customize.py b/script/get-rocm-devices/customize.py index 03a0efd4ea..0b0d0d9ce6 100644 --- a/script/get-rocm-devices/customize.py +++ b/script/get-rocm-devices/customize.py @@ -2,14 +2,17 @@ import os import subprocess + def preprocess(i): env = i['env'] - if str(env.get('CM_DETECT_USING_HIP-PYTHON', '')).lower() in [ "1", "yes", "true"]: + if str(env.get('CM_DETECT_USING_HIP-PYTHON', '') + ).lower() in ["1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -19,9 +22,10 @@ def postprocess(i): os_info = i['os_info'] r = utils.load_txt(file_name='tmp-run.out', - check_if_exists = True, - split = True) - if r['return']>0: return r + check_if_exists=True, + split=True) + if r['return'] > 0: + return r lst = r['list'] @@ -32,16 +36,16 @@ def postprocess(i): gpu_id = -1 for line in lst: - #print (line) + # print (line) j = line.find(':') - if j>=0: + if j >= 0: key = line[:j].strip() - val = line[j+1:].strip() + val = line[j + 1:].strip() if key == "GPU Device ID": - gpu_id+=1 + gpu_id += 1 gpu[gpu_id] = {} if gpu_id < 0: @@ -50,7 +54,7 @@ def postprocess(i): gpu[gpu_id][key] = val p[key] = val - key_env = 'CM_ROCM_DEVICE_PROP_'+key.upper().replace(' ','_') + key_env = 'CM_ROCM_DEVICE_PROP_' + key.upper().replace(' ', '_') env[key_env] = val state['cm_rocm_num_devices'] = gpu_id + 1 @@ -59,4 +63,4 @@ def postprocess(i): state['cm_rocm_device_prop'] = p state['cm_rocm_devices_prop'] = gpu - return {'return':0} + return {'return': 0} diff --git a/script/get-rocm/customize.py b/script/get-rocm/customize.py index dac6707cf2..1e9c5071ce 100644 --- a/script/get-rocm/customize.py +++ b/script/get-rocm/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,39 +17,43 @@ def preprocess(i): if 'CM_ROCM_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_ROCM_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_ROCM_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 
16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'([\d.]+[-\d+]*)', 'group_number': 1, - 'env_key':'CM_ROCM_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_ROCM_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + def postprocess(i): env = i['env'] r = detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_ROCM_BIN_WITH_PATH'] @@ -56,6 +61,6 @@ def postprocess(i): found_path = os.path.dirname(found_file_path) env['CM_ROCM_INSTALLED_PATH'] = found_path - env['CM_ROCM_CACHE_TAGS'] = 'version-'+version + env['CM_ROCM_CACHE_TAGS'] = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-spec-ptd/customize.py b/script/get-spec-ptd/customize.py index 250ddd887b..b4c949179d 100644 --- a/script/get-spec-ptd/customize.py +++ b/script/get-spec-ptd/customize.py @@ -2,11 +2,12 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] - return {'return':0} + return {'return': 0} def postprocess(i): @@ -19,7 +20,8 @@ def postprocess(i): else: binary_name = "ptd-linux-x86" if 'CM_MLPERF_PTD_PATH' not in env: - env['CM_MLPERF_PTD_PATH'] = os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'inference_v1.0', binary_name) + env['CM_MLPERF_PTD_PATH'] = os.path.join( + env['CM_MLPERF_POWER_SOURCE'], 'inference_v1.0', binary_name) env['CM_SPEC_PTD_PATH'] = env['CM_MLPERF_PTD_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/get-sys-utils-cm/customize.py b/script/get-sys-utils-cm/customize.py index 3c8aa3c910..994f9f46be 100644 --- a/script/get-sys-utils-cm/customize.py +++ b/script/get-sys-utils-cm/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,16 +16,17 @@ def preprocess(i): i['run_script_input']['script_name'] = "run-rhel" # Test (not needed - will be removed) - if str(env.get('CM_SKIP_SYS_UTILS','')).lower() in ['true', 'yes', 'on']: - return {'return':0, 'skip':True} + if str(env.get('CM_SKIP_SYS_UTILS', '')).lower() in ['true', 'yes', 'on']: + return {'return': 0, 'skip': True} -# Windows has moved to get-sys-utils-min and will always be run with "detect,os"! +# Windows has moved to get-sys-utils-min and will always be run with +# "detect,os"!
if os_info['platform'] == 'windows': - print ('') - print ('This script is not used on Windows') - print ('') + print('') + print('This script is not used on Windows') + print('') # If windows, download here otherwise use run.sh @@ -78,10 +80,10 @@ def preprocess(i): # env['+PATH']=[os.path.join(path, 'bin')] # else: - print ('') - print ('***********************************************************************') - print ('This script will attempt to install minimal system dependencies for CM.') - print ('Note that you may be asked for your SUDO password ...') - print ('***********************************************************************') + print('') + print('***********************************************************************') + print('This script will attempt to install minimal system dependencies for CM.') + print('Note that you may be asked for your SUDO password ...') + print('***********************************************************************') - return {'return':0} + return {'return': 0} diff --git a/script/get-sys-utils-min/customize.py b/script/get-sys-utils-min/customize.py index f9d4a29d76..6efb11fa07 100644 --- a/script/get-sys-utils-min/customize.py +++ b/script/get-sys-utils-min/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -15,50 +16,52 @@ def preprocess(i): path = os.getcwd() - clean_dirs = env.get('CM_CLEAN_DIRS','').strip() - if clean_dirs!='': + clean_dirs = env.get('CM_CLEAN_DIRS', '').strip() + if clean_dirs != '': import shutil for cd in clean_dirs.split(','): if cd != '': if os.path.isdir(cd): - print ('Cleaning directory {}'.format(cd)) + print('Cleaning directory {}'.format(cd)) shutil.rmtree(cd) url = env['CM_PACKAGE_WIN_URL'] urls = [url] if ';' not in url else url.split(';') - print ('') - print ('Current directory: {}'.format(os.getcwd())) + print('') + print('Current directory: {}'.format(os.getcwd())) for url in urls: url = url.strip() - print ('') - print ('Downloading from {}'.format(url)) + print('') + print('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + print('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', - 'filename':filename}) - if r['return']>0: return r + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename}) + if r['return'] > 0: + return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + print('Removing file {}'.format(filename)) os.remove(filename) - print ('') + print('') # Add to path - env['+PATH']=[os.path.join(path, 'bin')] + env['+PATH'] = [os.path.join(path, 'bin')] - return {'return':0} + return {'return': 0} diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py index df74a08fd6..addc7322c2 100644 --- a/script/get-tensorrt/customize.py +++ b/script/get-tensorrt/customize.py @@ -2,6 +2,7 @@ import os import tarfile + def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -10,18 +11,18 @@ def preprocess(i): env = i['env'] - - #Not enforcing dev requirement for now - if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='' and env.get('CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and
env.get('CM_HOST_PLATFORM_FLAVOR', '') != 'aarch64': + # Not enforcing dev requirement for now + if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '' and env.get( + 'CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('CM_HOST_PLATFORM_FLAVOR', '') != 'aarch64': if os_info['platform'] == 'windows': - extra_pre='' - extra_ext='lib' + extra_pre = '' + extra_ext = 'lib' else: - extra_pre='lib' - extra_ext='so' + extra_pre = 'lib' + extra_ext = 'so' - libfilename = extra_pre + 'nvinfer.' +extra_ext + libfilename = extra_pre + 'nvinfer.' + extra_ext env['CM_TENSORRT_VERSION'] = 'vdetected' if env.get('CM_TMP_PATH', '').strip() != '': @@ -34,10 +35,12 @@ def preprocess(i): env['CM_TMP_PATH'] = '' if os_info['platform'] == 'windows': - if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" paths = [] - for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", + "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: if os.path.isdir(path): dirs = os.listdir(path) for dr in dirs: @@ -45,9 +48,9 @@ def preprocess(i): if os.path.isdir(path2): paths.append(path2) - if len(paths)>0: + if len(paths) > 0: tmp_paths = ';'.join(paths) - tmp_paths += ';'+os.environ.get('PATH','') + tmp_paths += ';' + os.environ.get('PATH', '') env['CM_TMP_PATH'] = tmp_paths env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' @@ -56,41 +59,42 @@ def preprocess(i): # paths to cuda are not always in PATH - add a few typical locations to search for # (unless forced by a user) - if env.get('CM_INPUT','').strip()=='': - if env.get('CM_TMP_PATH','').strip()!='': - env['CM_TMP_PATH']+=':' + if env.get('CM_INPUT', '').strip() == '': + if env.get('CM_TMP_PATH', '').strip() != '': + env['CM_TMP_PATH'] += ':' env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): - if(os.path.exists(lib_path)): - env['CM_TMP_PATH']+=':'+lib_path + for lib_path in env.get( + '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if (os.path.exists(lib_path)): + env['CM_TMP_PATH'] += ':' + lib_path r = i['automation'].find_artifact({'file_name': libfilename, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'LD_LIBRARY_PATH', - 'detect_version':False, - 'env_path_key':'CM_TENSORRT_LIB_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': False, + 'env_path_key': 'CM_TENSORRT_LIB_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if os_info['platform'] == 'windows': return r else: - return {'return':0} + return {'return': 0} if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is currently not supported!'} - if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='': - tags = [ "get", "tensorrt" ] + if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '': + tags = ["get", "tensorrt"] if env.get('CM_TENSORRT_REQUIRE_DEV', '') != 'yes': tags.append("_dev") - return {'return': 1, 'error': 'Please invoke cmr "' + " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'} + return {'return': 1, 'error': 'Please invoke cmr "' + + " ".join(tags) + '" --tar_file={full path to the TensorRT tar
file}'} - - print ('Untarring file - can take some time ...') + print('Untarring file - can take some time ...') file_name = "trtexec" my_tar = tarfile.open(os.path.expanduser(env['CM_TENSORRT_TAR_FILE_PATH'])) @@ -109,11 +113,20 @@ def preprocess(i): env['CM_TENSORRT_INSTALL_PATH'] = os.path.join(os.getcwd(), folder_name) env['CM_TENSORRT_LIB_PATH'] = os.path.join(os.getcwd(), folder_name, "lib") env['CM_TMP_PATH'] = os.path.join(os.getcwd(), folder_name, "bin") - env['+CPLUS_INCLUDE_PATH'] = [ os.path.join(os.getcwd(), folder_name, "include") ] - env['+C_INCLUDE_PATH'] = [ os.path.join(os.getcwd(), folder_name, "include") ] - env['+LD_LIBRARY_PATH'] = [ os.path.join(os.getcwd(), folder_name, "lib") ] + env['+CPLUS_INCLUDE_PATH'] = [ + os.path.join( + os.getcwd(), + folder_name, + "include")] + env['+C_INCLUDE_PATH'] = [ + os.path.join( + os.getcwd(), + folder_name, + "include")] + env['+LD_LIBRARY_PATH'] = [os.path.join(os.getcwd(), folder_name, "lib")] + + return {'return': 0} - return {'return':0} def postprocess(i): @@ -121,22 +134,22 @@ def postprocess(i): env = i['env'] - if '+LD_LIBRARY_PATH' not in env: + if '+LD_LIBRARY_PATH' not in env: env['+LD_LIBRARY_PATH'] = [] - if '+PATH' not in env: + if '+PATH' not in env: env['+PATH'] = [] if '+ LDFLAGS' not in env: env['+ LDFLAGS'] = [] - #if 'CM_TENSORRT_LIB_WITH_PATH' in env: + # if 'CM_TENSORRT_LIB_WITH_PATH' in env: # tensorrt_lib_path = os.path.dirname(env['CM_TENSORRT_LIB_WITH_PATH']) if 'CM_TENSORRT_LIB_PATH' in env: env['+LD_LIBRARY_PATH'].append(env['CM_TENSORRT_LIB_PATH']) - env['+PATH'].append(env['CM_TENSORRT_LIB_PATH']) #for cmake - env['+ LDFLAGS'].append("-L"+env['CM_TENSORRT_LIB_PATH']) + env['+PATH'].append(env['CM_TENSORRT_LIB_PATH']) # for cmake + env['+ LDFLAGS'].append("-L" + env['CM_TENSORRT_LIB_PATH']) version = env['CM_TENSORRT_VERSION'] - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-terraform/customize.py b/script/get-terraform/customize.py index 4c3c668b51..1ec8af5c6a 100644 --- a/script/get-terraform/customize.py +++ b/script/get-terraform/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,39 +15,43 @@ def preprocess(i): if 'CM_TERRAFORM_BIN_WITH_PATH' not in env: r = i['automation'].find_artifact({'file_name': file_name, 'env': env, - 'os_info':os_info, + 'os_info': os_info, 'default_path_env_key': 'PATH', - 'detect_version':True, - 'env_path_key':'CM_TERRAFORM_BIN_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + 'detect_version': True, + 'env_path_key': 'CM_TERRAFORM_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} else: return r - return {'return':0} + return {'return': 0} + def detect_version(i): r = i['automation'].parse_version({'match_text': r'Terraform\s*v([\d.]+)', 'group_number': 1, - 'env_key':'CM_TERRAFORM_VERSION', - 'which_env':i['env']}) - if r['return'] >0: return r + 'env_key': 'CM_TERRAFORM_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + def postprocess(i): env = i['env'] r =
detect_version(i) - if r['return'] >0: return r + if r['return'] > 0: + return r version = r['version'] found_file_path = env['CM_TERRAFORM_BIN_WITH_PATH'] @@ -54,6 +59,6 @@ def postprocess(i): found_path = os.path.dirname(found_file_path) env['CM_TERRAFORM_INSTALLED_PATH'] = found_path - env['CM_TERRAFORM_CACHE_TAGS'] = 'version-'+version + env['CM_TERRAFORM_CACHE_TAGS'] = 'version-' + version - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/get-tvm-model/customize.py b/script/get-tvm-model/customize.py index e776149dba..ce883ce36a 100644 --- a/script/get-tvm-model/customize.py +++ b/script/get-tvm-model/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -32,10 +33,9 @@ def preprocess(i): print("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. The compiled model will be based on the found existing \"work_dir\".") env["CM_TUNE_TVM_MODEL"] = "no" + return {'return': 0} - return {'return':0} - def postprocess(i): env = i['env'] @@ -51,4 +51,4 @@ def postprocess(i): "BATCH_SIZE", env['CM_ML_MODEL_MAX_BATCH_SIZE']) if 'CM_TVM_FRONTEND_FRAMEWORK' in env and env['CM_TVM_FRONTEND_FRAMEWORK'] == 'pytorch': env['CM_PREPROCESS_PYTORCH'] = 'yes' - return {'return':0} + return {'return': 0} diff --git a/script/get-tvm-model/process.py b/script/get-tvm-model/process.py index 0e89263665..c7384000dc 100644 --- a/script/get-tvm-model/process.py +++ b/script/get-tvm-model/process.py @@ -10,6 +10,7 @@ from tvm import relay, meta_schedule from tvm.driver.tvmc.frontends import load_model + def get_shape_dict_from_onnx( shape: List[int], model_path: str @@ -24,10 +25,11 @@ def get_shape_dict_from_onnx( if dimension.dim_value != 0: shape.append(dimension.dim_value) input_all = [node.name for node in onnx_model.graph.input] - input_initializer = [node.name for node in onnx_model.graph.initializer] - net_feed_input = list(set(input_all) - set(input_initializer)) + input_initializer = [node.name for node in onnx_model.graph.initializer] + net_feed_input = list(set(input_all) - set(input_initializer)) return {input_name: shape for input_name in net_feed_input} + def get_mod_params( model_path: str, model_name: str, @@ -40,12 +42,18 @@ def get_mod_params( image_height: Optional[int] = None, max_seq_length: Optional[int] = None ) -> Tuple[tvm.IRModule, Dict[str, tvm.nd.NDArray]]: - if not input_shapes_str and (not image_width or not image_height) and not max_seq_length and frontend != "onnx": + if not input_shapes_str and ( + not image_width or not image_height) and not max_seq_length and frontend != "onnx": raise RuntimeError( "Error: None of environment variables storing shape is set!" 
) if input_shapes_str: - shape_dict = eval('{' + input_shapes_str.replace('BATCH_SIZE', str(batch_size)) + '}') + shape_dict = eval( + '{' + + input_shapes_str.replace( + 'BATCH_SIZE', + str(batch_size)) + + '}') else: shape = [] if image_width and image_height: @@ -53,7 +61,8 @@ def get_mod_params( elif max_seq_length: shape = [batch_size, max_seq_length] if frontend == "onnx": - shape_dict = get_shape_dict_from_onnx(shape if len(shape) > 0 else [batch_size], model_path) + shape_dict = get_shape_dict_from_onnx( + shape if len(shape) > 0 else [batch_size], model_path) else: raise RuntimeError( "Error: Cannot find proper shapes in environment variables" @@ -73,7 +82,8 @@ def get_mod_params( mod, params = tvm.relay.frontend.from_pytorch(traced_model, shape_list) else: tvmc_model = load_model(path=model_path, shape_dict=shape_dict) - mod, params = tvm.relay.transform.DynamicToStatic()(tvmc_model.mod), tvmc_model.params + mod, params = tvm.relay.transform.DynamicToStatic()( + tvmc_model.mod), tvmc_model.params input_layer_name_file = os.path.join(os.getcwd(), "input_layer_name") if not input_layer_name: @@ -83,6 +93,7 @@ def get_mod_params( return mod, params + def tune_model( mod: tvm.IRModule, params: Dict[str, tvm.nd.NDArray], @@ -169,6 +180,7 @@ def compile_model( ) return lib + def serialize_vm( vm_exec: tvm.runtime.vm.Executable ) -> tvm.runtime.Module: @@ -186,6 +198,7 @@ def serialize_vm( file.write(code) return lib + def main() -> None: model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None) compiled_model = os.path.join(os.getcwd(), 'model-tvm.so') @@ -204,11 +217,17 @@ def main() -> None: batch_size=int(os.environ.get('CM_ML_MODEL_MAX_BATCH_SIZE', 1)), frontend=os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None), input_shapes_str=os.environ.get('CM_ML_MODEL_INPUT_SHAPES', None), - input_layer_name=os.environ.get('CM_ML_MODEL_INPUT_LAYER_NAME', None), - num_channels=int(os.environ.get('CM_ML_MODEL_IMAGE_NUM_CHANNELS', 3)), + input_layer_name=os.environ.get( + 'CM_ML_MODEL_INPUT_LAYER_NAME', None), + num_channels=int( + os.environ.get( + 'CM_ML_MODEL_IMAGE_NUM_CHANNELS', + 3)), image_width=int(os.environ.get('CM_ML_MODEL_IMAGE_WIDTH', 0)), image_height=int(os.environ.get('CM_ML_MODEL_IMAGE_HEIGHT', 0)), - max_seq_length=int(os.environ.get('CM_ML_MODEL_MAX_SEQ_LENGTH', 0)), + max_seq_length=int( + os.environ.get( + 'CM_ML_MODEL_MAX_SEQ_LENGTH', 0)), ) opt_level = int(os.environ.get('CM_MLPERF_TVM_OPT_LEVEL', 3)) target = os.environ.get( @@ -231,7 +250,8 @@ def main() -> None: lib = compile_model( mod=mod, params=params, - work_dir=work_dir if work_dir != '' else os.environ.get('CM_TUNE_TVM_MODEL_WORKDIR', ''), + work_dir=work_dir if work_dir != '' else os.environ.get( + 'CM_TUNE_TVM_MODEL_WORKDIR', ''), target=tvm_target, opt_level=opt_level, build_conf=build_conf, @@ -248,5 +268,6 @@ def main() -> None: lib.export_library(compiled_model) print('TVM compiled model: ' + compiled_model) + if __name__ == "__main__": main() diff --git a/script/get-tvm/customize.py b/script/get-tvm/customize.py index fba65ec4b7..b7995264ba 100644 --- a/script/get-tvm/customize.py +++ b/script/get-tvm/customize.py @@ -1,13 +1,15 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): @@ -15,23 +17,22 @@ def 
postprocess(i):

     env = i['env']

     if env.get('CM_TVM_PIP_INSTALL', '') == "yes":
-        return {'return':0}
-
+        return {'return': 0}

     tvm_home = env['TVM_HOME']

     # 20221024: we save and restore env in the main script and can clean env here for determinism
     # if '+PYTHONPATH' not in env: env['+PYTHONPATH']=[]

-    env['+PYTHONPATH']=[]
-
-    env['+PYTHONPATH'].append(os.path.join(tvm_home,'python'))
+    env['+PYTHONPATH'] = []
+    env['+PYTHONPATH'].append(os.path.join(tvm_home, 'python'))

     # Prepare paths
-    for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
+    for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH',
+                '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
         env[key] = []

-    ## Include
+    # Include
     include_path = os.path.join(tvm_home, 'include')
     if os.path.isdir(include_path):
         if os_info['platform'] != 'windows':
@@ -40,11 +41,10 @@ def postprocess(i):

         env['CM_TVM_PATH_INCLUDE'] = include_path

-    ## Lib
+    # Lib
     lib_path = os.path.join(tvm_home, 'build')
     env['+LD_LIBRARY_PATH'].append(lib_path)
     env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path)
     env['CM_TVM_PATH_LIB'] = lib_path

-
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-xilinx-sdk/customize.py b/script/get-xilinx-sdk/customize.py
index 02e31c620f..52922d29c0 100644
--- a/script/get-xilinx-sdk/customize.py
+++ b/script/get-xilinx-sdk/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -15,18 +16,19 @@ def preprocess(i):

     file_path = env.get("CM_XILINX_SDK_BIN_PATH")
     if not file_path or not os.path.exists(file_path):
-        return {'return':1, 'error': 'FILE_PATH does not exist'}
+        return {'return': 1, 'error': 'FILE_PATH does not exist'}

     bin_folder_path = os.path.dirname(file_path)
     if '+PATH' in env:
-        env['+PATH'].append(bin_foler_path)
+        env['+PATH'].append(bin_folder_path)
     else:
-        env['+PATH'] = [ bin_folder_path ]
+        env['+PATH'] = [bin_folder_path]
+
+    return {'return': 0}

-    return {'return':0}

 def postprocess(i):
     env = i['env']
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-zendnn/customize.py b/script/get-zendnn/customize.py
index d9918a266d..5310eea521 100644
--- a/script/get-zendnn/customize.py
+++ b/script/get-zendnn/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -18,10 +19,11 @@ def preprocess(i):

     env['ZENDNN_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH']

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-zephyr-sdk/customize.py b/script/get-zephyr-sdk/customize.py
index 87619e7a01..d0b05d6757 100644
--- a/script/get-zephyr-sdk/customize.py
+++ b/script/get-zephyr-sdk/customize.py
@@ -1,15 +1,17 @@
 from cmind import utils
 import os

+
 def preprocess(i):
     env = i['env']
-    return {'return':0}
+    return {'return': 0}


 def postprocess(i):
     env = i['env']

     env['ZEPHYR_TOOLCHAIN_VARIANT'] = "zephyr"
-    env['ZEPHYR_SDK_INSTALL_DIR'] = os.path.join(os.getcwd(), "zephyr-sdk-" + env['CM_ZEPHYR_SDK_VERSION'])
+    env['ZEPHYR_SDK_INSTALL_DIR'] = os.path.join(
+        os.getcwd(), "zephyr-sdk-" + env['CM_ZEPHYR_SDK_VERSION'])

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/get-zephyr/customize.py b/script/get-zephyr/customize.py
index c157b165ce..74b8a9fec4 100644
--- a/script/get-zephyr/customize.py
+++ b/script/get-zephyr/customize.py
@@ -1,12 +1,13 @@
 from cmind import utils
 import os

+
 def preprocess(i):
     env = i['env']

     if '+PATH' not in env:
         env['+PATH'] = []
env['+PATH'].append("$HOME/.local/bin") - return {'return':0} + return {'return': 0} def postprocess(i): @@ -14,4 +15,4 @@ def postprocess(i): env = i['env'] env['CM_ZEPHYR_DIR'] = os.path.join(os.getcwd(), "zephyr") - return {'return':0} + return {'return': 0} diff --git a/script/gui/app.py b/script/gui/app.py index 1e15bfef64..c5928994fa 100644 --- a/script/gui/app.py +++ b/script/gui/app.py @@ -6,49 +6,49 @@ import misc + def main(): query_params = misc.get_params(st) - script_path = os.environ.get('CM_GUI_SCRIPT_PATH','') - script_alias = os.environ.get('CM_GUI_SCRIPT_ALIAS','') + script_path = os.environ.get('CM_GUI_SCRIPT_PATH', '') + script_alias = os.environ.get('CM_GUI_SCRIPT_ALIAS', '') title = os.environ.get('CM_GUI_TITLE', '') # Check if script tags are specified from CMD - script_tags = os.environ.get('CM_GUI_SCRIPT_TAGS','').strip() + script_tags = os.environ.get('CM_GUI_SCRIPT_TAGS', '').strip() - script_tags_from_url = query_params.get('tags',['']) - if len(script_tags_from_url)>0: + script_tags_from_url = query_params.get('tags', ['']) + if len(script_tags_from_url) > 0: x_script_tags_from_url = script_tags_from_url[0].strip() if x_script_tags_from_url != '': script_tags = x_script_tags_from_url meta = {} - if script_tags !='': + if script_tags != '': # Check type of tags if ' ' in script_tags: - script_tags = script_tags.replace(' ',',') + script_tags = script_tags.replace(' ', ',') - print ('Searching CM scripts using tags "{}"'.format(script_tags)) + print('Searching CM scripts using tags "{}"'.format(script_tags)) - r = cmind.access({'action':'find', - 'automation':'script,5b4e0237da074764', - 'tags':script_tags}) - if r['return']>0: return r + r = cmind.access({'action': 'find', + 'automation': 'script,5b4e0237da074764', + 'tags': script_tags}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==1: + if len(lst) == 1: script = lst[0] meta = script.meta script_path = script.path script_alias = meta['alias'] - - # Read meta - if len(meta)==0 and script_path!='' and os.path.isdir(script_path): + if len(meta) == 0 and script_path != '' and os.path.isdir(script_path): fn = os.path.join(script_path, '_cm') r = cmind.utils.load_yaml_and_json(fn) if r['return'] == 0: @@ -68,5 +68,6 @@ def main(): return script.page(ii) + if __name__ == "__main__": main() diff --git a/script/gui/customize.py b/script/gui/customize.py index 2d2789b363..185e11e1ea 100644 --- a/script/gui/customize.py +++ b/script/gui/customize.py @@ -7,6 +7,7 @@ import shutil import subprocess + def preprocess(i): os_info = i['os_info'] @@ -16,49 +17,50 @@ def preprocess(i): cm = i['automation'].cmind - script_tags = env.get('CM_GUI_SCRIPT_TAGS','') + script_tags = env.get('CM_GUI_SCRIPT_TAGS', '') if script_tags != '': # Check type of tags if ' ' in script_tags: - script_tags = script_tags.replace(' ',',') + script_tags = script_tags.replace(' ', ',') - print ('Searching CM scripts using tags "{}"'.format(script_tags)) + print('Searching CM scripts using tags "{}"'.format(script_tags)) - r = cm.access({'action':'find', - 'automation':'script', - 'tags':script_tags}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'script', + 'tags': script_tags}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==1: + if len(lst) == 1: script = lst[0] env['CM_GUI_SCRIPT_PATH'] = script.path env['CM_GUI_SCRIPT_ALIAS'] = script.meta['alias'] - print ('Script found in path {}'.format(script.path)) + print('Script found in path {}'.format(script.path)) 
env['CM_GUI_SCRIPT_TAGS'] = script_tags # Check other vars and assemble extra CMD - extra_cmd = env.get('CM_GUI_EXTRA_CMD','') + extra_cmd = env.get('CM_GUI_EXTRA_CMD', '') port = env.get('CM_GUI_PORT', '') address = env.get('CM_GUI_ADDRESS', '') no_browser = env.get('CM_GUI_NO_BROWSER', '') - if no_browser!='': - extra_cmd+=' --server.headless true' + if no_browser != '': + extra_cmd += ' --server.headless true' - if address!='': - extra_cmd+=' --server.address='+address + if address != '': + extra_cmd += ' --server.address=' + address - if port!='': - extra_cmd+=' --server.port='+port + if port != '': + extra_cmd += ' --server.port=' + port env['CM_GUI_EXTRA_CMD'] = extra_cmd - print ('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) + print('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) - return {'return':0} + return {'return': 0} diff --git a/script/gui/graph.py b/script/gui/graph.py index ab4fc2db61..30581dd095 100644 --- a/script/gui/graph.py +++ b/script/gui/graph.py @@ -22,17 +22,17 @@ security = ['os.', 'streamlit.', 'matplotlib.', 'numpy.', 'pandas.', 'mpld3.'] -repro_badges={ - 'acm_ctuning_repro_badge_functional':{'img':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, - 'acm_ctuning_repro_badge_reproduce':{'img':'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, - 'acm_ctuning_repro_badge_support_docker':{'img':'https://cTuning.org/images/docker_logo2_small.png'}, - 'acm_ctuning_repro_badge_cm_interface':{'img':'https://cTuning.org/images/logo-ck-single-tr4.png'} - } +repro_badges = { + 'acm_ctuning_repro_badge_functional': {'img': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, + 'acm_ctuning_repro_badge_reproduce': {'img': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, + 'acm_ctuning_repro_badge_support_docker': {'img': 'https://cTuning.org/images/docker_logo2_small.png'}, + 'acm_ctuning_repro_badge_cm_interface': {'img': 'https://cTuning.org/images/logo-ck-single-tr4.png'} +} class OpenBrowserOnClick(mpld3.plugins.PluginBase): - JAVASCRIPT=""" + JAVASCRIPT = """ mpld3.register_plugin("openbrowseronclick", PointClickableHTMLTooltip); @@ -64,10 +64,6 @@ def __init__(self, points, targets=None): "targets": targets} - - - - def main(): params = misc.get_params(st) @@ -78,51 +74,53 @@ def main(): return visualize(st, params) - - -def visualize(st, query_params, action = ''): +def visualize(st, query_params, action=''): # Query experiment - result_uid = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_RESULT_UID','') - q_result_uid = query_params.get('result_uid',['']) - if len(q_result_uid)>0: - if q_result_uid[0]!='': + result_uid = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_RESULT_UID', '') + q_result_uid = query_params.get('result_uid', ['']) + if len(q_result_uid) > 0: + if q_result_uid[0] != '': result_uid = q_result_uid[0] - v_experiment_name = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_NAME','') - q_experiment_name = query_params.get('name',['']) - if len(q_experiment_name)>0: - if q_experiment_name[0]!='': + v_experiment_name = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_NAME', '') + q_experiment_name = query_params.get('name', ['']) + if len(q_experiment_name) > 0: + if q_experiment_name[0] != '': v_experiment_name = q_experiment_name[0] - v_experiment_tags='' - if v_experiment_name=='': - v_experiment_tags = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TAGS','') - q_experiment_tags = query_params.get('tags',['']) - if len(q_experiment_tags)>0: - if q_experiment_tags[0]!='': + 
v_experiment_tags = '' + if v_experiment_name == '': + v_experiment_tags = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TAGS', '') + q_experiment_tags = query_params.get('tags', ['']) + if len(q_experiment_tags) > 0: + if q_experiment_tags[0] != '': v_experiment_tags = q_experiment_tags[0] - v_experiment_tags = v_experiment_tags.replace(',',' ') + v_experiment_tags = v_experiment_tags.replace(',', ' ') # Check default # if v_experiment_tags == '' and v_experiment_name == '': # v_experiment_tags = 'mlperf-inference v4.0' - v_experiment_tags = st.text_input('Select CM experiment tags separated by space:', value=v_experiment_tags, key='v_experiment_tags').strip() - v_experiment_tags = v_experiment_tags.replace(',',' ') + v_experiment_tags = st.text_input( + 'Select CM experiment tags separated by space:', + value=v_experiment_tags, + key='v_experiment_tags').strip() + v_experiment_tags = v_experiment_tags.replace(',', ' ') # Get all experiment names - ii = {'action':'find', - 'automation':'experiment,a0a2d123ef064bcb'} + ii = {'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb'} # If name is given, do not use tags - if v_experiment_name!='': - ii['artifact']=v_experiment_name - elif v_experiment_tags!='': - ii['tags']=v_experiment_tags.replace(' ',',') + if v_experiment_name != '': + ii['artifact'] = v_experiment_name + elif v_experiment_tags != '': + ii['tags'] = v_experiment_tags.replace(' ', ',') r = cmind.access(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst_all = r['list'] @@ -131,42 +129,42 @@ def visualize(st, query_params, action = ''): selection = 0 index = 1 for l in sorted(lst_all, key=lambda x: ( - ','.join(x.meta.get('tags',[])), - x.meta.get('alias',''), - x.meta['uid'] - )): + ','.join(x.meta.get('tags', [])), + x.meta.get('alias', ''), + x.meta['uid'] + )): meta = l.meta - if v_experiment_name!='' and (v_experiment_name == meta['alias'] or v_experiment_name == meta['uid']): + if v_experiment_name != '' and ( + v_experiment_name == meta['alias'] or v_experiment_name == meta['uid']): selection = index - name = ' '.join(meta.get('tags',[])) - if name =='': name = meta.get('alias', '') - if name =='': name = meta['uid'] - + name = ' '.join(meta.get('tags', [])) + if name == '': + name = meta.get('alias', '') + if name == '': + name = meta['uid'] experiments.append(name) - index+=1 + index += 1 if len(lst_all) == 1: selection = 1 # Show experiment artifacts - experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments)-1), + experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments) - 1), range(len(experiments)), format_func=lambda x: experiments[x], index=selection, key='experiment') + lst = [lst_all[experiment - 1]] if experiment > 0 else lst_all - lst = [lst_all[experiment-1]] if experiment > 0 else lst_all - - if len(lst)>8: + if len(lst) > 8: st.markdown('Too many experiments - continue pruning ...') - return {'return':0} - + return {'return': 0} # Check experiments results = [] @@ -185,14 +183,14 @@ def visualize(st, query_params, action = ''): if os.path.isfile(path_to_result): emeta = experiment.meta - desc = {'path':path_to_result, + desc = {'path': path_to_result, 'experiment_dir': d, - 'experiment_uid':emeta['uid'], - 'experiment_alias':emeta['alias'], - 'experiment_tags':','.join(emeta.get('tags',[]))} + 'experiment_uid': emeta['uid'], + 'experiment_alias': emeta['alias'], + 'experiment_tags': ','.join(emeta.get('tags', []))} add = True - if result_uid!='': + if result_uid != '': add = False 
r = cmind.utils.load_json(path_to_result) if r['return'] == 0: @@ -201,13 +199,13 @@ def visualize(st, query_params, action = ''): results_meta[path_to_result] = meta for m in meta: - if m.get('uid','') == result_uid: + if m.get('uid', '') == result_uid: add = True break if add: - pwd = experiment.meta.get('password_hash','') - if pwd=='': + pwd = experiment.meta.get('password_hash', '') + if pwd == '': results.append(desc) else: desc['password_hash'] = pwd @@ -218,65 +216,72 @@ def visualize(st, query_params, action = ''): results_with_password.append(desc) # Check if password - if len(passwords)>0: - password = st.text_input('Some results are protected by password. Enter password to unlock them:', value='', key='v_experiment_pwd').strip() + if len(passwords) > 0: + password = st.text_input( + 'Some results are protected by password. Enter password to unlock them:', + value='', + key='v_experiment_pwd').strip() - if password!='': + if password != '': import bcrypt # salt = bcrypt.gensalt() # TBD: temporal hack to demo password protection for experiments # salt = bcrypt.gensalt() password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' - password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt).decode('utf-8') + password_hash2 = bcrypt.hashpw( + password.encode('utf-8'), + password_salt).decode('utf-8') for result in results_with_password: if result['password_hash'] == password_hash2: results.append(result) # How to visualize selection - if len(results)==0: + if len(results) == 0: st.markdown('No results found!') - return {'return':0} - - - if st.session_state.get('tmp_cm_results','')=='': - st.session_state['tmp_cm_results']=len(results) - elif int(st.session_state['tmp_cm_results'])!=len(results): - st.session_state['tmp_cm_results']=len(results) - st.session_state['how']=0 + return {'return': 0} + if st.session_state.get('tmp_cm_results', '') == '': + st.session_state['tmp_cm_results'] = len(results) + elif int(st.session_state['tmp_cm_results']) != len(results): + st.session_state['tmp_cm_results'] = len(results) + st.session_state['how'] = 0 how = '' - if result_uid=='': - v_max_results = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS','') + if result_uid == '': + v_max_results = os.environ.get( + 'CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS', '') - if v_max_results!='' and len(results)>int(v_max_results): + if v_max_results != '' and len(results) > int(v_max_results): st.markdown('Too many results - continue pruning ...') - return {'return':0} + return {'return': 0} - v_how = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_HOW','') - q_how = query_params.get('type',['']) - if len(q_how)>0: - if q_how[0]!='': + v_how = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_HOW', '') + q_how = query_params.get('type', ['']) + if len(q_how) > 0: + if q_how[0] != '': v_how = q_how[0] how_selection = ['', '2d-static', '2d', 'bar'] - how_selection_desc = ['', 'Scatter plot (static)', 'Scatter plot (interactive, slow - to be improved)', 'Bar plot (static)'] + how_selection_desc = [ + '', + 'Scatter plot (static)', + 'Scatter plot (interactive, slow - to be improved)', + 'Bar plot (static)'] how_index = 0 - if v_how!='' and v_how in how_selection: + if v_how != '' and v_how in how_selection: how_index = how_selection.index(v_how) how2 = st.selectbox('Select how to visualize {} CM experiment set(s):'.format(len(results)), - range(len(how_selection_desc)), - format_func=lambda x: how_selection_desc[x], - index = how_index, - key = 'how') - + range(len(how_selection_desc)), + format_func=lambda x: 
how_selection_desc[x], + index=how_index, + key='how') if how2 == '' or how2 == 0: - return {'return':0} + return {'return': 0} how = how_selection[how2] @@ -287,14 +292,14 @@ def visualize(st, query_params, action = ''): keys = [] all_data = [] - - derived_metrics_value = query_params.get('derived_metrics',[''])[0].strip() + derived_metrics_value = query_params.get( + 'derived_metrics', [''])[0].strip() derived_metrics_value = st.text_input("Optional: add derived metrics in Python. Example: result['Accuracy2'] = result['Accuracy']*2", - value = derived_metrics_value).strip() + value=derived_metrics_value).strip() for x in security: if x in derived_metrics_value: - derived_metrics_value='' + derived_metrics_value = '' break error_shown2 = False @@ -305,22 +310,25 @@ def visualize(st, query_params, action = ''): result_meta = results_meta[path_to_result] else: r = cmind.utils.load_json_or_yaml(path_to_result) - if r['return']>0: return r + if r['return'] > 0: + return r result_meta = r['meta'] for result in result_meta: # Add extra info - for k in ['experiment_dir', 'experiment_alias', 'experiment_uid', 'experiment_tags']: + for k in ['experiment_dir', 'experiment_alias', + 'experiment_uid', 'experiment_tags']: if k in desc: - result[k]=desc[k] + result[k] = desc[k] - if derived_metrics_value!='': + if derived_metrics_value != '': try: exec(derived_metrics_value) except Exception as e: if not error_shown2: - st.markdown('*Syntax error in derived metrics: {}*'.format(e)) + st.markdown( + '*Syntax error in derived metrics: {}*'.format(e)) error_shown2 = True all_values.append(result) @@ -329,25 +337,38 @@ def visualize(st, query_params, action = ''): if k not in keys: keys.append(k) - first_keys = ['Organization', 'Model', 'Scenario', 'SystemName', 'notes', 'framework', 'Result', 'Result_Units', 'Accuracy'] - sorted_keys = [k for k in first_keys if k in keys] + [k for k in sorted(keys, key=lambda s: s.lower()) if k not in first_keys] - - filter_value = query_params.get('filter',[''])[0].strip() - if result_uid=='': # and filter_value!='': - filter_value = st.text_input("Optional: add result filter in Python. Examples: result['Accuracy']>75 or 'llama2' in result['Model']", value = filter_value).strip() + first_keys = [ + 'Organization', + 'Model', + 'Scenario', + 'SystemName', + 'notes', + 'framework', + 'Result', + 'Result_Units', + 'Accuracy'] + sorted_keys = [k for k in first_keys if k in keys] + \ + [k for k in sorted(keys, key=lambda s: s.lower()) + if k not in first_keys] + + filter_value = query_params.get('filter', [''])[0].strip() + if result_uid == '': # and filter_value!='': + filter_value = st.text_input( + "Optional: add result filter in Python. 
Examples: result['Accuracy']>75 or 'llama2' in result['Model']",
+            value=filter_value).strip()

     st.markdown('---')

     for x in security:
         if x in filter_value:
-            filter_value=''
+            filter_value = ''
             break

     # all_values is a list of dictionaries with all keys
-    error_shown=False
+    error_shown = False
     for result in all_values:

-        if filter_value!='':
+        if filter_value != '':
             try:
                 if not eval(filter_value):
                     continue
@@ -357,7 +378,7 @@ def visualize(st, query_params, action = ''):
                     error_shown = True

         # Check if 1 result UID is selected
-        if result_uid!='' and result.get('uid','')!=result_uid:
+        if result_uid != '' and result.get('uid', '') != result_uid:
             continue

         data = []
@@ -366,34 +387,28 @@ def visualize(st, query_params, action = ''):

         all_data.append(data)

-        if result_uid!='': break
+        if result_uid != '':
+            break

     ###################################################
-    if len(all_data)==0:
+    if len(all_data) == 0:
         st.markdown('No results found for your selection.')
-        return {'return':0, 'end_html':end_html}
-
-
-
-
+        return {'return': 0, 'end_html': end_html}

     ###################################################
     # If experiment found and 1 UID, print a table
-    if result_uid!='':
+    if result_uid != '':

         st.markdown('---')

         st.markdown('# Result summary')

-
         data = all_data[0]
-
         result = {}

-        j=0
+        j = 0
         for k in sorted_keys:
             result[k] = data[j]
-            j+=1
-
+            j += 1

         # Check badges
         x = ''
@@ -402,23 +417,26 @@ def visualize(st, query_params, action = ''):
             if result.get(k, False):
                 img = repro_badges[k]['img']

-                x += '\n<img src="{}">'.format(img)
-
-        if x!='':
-            st.write('<center>\n'+x+'\n</center>\n', unsafe_allow_html = True)
+                x += '\n<img src="{}">'.format(
+                    img)
+        if x != '':
+            st.write(
+                '<center>\n' + x + '\n</center>\n',
+                unsafe_allow_html=True)

         x = ''
         for k in sorted_keys:
-            x+='* **{}**: {}\n'.format(k,str(result[k]))
+            x += '* **{}**: {}\n'.format(k, str(result[k]))

         st.markdown(x)

         # Check associated reports
-        r=cmind.access({'action':'find',
-                        'automation':'report,6462ecdba2054467',
-                        'tags':'result-{}'.format(result_uid)})
-        if r['return']>0: return r
+        r = cmind.access({'action': 'find',
+                          'automation': 'report,6462ecdba2054467',
+                          'tags': 'result-{}'.format(result_uid)})
+        if r['return'] > 0:
+            return r

         lst = r['list']

@@ -430,27 +448,27 @@ def visualize(st, query_params, action = ''):

             report_meta = l.meta

             report_alias = report_meta['alias']
-            report_title = report_meta.get('title','')
+            report_title = report_meta.get('title', '')

-            report_name = report_title if report_title!='' else report_alias
+            report_name = report_title if report_title != '' else report_alias

             r = cmind.utils.load_txt(f1)
-            if r['return']>0: return r
+            if r['return'] > 0:
+                return r

             s = r['string']

             st.markdown('---')
-            st.markdown('### '+report_name)
+            st.markdown('### ' + report_name)

             st.markdown(s)

-
         # Create self link
         st.markdown("""---""")

         experiment_alias_or_uid = result['experiment_uid']

-        end_html='''
Self link
@@ -458,94 +476,104 @@ def visualize(st, query_params, action = ''): st.write(end_html, unsafe_allow_html=True) - - return {'return':0} - - - - - + return {'return': 0} ################################################### # Select 2D keys - axis_key_x='' - axis_key_y='' - axis_key_c='' + axis_key_x = '' + axis_key_y = '' + axis_key_c = '' - if len(keys)>0: + if len(keys) > 0: keys = [''] + sorted_keys - axis_key_x = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X','') - q_axis_key_x = query_params.get('x',['']) - if len(q_axis_key_x)>0: - if q_axis_key_x[0]!='': + axis_key_x = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X', '') + q_axis_key_x = query_params.get('x', ['']) + if len(q_axis_key_x) > 0: + if q_axis_key_x[0] != '': axis_key_x = q_axis_key_x[0] i_axis_key_x = 0 - if axis_key_x != '' and axis_key_x in keys: i_axis_key_x = keys.index(axis_key_x) - if axis_key_x == '' and 'Result' in keys: i_axis_key_x = keys.index('Result') - axis_key_x = st.selectbox('Select X key', keys, index=i_axis_key_x, key='x') - - axis_key_y = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y','') - q_axis_key_y = query_params.get('y',['']) - if len(q_axis_key_y)>0: - if q_axis_key_y[0]!='': + if axis_key_x != '' and axis_key_x in keys: + i_axis_key_x = keys.index(axis_key_x) + if axis_key_x == '' and 'Result' in keys: + i_axis_key_x = keys.index('Result') + axis_key_x = st.selectbox( + 'Select X key', keys, index=i_axis_key_x, key='x') + + axis_key_y = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y', '') + q_axis_key_y = query_params.get('y', ['']) + if len(q_axis_key_y) > 0: + if q_axis_key_y[0] != '': axis_key_y = q_axis_key_y[0] i_axis_key_y = 0 - if axis_key_y != '' and axis_key_y in keys: i_axis_key_y = keys.index(axis_key_y) - if axis_key_y == '' and 'Accuracy' in keys: i_axis_key_y = keys.index('Accuracy') - axis_key_y = st.selectbox('Select Y key', keys, index=i_axis_key_y, key='y') - - axis_key_c = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C','') - q_axis_key_c = query_params.get('c',['']) - if len(q_axis_key_c)>0: - if q_axis_key_c[0]!='': + if axis_key_y != '' and axis_key_y in keys: + i_axis_key_y = keys.index(axis_key_y) + if axis_key_y == '' and 'Accuracy' in keys: + i_axis_key_y = keys.index('Accuracy') + axis_key_y = st.selectbox( + 'Select Y key', keys, index=i_axis_key_y, key='y') + + axis_key_c = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C', '') + q_axis_key_c = query_params.get('c', ['']) + if len(q_axis_key_c) > 0: + if q_axis_key_c[0] != '': axis_key_c = q_axis_key_c[0] i_axis_key_c = 0 - if axis_key_c != '' and axis_key_c in keys: i_axis_key_c = keys.index(axis_key_c) - if axis_key_c == '' and 'version' in keys: i_axis_key_c = keys.index('version') - axis_key_c = st.selectbox('Select Color key', keys, index=i_axis_key_c, key='c') - - axis_key_s = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S','') - q_axis_key_s = query_params.get('s',['']) - if len(q_axis_key_s)>0: + if axis_key_c != '' and axis_key_c in keys: + i_axis_key_c = keys.index(axis_key_c) + if axis_key_c == '' and 'version' in keys: + i_axis_key_c = keys.index('version') + axis_key_c = st.selectbox( + 'Select Color key', + keys, + index=i_axis_key_c, + key='c') + + axis_key_s = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S', '') + q_axis_key_s = query_params.get('s', ['']) + if len(q_axis_key_s) > 0: axis_key_s = q_axis_key_s[0] i_axis_key_s = 0 - if axis_key_s != '' and axis_key_s in keys: i_axis_key_s = keys.index(axis_key_s) - axis_key_s = st.selectbox('Select Style key', keys, 
index=i_axis_key_s, key='s')
-
+        if axis_key_s != '' and axis_key_s in keys:
+            i_axis_key_s = keys.index(axis_key_s)
+        axis_key_s = st.selectbox(
+            'Select Style key',
+            keys,
+            index=i_axis_key_s,
+            key='s')

     # Select values
     values = []
-    if axis_key_x!='' and axis_key_y!='':
+    if axis_key_x != '' and axis_key_y != '':
         for v in all_values:
             x = v.get(axis_key_x, None)
             y = v.get(axis_key_y, None)
-            if x!=None and y!=None:
+            if x is not None and y is not None:
                 values.append(v)

-    if len(values)>0:
+    if len(values) > 0:

-        #fig, ax = plt.subplots(figsize=(12,6))
-        fig, ax = plt.subplots() #figsize=(6,4))
+        # fig, ax = plt.subplots(figsize=(12,6))
+        fig, ax = plt.subplots()  # figsize=(6,4))

         ax.set_xlabel(axis_key_x)
         ax.set_ylabel(axis_key_y)

         title = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TITLE', '')
-        q_title = query_params.get('title',[''])
-        if len(q_title)>0:
-            if q_title[0]!='':
+        q_title = query_params.get('title', [''])
+        if len(q_title) > 0:
+            if q_title[0] != '':
                 title = q_title[0]
         ax.set_title(title, size=16)

         if how == 'bar':
             ax.set_title('Under development ...', size=16)
-            ax.yaxis.grid(linestyle = 'dotted')
+            ax.yaxis.grid(linestyle='dotted')
         else:
-            ax.grid(linestyle = 'dotted')
-        #https://matplotlib.org/stable/api/markers_api.html
+            ax.grid(linestyle='dotted')
+        # https://matplotlib.org/stable/api/markers_api.html

         unique_color_values = {}
         # unique_colors = list(mcolors.CSS4_COLORS.keys())
@@ -568,7 +596,7 @@ def visualize(st, query_params, action = ''):

         values2 = []

         for result in values:
-            if filter_value!='':
+            if filter_value != '':
                 try:
                     if not eval(filter_value):
                         continue
@@ -581,14 +609,14 @@ def visualize(st, query_params, action = ''):

             if how == 'bar':
                 x = result.get(axis_key_x, None)
-                if x != None and x!='' and x not in unique_x_values:
+                if x is not None and x != '' and x not in unique_x_values:
                     unique_x_values.append(x)

                 s = result.get(axis_key_s, None)
-                if s != None and s!='' and s not in unique_s_values:
+                if s is not None and s != '' and s not in unique_s_values:
                     unique_s_values.append(s)

-        ############################################################################
+        #######################################################################
         # Continue visualizing
         if how == '2d-static' or how == 'bar':

@@ -602,7 +630,7 @@ def visualize(st, query_params, action = ''):

             for result in values2:
                 v = result

-                t+=1
+                t += 1

                 x = v.get(axis_key_x, None)
                 y = v.get(axis_key_y, None)

@@ -611,43 +639,50 @@ def visualize(st, query_params, action = ''):
                 yy.append(y)

                 color = 'blue'
-                if axis_key_c!='':
+                if axis_key_c != '':
                     c = v.get(axis_key_c, None)
-                    if c!=None:
+                    if c is not None:
                         if c in unique_color_values:
                             color = unique_color_values[c]
                         else:
                             color = unique_colors[i_unique_color_values]
                             unique_color_values[c] = color
-                            if i_unique_color_values<(len(unique_colors)-1):
-                                i_unique_color_values+=1
+                            if i_unique_color_values < (
+                                    len(unique_colors) - 1):
+                                i_unique_color_values += 1

                 cc.append(color)

                 style = 'o'
-                if axis_key_s!='':
+                if axis_key_s != '':
                     s = v.get(axis_key_s, None)
-                    if s!=None:
+                    if s is not None:
                         if s in unique_style_values:
                             style = unique_style_values[s]
                         else:
                             style = unique_styles[i_unique_style_values]
                             unique_style_values[s] = style
-                            if i_unique_style_values<(len(unique_styles)-1):
-                                i_unique_style_values+=1
+                            if i_unique_style_values < (
+                                    len(unique_styles) - 1):
+                                i_unique_style_values += 1

                 ss.append(style)

-                info=''
+                info = ''
                 for key in sorted(v.keys(), key=lambda x: x.lower()):
                     value = v[key]
-                    info+=str(key)+': '+str(value)+'<br>\n'
+                    info += str(key) + ': ' + str(value) + '<br>\n'

                 io.append(info)

             import plotly.express as px
-            dd = {axis_key_x:xx,axis_key_y:yy,axis_key_c:cc,axis_key_s:ss,'info':io}
+            dd = {
+                axis_key_x: xx,
+                axis_key_y: yy,
+                axis_key_c: cc,
+                axis_key_s: ss,
+                'info': io}

             # https://docs.streamlit.io/library/api-reference/charts/st.bar_chart
             # https://docs.streamlit.io/library/api-reference/charts/st.plotly_chart

@@ -658,14 +693,22 @@ def visualize(st, query_params, action = ''):
             if how == 'bar':
                 st.bar_chart(df, x=axis_key_x, y=axis_key_y)
             else:
-                fig = px.scatter(df, x=axis_key_x, y=axis_key_y, color=axis_key_c, symbol=axis_key_s, hover_name='info', height=1000)
-
-                st.plotly_chart(fig, theme="streamlit", use_container_width=True)
-
-
+                fig = px.scatter(
+                    df,
+                    x=axis_key_x,
+                    y=axis_key_y,
+                    color=axis_key_c,
+                    symbol=axis_key_s,
+                    hover_name='info',
+                    height=1000)
+
+                st.plotly_chart(
+                    fig,
+                    theme="streamlit",
+                    use_container_width=True)

         elif how == '2d':

-            #####################################################################
+            ###################################################################
             # 2D interactive graph - very slow - need to be updated

             width = 1

@@ -673,89 +716,98 @@ def visualize(st, query_params, action = ''):
             for result in values2:
                 v = result

-                t+=1
+                t += 1

                 x = v.get(axis_key_x, None)
                 y = v.get(axis_key_y, None)

-                url = v.get('url','')
-                if url=='': url = v.get('git_url','')
+                url = v.get('url', '')
+                if url == '':
+                    url = v.get('git_url', '')

                 color = 'blue'
-                if axis_key_c!='':
+                if axis_key_c != '':
                     c = v.get(axis_key_c, None)
-                    if c!=None:
+                    if c is not None:
                         if c in unique_color_values:
                             color = unique_color_values[c]
                         else:
                             color = unique_colors[i_unique_color_values]
                             unique_color_values[c] = color
-                            if i_unique_color_values<(len(unique_colors)-1):
-                                i_unique_color_values+=1
+                            if i_unique_color_values < (
+                                    len(unique_colors) - 1):
+                                i_unique_color_values += 1

                 style = 'o'
-                if axis_key_s!='':
+                if axis_key_s != '':
                     s = v.get(axis_key_s, None)
-                    if s!=None:
+                    if s is not None:
                         if s in unique_style_values:
                             style = unique_style_values[s]
                         else:
                             style = unique_styles[i_unique_style_values]
                             unique_style_values[s] = style
-                            if i_unique_style_values<(len(unique_styles)-1):
-                                i_unique_style_values+=1
+                            if i_unique_style_values < (
+                                    len(unique_styles) - 1):
+                                i_unique_style_values += 1

                 graph = ax.scatter(x, y, color=color, marker=style)

-                info=''
+                info = ''
                 for key in sorted(v.keys(), key=lambda x: x.lower()):
                     value = v[key]
-                    info+='<b>'+str(key)+'</b>: '+str(value)+'<br>\n'
+                    info += '<b>' + str(key) + '</b>: ' + str(value) + '<br>\n'

-                info2 = '<div>'+info+'</div>'
+                info2 = '<div>' + \
+                    info + '</div>'

                 label = [info2]
                 plugins.connect(fig, plugins.PointHTMLTooltip(graph, label))

-                experiment_uid = v.get('experiment_uid','')
-                if experiment_uid!='' and experiment_uid not in experiment_uids:
+                experiment_uid = v.get('experiment_uid', '')
+                if experiment_uid != '' and experiment_uid not in experiment_uids:
                     experiment_uids.append(experiment_uid)

-                uid = v.get('uid','')
-                if uid!='':
-                    xaction = 'action={}&'.format(action) if action!='' else ''
-                    url = '?{}name={}&result_uid={}'.format(xaction, experiment_uid, uid)
+                uid = v.get('uid', '')
+                if uid != '':
+                    xaction = 'action={}&'.format(
+                        action) if action != '' else ''
+                    url = '?{}name={}&result_uid={}'.format(
+                        xaction, experiment_uid, uid)

-                if url!='':
+                if url != '':
                     targets = [url]
-                    plugins.connect(fig, OpenBrowserOnClick(graph, targets = targets))
+                    plugins.connect(
+                        fig, OpenBrowserOnClick(
+                            graph, targets=targets))

             # Render graph
             fig_html = mpld3.fig_to_html(fig)
             components.html(fig_html, width=1100, height=500)

-            #fig_html = '<div>'+fig_html+'</div>'
-
-            #components.html(fig_html, width=1000, height=800)
-            #st.markdown('---')
+            # fig_html = '<div>'+fig_html+'</div>'

+            # components.html(fig_html, width=1000, height=800)
+            # st.markdown('---')

-        ########################################################################
+        #######################################################################
         # Show all data
         df = pd.DataFrame(
-            all_data,
-            columns=(k for k in sorted_keys if k!='')
+            all_data,
+            columns=(k for k in sorted_keys if k != '')
         )

         st.markdown('---')
         st.dataframe(df)

         # Check if can create self link
-        if len(experiment_uids)==1:
+        if len(experiment_uids) == 1:
             st.markdown("""---""")

-            xtype = '&type={}'.format(how) if how!='' else ''
+            xtype = '&type={}'.format(how) if how != '' else ''

-            end_html='''
Self link
@@ -763,16 +815,13 @@ def visualize(st, query_params, action = ''):

     st.write(end_html, unsafe_allow_html=True)

-
-    return {'return':0}
-
-
+    return {'return': 0}


 if __name__ == "__main__":
     r = main()

-    if r['return']>0:
+    if r['return'] > 0:
         st.markdown("""---""")
         st.markdown('**Error detected by CM:** {}'.format(r['error']))
diff --git a/script/gui/misc.py b/script/gui/misc.py
index 33ffc92f64..feb9122fba 100644
--- a/script/gui/misc.py
+++ b/script/gui/misc.py
@@ -1,25 +1,26 @@
 # Support functions

 ##########################################################
-def make_url(name, alias='', action='contributors', key='name', md=True, skip_url_quote=False):
+def make_url(name, alias='', action='contributors',
+             key='name', md=True, skip_url_quote=False):

     import urllib

-    if alias == '': alias = name
+    if alias == '':
+        alias = name

     x = urllib.parse.quote_plus(alias) if not skip_url_quote else alias

     xaction = ''
     if action != '':
         xaction = 'action={}'.format(action)
-        if key!='':
-            xaction+='&'
-
+        if key != '':
+            xaction += '&'

     url = '?{}'.format(xaction)

-    if key!='':
-        url+='{}={}'.format(key,x)
+    if key != '':
+        url += '{}={}'.format(key, x)

     if md:
         md = '[{}]({})'.format(name, url)
@@ -29,6 +30,8 @@ def make_url(name, alias='', action='contributors', key='name', md=True, skip_ur
     return md

 ##########################################################
+
+
 def convert_date(date):
     # date: format YYYYMMDD to YYYY month day

@@ -39,11 +42,14 @@ def convert_date(date):
         month = calendar.month_abbr[int(date[4:6])]
         day = str(int(date[6:8]))
     except Exception as e:
-        return {'return':1, 'error':'date "{}" is not of format YYYYMMDD: {}'.format(date, format(e))}
+        return {'return': 1, 'error': 'date "{}" is not of format YYYYMMDD: {}'.format(
+            date, format(e))}

-    return {'return':0, 'string':year+' '+month+' '+day}
+    return {'return': 0, 'string': year + ' ' + month + ' ' + day}

 ##########################################################
+
+
 def get_params(st):

     compatibility = False

@@ -53,9 +59,9 @@ def get_params(st):
         params = {}
         for k in params2:
             v = params2[k]
-            if type(v)!=list:
-                params[k]=[v]
-    except:
+            if type(v) != list:
+                params[k] = [v]
+    except BaseException:
         compatibility = True

     if compatibility:
@@ -64,6 +70,8 @@ def get_params(st):
     return params

 ##########################################################
+
+
 def get_all_deps_tags(i):

     meta = i['meta']
     all_deps_tags = i.get('all_deps_tags', [])

@@ -72,25 +80,24 @@ def get_all_deps_tags(i):
         v = meta[k]

         if k == 'tags':
-            if type(v) == list:
-                v = ','.join(v)
+            if isinstance(v, list):
+                v = ','.join(v)

             if v not in all_deps_tags:
                 all_deps_tags.append(v)

-        elif type(v) == dict:
-            r = get_all_deps_tags({'meta':v, 'all_deps_tags':all_deps_tags})
+        elif isinstance(v, dict):
+            r = get_all_deps_tags({'meta': v, 'all_deps_tags': all_deps_tags})
             all_deps_tags = r['all_deps_tags']

-        elif type(v) == list:
-            for vv in v:
-                if type(vv) == dict:
-                    r = get_all_deps_tags({'meta':vv, 'all_deps_tags':all_deps_tags})
+        elif isinstance(v, list):
+            for vv in v:
+                if isinstance(vv, dict):
+                    r = get_all_deps_tags(
+                        {'meta': vv, 'all_deps_tags': all_deps_tags})
                     all_deps_tags = r['all_deps_tags']

-    return {'return':0, 'all_deps_tags':all_deps_tags}
+    return {'return': 0, 'all_deps_tags': all_deps_tags}

 ##########################################################
+

 def make_selector(i):

     key = i['key']
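For reference, a minimal standalone sketch of the get_all_deps_tags helper as formatted above: the isinstance() dispatch has to read exactly like this for the recursion over nested dicts and lists of dicts to work. The sample meta dictionary in the assertion is hypothetical and only illustrates the expected behavior.

# Standalone sketch; the sample meta below is hypothetical.
def get_all_deps_tags(i):
    meta = i['meta']
    all_deps_tags = i.get('all_deps_tags', [])

    for k in meta:
        v = meta[k]

        if k == 'tags':
            # A tags list is flattened into a comma-separated string
            if isinstance(v, list):
                v = ','.join(v)
            if v not in all_deps_tags:
                all_deps_tags.append(v)
        elif isinstance(v, dict):
            # Recurse into nested dictionaries
            r = get_all_deps_tags({'meta': v, 'all_deps_tags': all_deps_tags})
            all_deps_tags = r['all_deps_tags']
        elif isinstance(v, list):
            # Recurse into each dictionary found in a list
            for vv in v:
                if isinstance(vv, dict):
                    r = get_all_deps_tags(
                        {'meta': vv, 'all_deps_tags': all_deps_tags})
                    all_deps_tags = r['all_deps_tags']

    return {'return': 0, 'all_deps_tags': all_deps_tags}

assert get_all_deps_tags({'meta': {'deps': [{'tags': ['get', 'python3']}],
                                   'tags': ['app', 'image']}}) == \
    {'return': 0, 'all_deps_tags': ['get,python3', 'app,image']}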
@@ -103,19 +110,18 @@ def make_selector(i):

     hide = i.get('hide', False)

-    key2 = '@'+key
+    key2 = '@' + key

     value2 = None

-    if type(value) == dict:
-        desc = value['desc']
+    if isinstance(value, dict):
+        desc = value['desc']

         choices = value.get('choices', [])
         boolean = value.get('boolean', False)
         default = value.get('default', '')
         force = value.get('force', None)

-        if force != None:
+        if force is not None:
             value2 = force
             if not hide:
                 st.markdown('**{}:** {}'.format(desc, str(force)))
@@ -124,33 +130,35 @@ def make_selector(i):
             if boolean:
                 v = default
                 x = params.get(key2, None)
-                if x!=None and len(x)>0 and x[0]!=None:
-                    if x[0].lower()=='true':
+                if x is not None and len(x) > 0 and x[0] is not None:
+                    if x[0].lower() == 'true':
                         v = True
-                    elif x[0].lower()=='false':
+                    elif x[0].lower() == 'false':
                         v = False
                 if hide:
                     value2 = v
                 else:
                     value2 = st.checkbox(desc, value=v, key=key2)
-            elif len(choices)>0:
+            elif len(choices) > 0:
                 x = params.get(key2, None)
-                if x!=None and len(x)>0 and x[0]!=None:
+                if x is not None and len(x) > 0 and x[0] is not None:
                     x = x[0]
                     if x in choices:
-                        selected_index = choices.index(x) if x in choices else 0
+                        selected_index = choices.index(
+                            x) if x in choices else 0
                     else:
-                        selected_index = choices.index(default) if default!='' else 0
+                        selected_index = choices.index(default) if default != '' else 0
                 else:
-                    selected_index = choices.index(default) if default!='' else 0
+                    selected_index = choices.index(default) if default != '' else 0
                 if hide:
                     value2 = choices[selected_index]
                 else:
-                    value2 = st.selectbox(desc, choices, index=selected_index, key=key2)
+                    value2 = st.selectbox(
+                        desc, choices, index=selected_index, key=key2)
             else:
                 v = default
                 x = params.get(key2, None)
-                if x!=None and len(x)>0 and x[0]!=None:
+                if x is not None and len(x) > 0 and x[0] is not None:
                     v = x[0]
                 if hide:
                     value2 = v
@@ -167,48 +175,52 @@ def make_selector(i):
             value2 = st.text_input(desc)

     st_inputs[key2] = value2

-    return {'return':0, 'key2': key2, 'value2': value2}
+    return {'return': 0, 'key2': key2, 'value2': value2}

 ##########################################################
+

 def make_selection(st, selection, param_key, text, x_uid, force_index=0):

     x_meta = {}

-    if len(selection)>0:
-        selection = sorted(selection, key = lambda v: v['name'])
+    if len(selection) > 0:
+        selection = sorted(selection, key=lambda v: v['name'])

         if x_uid != '':
             x_meta = selection[0]
             st.markdown('**Selected {}:** {}'.format(text, x_meta['name']))
         else:
-            x_selection = [{'name':''}]
+            x_selection = [{'name': ''}]
             x_selection += selection

             x_id = st.selectbox('Select {}:'.format(text),
                                 range(len(x_selection)),
                                 format_func=lambda x: x_selection[x]['name'],
-                                index = force_index,
-                                key = param_key)
+                                index=force_index,
+                                key=param_key)

-            if x_id>0:
+            if x_id > 0:
                 x_meta = x_selection[x_id]

-    return {'return':0, 'meta':x_meta}
+    return {'return': 0, 'meta': x_meta}

+
+##########################################################################

-##################################################################################
 def get_with_complex_key_safe(meta, key):
     v = get_with_complex_key(meta, key)

-    if v == None: v=''
+    if v is None:
+        v = ''

     return v

-##################################################################################
+##########################################################################
+

 def get_with_complex_key(meta, key):

     j = key.find('.')

-    if j<0:
+    if j < 0:
         return meta.get(key)

     key0 = key[:j]

@@ -216,4 +228,4 @@ def get_with_complex_key(meta, key):
     if key0 not in meta:
         return None

-    return get_with_complex_key(meta[key0], key[j+1:])
+    return get_with_complex_key(meta[key0], key[j + 1:])
diff --git a/script/gui/playground.py b/script/gui/playground.py
index 109d048469..851d3f1bf5 100644
--- a/script/gui/playground.py
+++
b/script/gui/playground.py @@ -8,6 +8,7 @@ import cmind import misc + def main(): st.set_page_config(layout="wide", @@ -36,15 +37,16 @@ def main(): st.markdown(hide_streamlit_style, unsafe_allow_html=True) # Set title (check extra user HTML to embed before title if needed) - extra = os.environ.get('CM_GUI_EXTRA_HTML','') + extra = os.environ.get('CM_GUI_EXTRA_HTML', '') - if extra!='': + if extra != '': url = '' for p in params: - v=str(','.join(params[p])) - if url!='': url+=';' - url+=p+'='+v - extra=extra.replace('{{CM_URL}}', url)+'\n\n' + v = str(','.join(params[p])) + if url != '': + url += ';' + url += p + '=' + v + extra = extra.replace('{{CM_URL}}', url) + '\n\n' st.write('''
@@ -54,32 +56,32 @@ def main():
'''.format(extra), - unsafe_allow_html=True - ) + unsafe_allow_html=True + ) - extra_file = os.environ.get('CM_GUI_EXTRA_HTML_FILE','') - if extra_file!='': + extra_file = os.environ.get('CM_GUI_EXTRA_HTML_FILE', '') + if extra_file != '': r = cmind.utils.load_txt(extra_file) - if r['return']>0: return r + if r['return'] > 0: + return r - s = '\n\n'+r['string']+'\n\n' + s = '\n\n' + r['string'] + '\n\n' st.write(s, unsafe_allow_html=True) - # Check action and basic menu - action = params.get('action',['scripts'])[0].lower() - - style_action_scripts='font-style:italic;font-weight:bold;color:#ffffff' if action=='scripts' else '' - style_action_howtorun='font-style:italic;font-weight:bold;color:#ffffff' if action=='howtorun' else '' - style_action_challenges='font-style:italic;font-weight:bold;color:#ffffff' if action=='challenges' else '' - style_action_contributors='font-style:italic;font-weight:bold;color:#ffffff' if action=='contributors' else '' - style_action_experiments='font-style:italic;font-weight:bold;color:#ffffff' if action=='experiments' else '' - style_action_reproduce='font-style:italic;font-weight:bold;color:#ffffff' if action=='reproduce' else '' - style_action_apps='font-style:italic;font-weight:bold;color:#ffffff' if action=='apps' else '' - style_action_reports='font-style:italic;font-weight:bold;color:#ffffff' if action=='reports' else '' - style_action_beta='font-style:italic;font-weight:bold;color:#ffffff' if action=='beta' else '' - style_action_install='font-style:italic;font-weight:bold;color:#ffffff' if action=='install' else '' + action = params.get('action', ['scripts'])[0].lower() + + style_action_scripts = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'scripts' else '' + style_action_howtorun = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'howtorun' else '' + style_action_challenges = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'challenges' else '' + style_action_contributors = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'contributors' else '' + style_action_experiments = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'experiments' else '' + style_action_reproduce = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'reproduce' else '' + style_action_apps = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'apps' else '' + style_action_reports = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'reports' else '' + style_action_beta = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'beta' else '' + style_action_install = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'install' else '' st.write('''
@@ -97,17 +99,17 @@ def main():
'''.format( - style_action_scripts, - style_action_howtorun, - style_action_challenges, - style_action_experiments, - style_action_reproduce, - style_action_contributors, - style_action_reports, - style_action_beta, - style_action_apps, - style_action_install - ), + style_action_scripts, + style_action_howtorun, + style_action_challenges, + style_action_experiments, + style_action_reproduce, + style_action_contributors, + style_action_reports, + style_action_beta, + style_action_apps, + style_action_install + ), unsafe_allow_html=True ) @@ -115,7 +117,7 @@ def main(): # st.markdown("""---""") st.markdown('') - r={'return':0} + r = {'return': 0} if action == 'challenges': from playground_challenges import page @@ -125,7 +127,7 @@ def main(): r = page(st, params) elif action == 'experiments': from graph import visualize - r = visualize(st, params, action = 'experiments') + r = visualize(st, params, action='experiments') elif action == 'contributors': from playground_contributors import page r = page(st, params) @@ -148,16 +150,16 @@ def main(): from playground_install import page r = page(st, params, {}) - if r['return']>0: - st.markdown('**CM error:** {} . Please report [here](https://github.com/mlcommons/ck/issues)'.format(r['error'])) - - end_html=r.get('end_html','') + if r['return'] > 0: + st.markdown( + '**CM error:** {} . Please report [here](https://github.com/mlcommons/ck/issues)'.format(r['error'])) + end_html = r.get('end_html', '') # Finalize all pages st.markdown("""---""") - if end_html!='': + if end_html != '': st.write(end_html, unsafe_allow_html=True) st.write(""" @@ -172,9 +174,11 @@ def make_url(name, alias='', action='contributors', key='name', md=True): import urllib - if alias == '': alias = name + if alias == '': + alias = name - url = '?action={}&{}={}'.format(action, key, urllib.parse.quote_plus(alias)) + url = '?action={}&{}={}'.format( + action, key, urllib.parse.quote_plus(alias)) if md: md = '[{}]({})'.format(name, url) @@ -194,9 +198,10 @@ def convert_date(date): month = calendar.month_abbr[int(date[4:6])] day = str(int(date[6:8])) except Exception as e: - return {'return':1, 'error':'date "{}" is not of format YYYYMMDD: {}'.format(date, format(e))} + return {'return': 1, 'error': 'date "{}" is not of format YYYYMMDD: {}'.format( + date, format(e))} - return {'return':0, 'string':year+' '+month+' '+day} + return {'return': 0, 'string': year + ' ' + month + ' ' + day} if __name__ == "__main__": diff --git a/script/gui/playground_apps.py b/script/gui/playground_apps.py index bcde96578e..acaf9f4bff 100644 --- a/script/gui/playground_apps.py +++ b/script/gui/playground_apps.py @@ -14,6 +14,7 @@ external_module_path = '' external_module_meta = {} + def main(): params = misc.get_params(st) @@ -25,9 +26,7 @@ def main(): return page(st, params) - - -def page(st, params, action = ''): +def page(st, params, action=''): global initialized, external_module_path, external_module_meta @@ -36,5 +35,4 @@ def page(st, params, action = ''): st.markdown('----') st.markdown(announcement) - - return {'return':0, 'end_html':end_html} + return {'return': 0, 'end_html': end_html} diff --git a/script/gui/playground_beta.py b/script/gui/playground_beta.py index 9b2c526a8c..bf2b24ec4e 100644 --- a/script/gui/playground_beta.py +++ b/script/gui/playground_beta.py @@ -5,14 +5,15 @@ import datetime import misc + def page(st, params): current_script_path = os.environ.get('CM_TMP_CURRENT_SCRIPT_PATH', '') - url_prefix = st.config.get_option('server.baseUrlPath')+'/' + url_prefix = 
st.config.get_option('server.baseUrlPath') + '/' - name = params.get('name',[''])[0].strip() - tags = params.get('tags',[''])[0].lower() + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() readme = os.path.join(current_script_path, 'playground_beta_README.md') @@ -21,15 +22,16 @@ def page(st, params): if os.path.isfile(readme): r = cmind.utils.load_txt(readme) - if r['return']>0: return r + if r['return'] > 0: + return r md += r['string'] md = md.replace('{{URL_PREFIX}}', url_prefix) # st.markdown(md) - st.write(md, unsafe_allow_html = True) + st.write(md, unsafe_allow_html=True) - end_html='' + end_html = '' - return {'return':0, 'end_html':end_html} + return {'return': 0, 'end_html': end_html} diff --git a/script/gui/playground_challenges.py b/script/gui/playground_challenges.py index 7d628760ce..44a8824898 100644 --- a/script/gui/playground_challenges.py +++ b/script/gui/playground_challenges.py @@ -5,127 +5,129 @@ import datetime import misc + def page(st, params): - url_prefix = st.config.get_option('server.baseUrlPath')+'/' + url_prefix = st.config.get_option('server.baseUrlPath') + '/' url_scripts = url_prefix + '?action=scripts' url_contributors = url_prefix + '?action=contributors' + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() - name = params.get('name',[''])[0].strip() - tags = params.get('tags',[''])[0].lower() - - ii = {'action':'find', - 'automation':'challenge,3d84abd768f34e08'} + ii = {'action': 'find', + 'automation': 'challenge,3d84abd768f34e08'} - if name!='': - ii['artifact']=name - if tags!='': - ii['tags']=tags + if name != '': + ii['artifact'] = name + if tags != '': + ii['tags'] = tags r = cmind.access(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst = r['list'] end_html = '' - if len(lst)==0: + if len(lst) == 0: st.markdown('Challenges were not found!') else: artifact = None - if len(lst)==1: + if len(lst) == 1: artifact = lst[0] else: challenges = [] date_now = datetime.datetime.now().isoformat() - date_now2 = int(date_now[0:4]+date_now[5:7]+date_now[8:10]) + date_now2 = int(date_now[0:4] + date_now[5:7] + date_now[8:10]) ongoing = [] for l in sorted(lst, key=lambda x: ( - -int(x.meta.get('date_open','0')), - -int(x.meta.get('date_close','0')), - x.meta.get('title','') - )): + -int(x.meta.get('date_open', '0')), + -int(x.meta.get('date_close', '0')), + x.meta.get('title', '') + )): row = {} meta = l.meta - row['uid']= meta['uid'] + row['uid'] = meta['uid'] name = meta.get('title', meta['alias']) - row['name']=name + row['name'] = name - if meta.get('hot', False): row['hot']=True + if meta.get('hot', False): + row['hot'] = True - for k in ['date_close_extension', 'points', 'trophies', 'prize', 'prize_short', 'skip', 'sort']: + for k in ['date_close_extension', 'points', + 'trophies', 'prize', 'prize_short', 'skip', 'sort']: if k in meta: - row[k]=meta[k] + row[k] = meta[k] under_preparation = meta.get('under_preparation', False) - row['under_preparation']=under_preparation + row['under_preparation'] = under_preparation - date_open = meta.get('date_open','') - date_close = meta.get('date_close','') + date_open = meta.get('date_open', '') + date_close = meta.get('date_close', '') s_date_open = '' - if date_open!='': + if date_open != '': r = misc.convert_date(date_open) - s_date_open = r['string'] if r['return']==0 else '' + s_date_open = r['string'] if r['return'] == 0 else '' - row['orig_date_open']=date_open - row['date_open']=s_date_open + row['orig_date_open'] = 
date_open + row['date_open'] = s_date_open s_date_close = '' - if date_close!='': + if date_close != '': r = misc.convert_date(date_close) - s_date_close = r['string'] if r['return']==0 else '' + s_date_close = r['string'] if r['return'] == 0 else '' - row['orig_date_close']=date_close - row['date_close']=s_date_close + row['orig_date_close'] = date_close + row['date_close'] = s_date_close diff1 = 0 diff2 = 0 - if date_open!='': - diff1 = int(date_open)-int(date_now2) - - if date_close!='': - diff2 = int(date_close)-int(date_now2) + if date_open != '': + diff1 = int(date_open) - int(date_now2) + if date_close != '': + diff2 = int(date_close) - int(date_now2) prefix = '' if under_preparation: prefix = 'Under preparation: ' else: - if date_open!='' and diff1>0: + if date_open != '' and diff1 > 0: prefix = 'Opens on {}: '.format(s_date_open) else: - if date_close!='': - if diff2<0: - prefix = 'Finished on {}: '.format(s_date_close) + if date_close != '': + if diff2 < 0: + prefix = 'Finished on {}: '.format( + s_date_close) else: - prefix = 'Open and finishes on {}: '.format(s_date_close) + prefix = 'Open and finishes on {}: '.format( + s_date_close) else: prefix = 'Open: '.format(s_date_close) - # Check if open challenge even if under preparation - if date_open and (date_close=='' or (diff1<=0 and diff2>0)): + if date_open and (date_close == '' or ( + diff1 <= 0 and diff2 > 0)): ongoing.append(row) else: - challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']}) - - - + challenges.append( + {'prefix': prefix, 'name': name, 'uid': l.meta['uid']}) # Show ongoing if open - if len(ongoing)>0: + if len(ongoing) > 0: # Check hot hot = [] @@ -153,28 +155,30 @@ def page(st, params):

'''.format(url_scripts, url_contributors) - st.write(x, unsafe_allow_html = True) - + st.write(x, unsafe_allow_html=True) # Check if hot - if len(hot)>0: + if len(hot) > 0: st.markdown('#### Hot challenges') md_tmp = '' for row in sorted(hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), - row.get('sort', 0), - row.get('name', ''), - row.get('under_preparation', False) - )): + row.get('sort', 0), + row.get( + 'name', ''), + row.get( + 'under_preparation', False) + )): x = row['name'] x = x[0].upper() + x[1:] - url = url_prefix + '?action=challenges&name={}'.format(row['uid']) - + url = url_prefix + \ + '?action=challenges&name={}'.format(row['uid']) - date_close = row.get('date_close','').strip() - y = ' (Closing date: **{}**)'.format(date_close) if date_close !='' else '' + date_close = row.get('date_close', '').strip() + y = ' (Closing date: **{}**)'.format( + date_close) if date_close != '' else '' md_tmp += '* [{}]({}){}\n'.format(x, url, y) @@ -182,18 +186,21 @@ def page(st, params): st.markdown('#### On-going challenges') - # Continue all ind = 1 data = [] for row in sorted(ongoing_without_hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), - row.get('sort', 0), - row.get('name', ''), - row.get('under_preparation', False) - )): - if row.get('skip',False): continue + row.get( + 'sort', 0), + row.get( + 'name', ''), + row.get( + 'under_preparation', False) + )): + if row.get('skip', False): + continue xrow = [] @@ -206,8 +213,9 @@ def page(st, params): x = x[0].lower() + x[1:] y = 'Under preparation: ' - url = url_prefix + '?action=challenges&name={}'.format(row['uid']) -# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) + url = url_prefix + \ + '?action=challenges&name={}'.format(row['uid']) +# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) x = '''
@@ -215,24 +223,25 @@ def page(st, params): {}{}
- '''.format(y, url, x).replace('\n','') + '''.format(y, url, x).replace('\n', '') # st.write(x, unsafe_allow_html = True) xrow.append(x) # Assemble info - x='' + x = '' - date_close = row.get('date_close','') + date_close = row.get('date_close', '') y = '' - if date_close!='' and date_close!=None: - x += '   Closing date: **{}**\n'.format(date_close) - y = date_close.replace(' ',' ') + if date_close != '' and date_close is not None: + x += '   Closing date: **{}**\n'.format( + date_close) + y = date_close.replace(' ', ' ') xrow.append(y) y = '' - if row.get('date_close_extension',False): + if row.get('date_close_extension', False): y = 'until done' xrow.append(y) @@ -245,11 +254,9 @@ def page(st, params): # # xrow.append(y) - - awards = '' - trophies = row.get('trophies',False) + trophies = row.get('trophies', False) if trophies: x += '   Trophy: **Yes**\n' awards += '🏆' @@ -263,16 +270,13 @@ def page(st, params): # # xrow.append(awards) - - if x!='': - md += '     '+x + if x != '': + md += '     ' + x # st.markdown(md) - data.append(xrow) - ind+=1 - + ind += 1 import pandas as pd import numpy as np @@ -280,10 +284,14 @@ def page(st, params): df = pd.DataFrame(data, columns=['Challenge', 'Closing date', 'Extension']) - df.index+=1 + df.index += 1 # st.table(df) - st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True) + st.write( + df.to_html( + escape=False, + justify='left'), + unsafe_allow_html=True) # Show selector for all # challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', @@ -294,16 +302,12 @@ def page(st, params): # if challenge>0: # artifact = artifacts[challenge] - - - # Process 1 challenge if artifact is None: -# st.markdown('#### Past or future challenges:') + # st.markdown('#### Past or future challenges:') st.markdown('#### Future or past challenges') - for c in challenges: prefix = c['prefix'] @@ -318,19 +322,9 @@ def page(st, params): '''.format(str(ind), prefix, url, name) - st.write(x, unsafe_allow_html = True) - - ind+=1 - - - - - - - - - + st.write(x, unsafe_allow_html=True) + ind += 1 else: meta = artifact.meta @@ -343,100 +337,104 @@ def page(st, params):

                 Challenge: {}

                 '''.format(name),
-                 unsafe_allow_html=True
-                 )
-
-        end_html='<a href="{}">Self link</a>'.format(misc.make_url(meta['uid'], action='challenges', md=False))
+                 unsafe_allow_html=True
+                 )
+        end_html = '<a href="{}">Self link</a>'.format(
+            misc.make_url(meta['uid'], action='challenges', md=False))

         # Check basic password
-        password_hash = meta.get('password_hash','')
+        password_hash = meta.get('password_hash', '')

         view = True
-        if password_hash!='':
+        if password_hash != '':
             view = False

-            password = st.text_input("Enter password", type="password", key="password")
+            password = st.text_input(
+                "Enter password", type="password", key="password")

-            if password!='':
+            if password != '':
                 import bcrypt
-                # TBD: temporal hack to demo password protection for experiments
+                # TBD: temporal hack to demo password protection for
+                # experiments
                 password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e'
-                password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt)
+                password_hash2 = bcrypt.hashpw(
+                    password.encode('utf-8'), password_salt)

-                if password_hash.encode('utf-8')==password_hash2:
-                    view=True
+                if password_hash.encode('utf-8') == password_hash2:
+                    view = True
                 else:
                     st.markdown('**Warning:** wrong password')

         if not view:
-            return {'return':0, 'end_html':end_html}
-
-
+            return {'return': 0, 'end_html': end_html}

         z = ''

-        date_open = meta.get('date_open','')
-        if date_open!='':
+        date_open = meta.get('date_open', '')
+        if date_open != '':
             # Format YYYYMMDD
             r = misc.convert_date(date_open)
-            if r['return']>0: return r
-            z+='* **Open date:** {}\n'.format(r['string'])
+            if r['return'] > 0:
+                return r
+            z += '* **Open date:** {}\n'.format(r['string'])

-        date_close = meta.get('date_close','')
-        if date_close!='':
+        date_close = meta.get('date_close', '')
+        if date_close != '':
             # Format YYYYMMDD
             r = misc.convert_date(date_close)
-            if r['return']>0: return r
-            z+='* **Closing date:** {}\n'.format(r['string'])
+            if r['return'] > 0:
+                return r
+            z += '* **Closing date:** {}\n'.format(r['string'])

         if meta.get('trophies', False):
-            z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n'
+            z += '* **MLCommons Collective Knowledge Contributor award:** Yes\n'

-        prize_short = meta.get('prize_short','')
-        if prize_short!='':
-            z+='* **Prizes:** {}\n'.format(prize_short)
+        prize_short = meta.get('prize_short', '')
+        if prize_short != '':
+            z += '* **Prizes:** {}\n'.format(prize_short)

#        prize = meta.get('prize','')
#        if prize!='':
#            z+='* **Student prizes:** {}\n'.format(prize)

-
-        urls = meta.get('urls',[])
+        urls = meta.get('urls', [])
         url = meta.get('url', '')
-        if url!='': urls.append(url)
+        if url != '':
+            urls.append(url)

-        if len(urls)>0:
+        if len(urls) > 0:
             x = '* **External link:** '
             md = ''
-            if len(urls)>1:
+            if len(urls) > 1:
                 md = '* **External links:**\n'
-                x=' * '
+                x = ' * '

             for u in urls:
-                md+=x+'[{}]({})\n'.format(u,u)
-            z+=md+'\n'
-
+                md += x + '[{}]({})\n'.format(u, u)
+            z += md + '\n'

         # Check if has linked experiments
-        experiments = meta.get('experiments',[])
+        experiments = meta.get('experiments', [])

-        if len(experiments)>0:
+        if len(experiments) > 0:
             md = '* **Shared experiments:**\n'

             for e in experiments:
-                tags = e.get('tags','')
-                name = e.get('name','')
+                tags = e.get('tags', '')
+                name = e.get('name', '')

-                if tags!='':
-                    md+=' * '+misc.make_url(tags, action='experiments', key='tags')+'\n'
-                elif name!='':
-                    md+=' * '+misc.make_url(name, action='experiments')+'\n'
+                if tags != '':
+                    md += ' * ' + \
+                        misc.make_url(
+                            tags, action='experiments', key='tags') + '\n'
+                elif name != '':
+                    md += ' * ' + \
+                        misc.make_url(name, action='experiments') + '\n'

-            z+=md+'\n'
+            z += md + '\n'

         st.markdown(z)

-
         # Check if has text
         path = artifact.path

@@ -444,7 +442,8 @@ def page(st, params):
             f1 = os.path.join(path, f)
             if os.path.isfile(f1):
                 r = cmind.utils.load_txt(f1)
-                if r['return']>0: return r
+                if r['return'] > 0:
+                    return r

                 s = r['string']

@@ -454,7 +453,7 @@ def page(st, params):
                     y = s.split('\n')
                     ss = ''
                     for x in y:
-                        ss+=x.strip()+'\n'
+                        ss += x.strip() + '\n'

                     st.write(ss, unsafe_allow_html=True)
                 else:
@@ -463,10 +462,11 @@ def page(st, params):
                 break

     # Check associated reports
-    r=cmind.access({'action':'find',
-                    'automation':'report,6462ecdba2054467',
-                    'tags':'challenge-{}'.format(uid)})
-    if r['return']>0: return r
+    r = cmind.access({'action': 'find',
+                      'automation': 'report,6462ecdba2054467',
+                      'tags': 'challenge-{}'.format(uid)})
+    if r['return'] > 0:
+        return r

     lst = r['list']

@@ -478,25 +478,19 @@ def page(st, params):
         report_meta = l.meta

         report_alias = report_meta['alias']
-        report_title = report_meta.get('title','')
+        report_title = report_meta.get('title', '')

-        report_name = report_title if report_title!='' else report_alias
+        report_name = report_title if report_title != '' else report_alias

         r = cmind.utils.load_txt(f1)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         s = r['string']

         st.markdown('---')
-        st.markdown('### '+report_name)
+        st.markdown('### ' + report_name)

         st.markdown(s, unsafe_allow_html=True)

-
-
-
-
-
-
-
-
-    return {'return':0, 'end_html':end_html}
+    return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_challenges_with_prizes.py b/script/gui/playground_challenges_with_prizes.py
index 80afce51e4..3cc681cd72 100644
--- a/script/gui/playground_challenges_with_prizes.py
+++ b/script/gui/playground_challenges_with_prizes.py
@@ -5,121 +5,123 @@ import datetime

 import misc

+
 def page(st, params):

-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'

-    name = params.get('name',[''])[0].strip()
-    tags = params.get('tags',[''])[0].lower()
+    name = params.get('name', [''])[0].strip()
+    tags = params.get('tags', [''])[0].lower()

-    ii = {'action':'find',
-          'automation':'challenge,3d84abd768f34e08'}
+    ii = {'action': 'find',
+          'automation': 'challenge,3d84abd768f34e08'}

-    if name!='':
-        ii['artifact']=name
-    if tags!='':
-        ii['tags']=tags
+    if name != '':
+        ii['artifact'] = name
+    if tags != '':
+        ii['tags'] = tags

     r = cmind.access(ii)
-    if r['return']>0: return r
+    if r['return'] > 0:
+        return r

     lst = r['list']

     end_html = ''

-    if len(lst)==0:
+    if len(lst) == 0:
         st.markdown('Challenges were not found!')
     else:
         artifact = None

-        if len(lst)==1:
+        if len(lst) == 1:
             artifact = lst[0]
         else:
             challenges = []

             date_now = datetime.datetime.now().isoformat()
-            date_now2 = int(date_now[0:4]+date_now[5:7]+date_now[8:10])
+            date_now2 = int(date_now[0:4] + date_now[5:7] + date_now[8:10])

             ongoing = []

             for l in sorted(lst, key=lambda x: (
-                                -int(x.meta.get('date_open','0')),
-                                -int(x.meta.get('date_close','0')),
-                                x.meta.get('title','')
-                            )):
+                    -int(x.meta.get('date_open', '0')),
+                    -int(x.meta.get('date_close', '0')),
+                    x.meta.get('title', '')
+            )):

                 row = {}

                 meta = l.meta

-                row['uid']= meta['uid']
+                row['uid'] = meta['uid']

                 name = meta.get('title', meta['alias'])
-                row['name']=name
+                row['name'] = name

-                for k in ['date_close_extension', 'points', 'trophies', 'prize', 'prize_short', 'skip', 'sort']:
+                for k in ['date_close_extension', 'points',
+                          'trophies', 'prize', 'prize_short', 'skip', 'sort']:
                     if k in meta:
-                        row[k]=meta[k]
+                        row[k] = meta[k]

                 under_preparation = meta.get('under_preparation', False)
-                row['under_preparation']=under_preparation
+                row['under_preparation'] = under_preparation

-                date_open = meta.get('date_open','')
-                date_close = meta.get('date_close','')
+                date_open = meta.get('date_open', '')
+                date_close = meta.get('date_close', '')

                 s_date_open = ''
-                if date_open!='':
+                if date_open != '':
                     r = misc.convert_date(date_open)
-                    s_date_open = r['string'] if r['return']==0 else ''
+                    s_date_open = r['string'] if r['return'] == 0 else ''

-                row['orig_date_open']=date_open
-                row['date_open']=s_date_open
+                row['orig_date_open'] = date_open
+                row['date_open'] = s_date_open

                 s_date_close = ''
-                if date_close!='':
+                if date_close != '':
                     r = misc.convert_date(date_close)
-                    s_date_close = r['string'] if r['return']==0 else ''
+                    s_date_close = r['string'] if r['return'] == 0 else ''

-                row['orig_date_close']=date_close
-                row['date_close']=s_date_close
+                row['orig_date_close'] = date_close
+                row['date_close'] = s_date_close

                 diff1 = 0
                 diff2 = 0

-                if date_open!='':
-                    diff1 = int(date_open)-int(date_now2)
-
-                if date_close!='':
-                    diff2 = int(date_close)-int(date_now2)
+                if date_open != '':
+                    diff1 = int(date_open) - int(date_now2)

+                if date_close != '':
+                    diff2 = int(date_close) - int(date_now2)

                 prefix = ''
                 if under_preparation:
                     prefix = 'Under preparation: '
                 else:
-                    if date_open!='' and diff1>0:
+                    if date_open != '' and diff1 > 0:
                         prefix = 'Opens on {}: '.format(s_date_open)
                     else:
-                        if date_close!='':
-                            if diff2<0:
-                                prefix = 'Finished on {}: '.format(s_date_close)
+                        if date_close != '':
+                            if diff2 < 0:
+                                prefix = 'Finished on {}: '.format(
+                                    s_date_close)
                             else:
-                                prefix = 'Open and finishes on {}: '.format(s_date_close)
+                                prefix = 'Open and finishes on {}: '.format(
+                                    s_date_close)
                         else:
                             prefix = 'Open: '.format(s_date_close)

-
                 # Check if open challenge even if under preparation
-                if date_open and (date_close=='' or (diff1<=0 and diff2>0)):
+                if date_open and (date_close == '' or (
+                        diff1 <= 0 and diff2 > 0)):
                     ongoing.append(row)
                 else:
-                    challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']})
-
-
-
+                    challenges.append(
+                        {'prefix': prefix, 'name': name, 'uid': l.meta['uid']})

             # Show ongoing if open
-            if len(ongoing)>0:
+            if len(ongoing) > 0:
                 ind = 1

                 x = '''
@@ -133,16 +135,19 @@ def page(st, params):
                 -->
                 '''
-                st.write(x, unsafe_allow_html = True)
+                st.write(x, unsafe_allow_html=True)

                 data = []
                 for row in sorted(ongoing, key=lambda row: (int(row.get('orig_date_close', 9999999999)),
                                                             row.get('sort', 0),
-                                                            row.get('name', ''),
-                                                            row.get('under_preparation', False)
+                                                            row.get(
+                                                                'name', ''),
+                                                            row.get(
+                                                                'under_preparation', False)
                                                             )):
-                    if row.get('skip',False): continue
+                    if row.get('skip', False):
+                        continue

                     xrow = []

@@ -155,8 +160,9 @@ def page(st, params):
                         x = x[0].lower() + x[1:]
                         y = 'Under preparation: '

-                    url = url_prefix + '?action=challenges&name={}'.format(row['uid'])
-#                    md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url)
+                    url = url_prefix + \
+                        '?action=challenges&name={}'.format(row['uid'])
+#                    md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url)

                     x = '''
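Between hunks, a note on the date logic reformatted above: open/closed status is decided with plain integer arithmetic on YYYYMMDD stamps, not with date objects. A self-contained sketch of the same classification (function name and return strings are illustrative, not from the source):

```python
import datetime

def classify(date_open='', date_close='', under_preparation=False):
    # Same YYYYMMDD integer arithmetic as the page code above.
    now = datetime.datetime.now().isoformat()
    date_now2 = int(now[0:4] + now[5:7] + now[8:10])

    diff1 = int(date_open) - date_now2 if date_open != '' else 0
    diff2 = int(date_close) - date_now2 if date_close != '' else 0

    if under_preparation:
        return 'under preparation'
    if date_open != '' and diff1 > 0:
        return 'opens later'
    if date_close != '':
        return 'finished' if diff2 < 0 else 'open, finishes on closing date'
    return 'open'

print(classify(date_open='20240101', date_close='20991231'))
# -> 'open, finishes on closing date'
```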
@@ -164,24 +170,25 @@ def page(st, params):
                    {}<a href="{}">{}</a>
-                 '''.format(y, url, x).replace('\n','')
+                 '''.format(y, url, x).replace('\n', '')

#                    st.write(x, unsafe_allow_html = True)
                     xrow.append(x)

                     # Assemble info
-                    x=''
+                    x = ''

-                    date_close = row.get('date_close','')
+                    date_close = row.get('date_close', '')
                     y = ''
-                    if date_close!='' and date_close!=None:
-                        x += '   Closing date: **{}**\n'.format(date_close)
-                        y = date_close.replace(' ',' ')
+                    if date_close != '' and date_close is not None:
+                        x += '   Closing date: **{}**\n'.format(
+                            date_close)
+                        y = date_close.replace(' ', ' ')
                     xrow.append(y)

                     y = ''
-                    if row.get('date_close_extension',False):
+                    if row.get('date_close_extension', False):
                         y = 'until done'
                     xrow.append(y)

@@ -194,34 +201,30 @@ def page(st, params):
#
#                    xrow.append(y)

-
                     awards = ''

-                    trophies = row.get('trophies',False)
+                    trophies = row.get('trophies', False)
                     if trophies:
                         x += '   Trophy: **Yes**\n'
                         awards += '🏆'

-
-                    prize = row.get('prize_short','')
-                    if prize!='':
-                        x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format(prize)
-                        if awards!='': awards+=' , '
+                    prize = row.get('prize_short', '')
+                    if prize != '':
+                        x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format(
+                            prize)
+                        if awards != '':
+                            awards += ' , '
                         awards += prize

                     xrow.append(awards)

-
-                    if x!='':
-                        md += '     '+x
+                    if x != '':
+                        md += '     ' + x

#                    st.markdown(md)
-
                     data.append(xrow)

-                    ind+=1
-
+                    ind += 1

                 import pandas as pd
                 import numpy as np

@@ -229,10 +232,14 @@ def page(st, params):

                 df = pd.DataFrame(data, columns=['Challenge', 'Closing date', 'Extension', 'Contributor award and prizes from MLCommons organizations, cTuning foundation and cKnowledge.org'])

-                df.index+=1
+                df.index += 1

#                st.table(df)
-                st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True)
+                st.write(
+                    df.to_html(
+                        escape=False,
+                        justify='left'),
+                    unsafe_allow_html=True)

                 # Show selector for all
#                challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:',

@@ -243,20 +250,16 @@ def page(st, params):
#                if challenge>0:
#                    artifact = artifacts[challenge]

-
-
-
     # Process 1 challenge
     if artifact is None:
-#        st.markdown('#### Past or future challenges:')
+        # st.markdown('#### Past or future challenges:')

         x = '''

            Future or past challenges

           '''
-        st.write(x, unsafe_allow_html = True)
-
+        st.write(x, unsafe_allow_html=True)

         for c in challenges:

@@ -272,19 +275,9 @@ def page(st, params):

                 '''.format(str(ind), prefix, url, name)

-            st.write(x, unsafe_allow_html = True)
-
-            ind+=1
-
-
-
-
-
-
-
-
-
+            st.write(x, unsafe_allow_html=True)
+            ind += 1

     else:
         meta = artifact.meta

@@ -297,100 +290,104 @@ def page(st, params):

                 Challenge: {}

                 '''.format(name),
-                 unsafe_allow_html=True
-                 )
-
-        end_html='<a href="{}">Self link</a>'.format(misc.make_url(meta['uid'], action='challenges', md=False))
+                 unsafe_allow_html=True
+                 )
+        end_html = '<a href="{}">Self link</a>'.format(
+            misc.make_url(meta['uid'], action='challenges', md=False))

         # Check basic password
-        password_hash = meta.get('password_hash','')
+        password_hash = meta.get('password_hash', '')

         view = True
-        if password_hash!='':
+        if password_hash != '':
             view = False

-            password = st.text_input("Enter password", type="password", key="password")
+            password = st.text_input(
+                "Enter password", type="password", key="password")

-            if password!='':
+            if password != '':
                 import bcrypt
-                # TBD: temporal hack to demo password protection for experiments
+                # TBD: temporal hack to demo password protection for
+                # experiments
                 password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e'
-                password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt)
+                password_hash2 = bcrypt.hashpw(
+                    password.encode('utf-8'), password_salt)

-                if password_hash.encode('utf-8')==password_hash2:
-                    view=True
+                if password_hash.encode('utf-8') == password_hash2:
+                    view = True
                 else:
                     st.markdown('**Warning:** wrong password')

         if not view:
-            return {'return':0, 'end_html':end_html}
-
-
+            return {'return': 0, 'end_html': end_html}

         z = ''

-        date_open = meta.get('date_open','')
-        if date_open!='':
+        date_open = meta.get('date_open', '')
+        if date_open != '':
             # Format YYYYMMDD
             r = misc.convert_date(date_open)
-            if r['return']>0: return r
-            z+='* **Open date:** {}\n'.format(r['string'])
+            if r['return'] > 0:
+                return r
+            z += '* **Open date:** {}\n'.format(r['string'])

-        date_close = meta.get('date_close','')
-        if date_close!='':
+        date_close = meta.get('date_close', '')
+        if date_close != '':
             # Format YYYYMMDD
             r = misc.convert_date(date_close)
-            if r['return']>0: return r
-            z+='* **Closing date:** {}\n'.format(r['string'])
+            if r['return'] > 0:
+                return r
+            z += '* **Closing date:** {}\n'.format(r['string'])

         if meta.get('trophies', False):
-            z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n'
+            z += '* **MLCommons Collective Knowledge Contributor award:** Yes\n'

-        prize_short = meta.get('prize_short','')
-        if prize_short!='':
-            z+='* **Prizes:** {}\n'.format(prize_short)
+        prize_short = meta.get('prize_short', '')
+        if prize_short != '':
+            z += '* **Prizes:** {}\n'.format(prize_short)

#        prize = meta.get('prize','')
#        if prize!='':
#            z+='* **Student prizes:** {}\n'.format(prize)

-
-        urls = meta.get('urls',[])
+        urls = meta.get('urls', [])
         url = meta.get('url', '')
-        if url!='': urls.append(url)
+        if url != '':
+            urls.append(url)

-        if len(urls)>0:
+        if len(urls) > 0:
             x = '* **External link:** '
             md = ''
-            if len(urls)>1:
+            if len(urls) > 1:
                 md = '* **External links:**\n'
-                x=' * '
+                x = ' * '

             for u in urls:
-                md+=x+'[{}]({})\n'.format(u,u)
-            z+=md+'\n'
-
+                md += x + '[{}]({})\n'.format(u, u)
+            z += md + '\n'

         # Check if has linked experiments
-        experiments = meta.get('experiments',[])
+        experiments = meta.get('experiments', [])

-        if len(experiments)>0:
+        if len(experiments) > 0:
             md = '* **Shared experiments:**\n'

             for e in experiments:
-                tags = e.get('tags','')
-                name = e.get('name','')
+                tags = e.get('tags', '')
+                name = e.get('name', '')

-                if tags!='':
-                    md+=' * '+misc.make_url(tags, action='experiments', key='tags')
-                elif name!='':
-                    md+=' * '+misc.make_url(name, action='experiments')
+                if tags != '':
+                    md += ' * ' + \
+                        misc.make_url(
+                            tags, action='experiments', key='tags')
+                elif name != '':
+                    md += ' * ' + \
+                        misc.make_url(name, action='experiments')

-            z+=md+'\n'
+            z += md + '\n'

         st.markdown(z)

-
         # Check if has text
         path = artifact.path

@@ -398,7 +395,8 @@ def page(st, params):
             f1 = os.path.join(path, f)
             if os.path.isfile(f1):
                 r = cmind.utils.load_txt(f1)
-                if r['return']>0: return r
+                if r['return'] > 0:
+                    return r

                 s = r['string']

@@ -408,7 +406,7 @@ def page(st, params):
                     y = s.split('\n')
                     ss = ''
                     for x in y:
-                        ss+=x.strip()+'\n'
+                        ss += x.strip() + '\n'

                     st.write(ss, unsafe_allow_html=True)
                 else:
@@ -417,10 +415,11 @@ def page(st, params):
                 break

     # Check associated reports
-    r=cmind.access({'action':'find',
-                    'automation':'report,6462ecdba2054467',
-                    'tags':'challenge-{}'.format(uid)})
-    if r['return']>0: return r
+    r = cmind.access({'action': 'find',
+                      'automation': 'report,6462ecdba2054467',
+                      'tags': 'challenge-{}'.format(uid)})
+    if r['return'] > 0:
+        return r

     lst = r['list']

@@ -432,25 +431,19 @@ def page(st, params):
         report_meta = l.meta

         report_alias = report_meta['alias']
-        report_title = report_meta.get('title','')
+        report_title = report_meta.get('title', '')

-        report_name = report_title if report_title!='' else report_alias
+        report_name = report_title if report_title != '' else report_alias

         r = cmind.utils.load_txt(f1)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         s = r['string']

         st.markdown('---')
-        st.markdown('### '+report_name)
+        st.markdown('### ' + report_name)

         st.markdown(s, unsafe_allow_html=True)

-
-
-
-
-
-
-
-
-    return {'return':0, 'end_html':end_html}
+    return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_contributors.py b/script/gui/playground_contributors.py
index cc49c87a4a..3c9a9a1214 100644
--- a/script/gui/playground_contributors.py
+++ b/script/gui/playground_contributors.py
@@ -4,94 +4,98 @@ import misc

 import os

+
 def page(st, params):
-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'

-    name = params.get('name',[''])[0].lower()
+    name = params.get('name', [''])[0].lower()

     list_all = False

-    if name!='':
-        r=cmind.access({'action':'load',
-                        'automation':'contributor,68eae17b590d4f8f',
-                        'artifact':name})
-        if r['return']>0 and r['return']!=16:
+    if name != '':
+        r = cmind.access({'action': 'load',
+                          'automation': 'contributor,68eae17b590d4f8f',
+                          'artifact': name})
+        if r['return'] > 0 and r['return'] != 16:
             return r

         end_html = ''

-        if r['return']==0:
+        if r['return'] == 0:
             meta = r['meta']
             path = r['path']

-            name = meta.get('name',meta.get('organization',''))
-            if name!='':
-                st.markdown("#### "+name)
+            name = meta.get('name', meta.get('organization', ''))
+            if name != '':
+                st.markdown("#### " + name)

-            x=''
-            for t in meta.get('trophies',[]):
-                url = t.get('url','')
+            x = ''
+            for t in meta.get('trophies', []):
+                url = t.get('url', '')
                 if url != '':
-                    x+='<a href="{}">🏆</a> '.format(url)
+                    x += '<a href="{}">🏆</a> '.format(url)

-            if x!='':
-                st.write('' + x + '', unsafe_allow_html = True)
+            if x != '':
+                st.write('' + x + '', unsafe_allow_html=True)

-            end_html='''
+            end_html = '''
                <a href="{}">Self link</a>
               '''.format(misc.make_url(meta['uid'], action='contributors', md=False))

-            org = meta.get('organization','')
-            if org!='':
-                st.markdown("* **Organization:** "+org)
+            org = meta.get('organization', '')
+            if org != '':
+                st.markdown("* **Organization:** " + org)

-            urls = meta.get('urls',[])
+            urls = meta.get('urls', [])
             url = meta.get('url', '')
-            if url!='': urls.append(url)
+            if url != '':
+                urls.append(url)

-            if len(urls)>0:
+            if len(urls) > 0:
                 x = '* **Web page:** '
                 md = ''
-                if len(urls)>1:
+                if len(urls) > 1:
                     md = '* **Web pages:**\n'
-                    x=' * '
+                    x = ' * '

                 for u in urls:
-                    md+=x+'[{}]({})\n'.format(u,u)
+                    md += x + '[{}]({})\n'.format(u, u)

                 st.markdown(md)

-            ongoing = meta.get('ongoing',[])
+            ongoing = meta.get('ongoing', [])

             x = str(calculate_points(meta))

-            y1 =''
+            y1 = ''
             y2 = ''
-            if len(ongoing)>0:
+            if len(ongoing) > 0:
                 y1 = '*'
                 y2 = ' (ongoing)*'

-            st.markdown("* **Points: {}{}{}**".format(y1,x,y2))
+            st.markdown("* **Points: {}{}{}**".format(y1, x, y2))
#            st.write('' + x + '', unsafe_allow_html = True)

-            if len(ongoing)>0:
+            if len(ongoing) > 0:
                 x = "* **Ongoing challenges:**\n"
                 for t in ongoing:
                     if t != '':
-                        x+=" - {}\n".format(misc.make_url(t, action='challenges', key='tags'))
+                        x += " - {}\n".format(misc.make_url(t,
+                                              action='challenges', key='tags'))

                 st.markdown(x)

-            challenges = meta.get('challenges',[])
-            if len(challenges)>0:
+            challenges = meta.get('challenges', [])
+            if len(challenges) > 0:
                 md = "* **Contributions:**\n"

                 for c in sorted(challenges):
-                    md+=" * {}\n".format(misc.make_url(c, action='challenges', key='tags'))
+                    md += " * {}\n".format(misc.make_url(c,
+                                           action='challenges', key='tags'))

                 st.markdown(md)

@@ -102,19 +106,19 @@ def page(st, params):

             if os.path.isfile(readme):
                 r = cmind.utils.load_txt(readme)
-                if r['return']>0: return r
+                if r['return'] > 0:
+                    return r

                 md += r['string']

                 st.markdown('---')
                 st.markdown(md)

-
         else:
-            st.markdown('**Warning:** Contributor "{}" not found!'.format(name))
-
-        return {'return':0, 'end_html':end_html}
+            st.markdown(
+                '**Warning:** Contributor "{}" not found!'.format(name))

+        return {'return': 0, 'end_html': end_html}

     return page_list(st, params)

@@ -124,28 +128,29 @@ def page_list(st, params):
     import numpy as np

     # Read all contributors
-    r = cmind.access({'action':'find',
-                      'automation':'contributor,68eae17b590d4f8f'})
-    if r['return']>0: return r
+    r = cmind.access({'action': 'find',
+                      'automation': 'contributor,68eae17b590d4f8f'})
+    if r['return'] > 0:
+        return r

     lst = r['list']

     # Prepare the latest contributors
     all_data = []

     keys = [
-            ('name', 'Name', 400, 'leftAligned'),
-            ('points', 'Points', 80,'rightAligned'),
-#            ('ongoing_number', 'Ongoing challenges', 80, 'rightAligned'),
-            ('trophies', 'Trophies', 80,'rightAligned')
-           ]
-
+        ('name', 'Name', 400, 'leftAligned'),
+        ('points', 'Points', 80, 'rightAligned'),
+        # ('ongoing_number', 'Ongoing challenges', 80, 'rightAligned'),
+        ('trophies', 'Trophies', 80, 'rightAligned')
+    ]

-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'

     md_people = ''
     md_org = ''

#    for l in sorted(lst, key=lambda x: (-int(x.meta.get('last_participation_date','0')),
-#    for l in sorted(lst, key=lambda x: x.meta.get('name', x.meta.get('organization','')).lower()):
+#    for l in sorted(lst, key=lambda x: x.meta.get('name',
+#    x.meta.get('organization','')).lower()):

     for l in lst:

@@ -174,13 +179,12 @@ def page_list(st, params):
         name = m.get('name', '')
         org = m.get('organization', '')

-        row['name_to_print'] = name if name!='' else org
-
+        row['name_to_print'] = name if name != '' else org

         # Registration in the CK challenges gives 1 point
-        y1 =''
+        y1 = ''
         y2 = ''
-        if len(ongoing)>0:
+        if len(ongoing) > 0:
             y1 = '*'
             y2 = ' (ongoing)*'

@@ -191,55 +195,56 @@ def page_list(st, params):
             for t in ongoing:
                 if t != '':
                     url = url_prefix + '?action=challenges&tags={}'.format(t)
-                    x+='<a href="{}">{}</a><br>'.format(url,t.replace('-', ' ').replace(',',' '))
+                    x += '<a href="{}">{}</a><br>'.format(
+                        url, t.replace('-', ' ').replace(',', ' '))

         row['ongoing'] = x

         name2 = ''

-        if name!='':
-            url = misc.make_url(name, alias=uid, md = False)
-            md_people += '* '+ misc.make_url(name, alias=uid) +'\n'
+        if name != '':
+            url = misc.make_url(name, alias=uid, md=False)
+            md_people += '* ' + misc.make_url(name, alias=uid) + '\n'

-            if org!='':
+            if org != '':
                 name2 = ' ({})'.format(org)

-        elif org!='':
-            url = misc.make_url(org, alias=alias, md = False)
-            md_org += '* '+ misc.make_url(org, alias=alias) +'\n'
+        elif org != '':
+            url = misc.make_url(org, alias=alias, md=False)
+            md_org += '* ' + misc.make_url(org, alias=alias) + '\n'
             name = org

-        row['name'] = '<a href="{}">{}</a>{}'.format(url_prefix + url, name, name2)
+        row['name'] = '<a href="{}">{}</a>{}'.format(
+            url_prefix + url, name, name2)

         row['trophies_number'] = len(trophies)

         x = ''
         for t in trophies:
-            url = t.get('url','')
+            url = t.get('url', '')
             if url != '':
-                x+='<a href="{}">🏆</a> '.format(url)
+                x += '<a href="{}">🏆</a> '.format(
+                    url)

         row['trophies'] = x

-        all_data.append(row)
-
+        all_data.append(row)

     # Visualize table
     pd_keys = [v[0] for v in keys]
     pd_key_names = [v[1] for v in keys]

     pd_all_data = []
-    for row in sorted(all_data, key=lambda row: (row.get('ongoing_number',0)<=0,
-                                                 -row.get('points',0),
-                                                 -row.get('trophies_number',0),
+    for row in sorted(all_data, key=lambda row: (row.get('ongoing_number', 0) <= 0,
+                                                 -row.get('points', 0),
+                                                 -row.get('trophies_number', 0),
                                                  name_to_sort(row))):
-        pd_row=[]
+        pd_row = []
         for k in pd_keys:
             pd_row.append(row.get(k))

         pd_all_data.append(pd_row)

-    df = pd.DataFrame(pd_all_data, columns = pd_key_names)
+    df = pd.DataFrame(pd_all_data, columns=pd_key_names)

-    df.index+=1
+    df.index += 1

     x = '''

@@ -254,10 +259,15 @@ def page_list(st, params):
    '''.format(url_prefix)

-    st.write(x, unsafe_allow_html = True)
-
-    st.write('' + df.to_html(escape=False, justify='left') + '', unsafe_allow_html=True)
+    st.write(x, unsafe_allow_html=True)
+    st.write(
+        '' +
+        df.to_html(
+            escape=False,
+            justify='left') +
+        '',
+        unsafe_allow_html=True)

#    from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode

@@ -316,7 +326,7 @@ def page_list(st, params):
#    st.markdown("### All contributors (individuals and orgs)")
#    st.markdown(md)

-    return {'return':0}
+    return {'return': 0}


 def name_to_sort(meta):

@@ -333,13 +343,13 @@ def calculate_points(meta):

     points = 1

-    xpoints = meta.get('points',[])
+    xpoints = meta.get('points', [])
     for x in xpoints:
-        points += int(x.get('point',0))
+        points += int(x.get('point', 0))

     # Automatic challenges
-    points += len(meta.get('challenges',[]))
-    points += len(meta.get('ongoing',[]))
+    points += len(meta.get('challenges', []))
+    points += len(meta.get('ongoing', []))

     return points

@@ -350,9 +360,9 @@ def prepare_name(meta):
     org = meta.get('organization', '')

     md = ''
-    if name!='':
-        md = '* '+misc.make_url(name, alias=alias)+'\n'
-    elif org!='':
-        md = '* *'+misc.make_url(org, alias=alias)+'*\n'
+    if name != '':
+        md = '* ' + misc.make_url(name, alias=alias) + '\n'
+    elif org != '':
+        md = '* *' + misc.make_url(org, alias=alias) + '*\n'

     return md
diff --git a/script/gui/playground_howtorun.py b/script/gui/playground_howtorun.py
index 2442c93fd8..e88f99c9f9 100644
--- a/script/gui/playground_howtorun.py
+++ b/script/gui/playground_howtorun.py
@@ -10,6 +10,7 @@

 announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...'

+
 def main():
     params = misc.get_params(st)

@@ -21,9 +22,7 @@ def main():

     return page(st, params)


-
-
-def page(st, params, action = ''):
+def page(st, params, action=''):

     end_html = ''

@@ -50,56 +49,65 @@ def page(st, params, action = ''):
       '''.format(url_script)
-    st.write(x, unsafe_allow_html = True)
+    st.write(x, unsafe_allow_html=True)

#    st.markdown(announcement)

-
-    ############################################################################################
+    ##########################################################################
     # Select target hardware
     compute_uid = ''
-    x = params.get('compute_uid',[''])
-    if len(x)>0 and x[0]!='': compute_uid = x[0].strip()
+    x = params.get('compute_uid', [''])
+    if len(x) > 0 and x[0] != '':
+        compute_uid = x[0].strip()

-    ii = {'action':'load_cfg',
-          'automation':'utils',
-          'tags':'benchmark,compute',
-          'skip_files':False}
+    ii = {'action': 'load_cfg',
+          'automation': 'utils',
+          'tags': 'benchmark,compute',
+          'skip_files': False}

-    if compute_uid!='':
-        ii['prune']={'uid':compute_uid}
+    if compute_uid != '':
+        ii['prune'] = {'uid': compute_uid}

     r = cmind.access(ii)
-    if r['return']>0: return r
-
-    r = misc.make_selection(st, r['selection'], 'compute', 'target hardware', compute_uid)
-    if r['return']>0: return r
+    if r['return'] > 0:
+        return r
+
+    r = misc.make_selection(
+        st,
+        r['selection'],
+        'compute',
+        'target hardware',
+        compute_uid)
+    if r['return'] > 0:
+        return r

     compute_meta = r['meta']

#    st.markdown(compute_meta)

-    ############################################################################################
+    ##########################################################################
    # Select benchmark
     bench_uid = ''
-    x = params.get('bench_uid',[''])
-    if len(x)>0 and x[0]!='': bench_uid = x[0].strip()
+    x = params.get('bench_uid', [''])
+    if len(x) > 0 and x[0] != '':
+        bench_uid = x[0].strip()

-    ii = {'action':'load_cfg',
-          'automation':'utils',
-          'tags':'benchmark,list',
-          'skip_files':False}
+    ii = {'action': 'load_cfg',
+          'automation': 'utils',
+          'tags': 'benchmark,list',
+          'skip_files': False}

-    if bench_uid!='':
-        ii['prune']={'uid':bench_uid}
+    if bench_uid != '':
+        ii['prune'] = {'uid': bench_uid}

     r = cmind.access(ii)
-    if r['return']>0: return r
+    if r['return'] > 0:
+        return r

     # Prune by supported compute
     selection = r['selection']
     pruned_selection = []

-    if len(compute_meta)==0 or compute_meta.get('tags','')=='':
+    if len(compute_meta) == 0 or compute_meta.get('tags', '') == '':
         pruned_selection = selection
     else:
         xtags = set(compute_meta['tags'].split(','))

@@ -109,8 +117,8 @@ def page(st, params, action = ''):
         for s in selection:
             add = True

-            supported_compute = s.get('supported_compute',[])
-            if len(supported_compute)>0:
+            supported_compute = s.get('supported_compute', [])
+            if len(supported_compute) > 0:
                 add = False

                 for c in supported_compute:
@@ -126,40 +134,48 @@ def page(st, params, action = ''):
     force_bench_index = 0
     if bench_uid == '':
         j = 0
-        for q in sorted(pruned_selection, key = lambda v: v['name']):
+        for q in sorted(pruned_selection, key=lambda v: v['name']):
             j += 1
             if q['uid'] == '39877bb63fb54725':
                 force_bench_index = j

-    r = misc.make_selection(st, pruned_selection, 'benchmark', 'benchmark', bench_uid, force_index = force_bench_index)
-    if r['return']>0: return r
+    r = misc.make_selection(
+        st,
+        pruned_selection,
+        'benchmark',
+        'benchmark',
+        bench_uid,
+        force_index=force_bench_index)
+    if r['return'] > 0:
+        return r

     bench_meta = r['meta']

#    st.markdown(bench_meta)

-    if len(bench_meta)>0:
-        ############################################################################################
+    if len(bench_meta) > 0:
+        #######################################################################
         # Check common CM interface
-#        st.markdown('---')
+        # st.markdown('---')

-        urls = bench_meta.get('urls',[])
+        urls = bench_meta.get('urls', [])

         script_path = ''
-        script_name = bench_meta.get('script_name','')
+        script_name = bench_meta.get('script_name', '')
         script_meta = {}
         script_obj = None
         script_url = ''

-        if script_name!='':
-            ii = {'action':'find',
-                  'automation':'script',
-                  'artifact':script_name}
+        if script_name != '':
+            ii = {'action': 'find',
+                  'automation': 'script',
+                  'artifact': script_name}

             r = cmind.access(ii)
-            if r['return']>0: return r
+            if r['return'] > 0:
+                return r

             lst = r['list']

-            if len(lst)>0:
+            if len(lst) > 0:

                 script_obj = lst[0]

@@ -171,74 +187,78 @@ def page(st, params, action = ''):

                 repo_meta = script_obj.repo_meta

-                url = repo_meta.get('url','')
-                if url=='' and repo_meta.get('git', False):
-                    url = 'https://github.com/'+repo_meta['alias'].replace('@','/')
+                url = repo_meta.get('url', '')
+                if url == '' and repo_meta.get('git', False):
+                    url = 'https://github.com/' + \
+                        repo_meta['alias'].replace('@', '/')

-                if url!='':
+                if url != '':
                     # Recreate GitHub path
-                    if not url.endswith('/'): url=url+'/'
+                    if not url.endswith('/'):
+                        url = url + '/'

                     url += 'tree/master/'

-                    if repo_meta.get('prefix','')!='':
+                    if repo_meta.get('prefix', '') != '':
                         url += repo_meta['prefix']

-                    if not url.endswith('/'): url=url+'/'
+                    if not url.endswith('/'):
+                        url = url + '/'

-                    url += 'script/'+script_alias
+                    url += 'script/' + script_alias

                     script_url = url

                     if not bench_meta.get('skip_extra_urls', False):
-                        url_script = misc.make_url(script_name, key='name', action='scripts', md=False)
+                        url_script = misc.make_url(
+                            script_name, key='name', action='scripts', md=False)
                         url_script += '&gui=true'

                         urls.append({'name': 'Universal CM GUI to run this benchmark',
                                      'url': url_script})

                 # Check if extra README
-                script_path_readme_extra = os.path.join(script_path, 'README-extra.md')
+                script_path_readme_extra = os.path.join(
+                    script_path, 'README-extra.md')

                 if os.path.isfile(script_path_readme_extra):
                     # Check README.extra.md
-                    url_readme_extra = url+'/README-extra.md'
+                    url_readme_extra = url + '/README-extra.md'

                     urls.append({'name': 'Notes about how to run this benchmark from the command line',
                                  'url': url_readme_extra})

-
         # Check URLS
-        if len(urls)>0:
+        if len(urls) > 0:
             x = '\n'
             for u in urls:
                 name = u['name']
                 url = u['url']
-                x+='* [{}]({})\n'.format(name, url)
-            x+='\n'
+                x += '* [{}]({})\n'.format(name, url)
+            x += '\n'

             st.markdown(x)

-        ############################################################################################
+        #######################################################################
         # Check if has customization
         extra = {}
         skip = False

-        script_tags = script_meta.get('tags_help','')
-        if script_tags =='':
-            script_tags = ','.join(script_meta.get('tags',[]))
+        script_tags = script_meta.get('tags_help', '')
+        if script_tags == '':
+            script_tags = ','.join(script_meta.get('tags', []))

-        if script_obj!=None:
+        if script_obj is not None:
             ii = {'st': st,
                   'params': params,
                   'meta': script_obj.meta,
                   'misc_module': misc,
-                  'compute_meta':compute_meta,
-                  'bench_meta':bench_meta,
-                  'script_path':script_path,
-                  'script_tags':script_tags,
-                  'script_url':script_url}
+                  'compute_meta': compute_meta,
+                  'bench_meta': bench_meta,
+                  'script_path': script_path,
+                  'script_tags': script_tags,
+                  'script_url': script_url}

             import sys
             import importlib

@@ -247,31 +267,34 @@ def page(st, params, action = ''):
             tmp_module = None
             try:
-                found_automation_spec = importlib.util.spec_from_file_location('customize', full_module_path)
-                if found_automation_spec != None:
-                    tmp_module = importlib.util.module_from_spec(found_automation_spec)
+                found_automation_spec = importlib.util.spec_from_file_location(
+                    'customize', full_module_path)
+                if found_automation_spec is not None:
+                    tmp_module = importlib.util.module_from_spec(
+                        found_automation_spec)
                     found_automation_spec.loader.exec_module(tmp_module)
#                    tmp_module=importlib.import_module('customize')
             except Exception as e:
                 st.markdown('WARNING: {}'.format(e))
                 pass

-            if tmp_module!=None:
+            if tmp_module is not None:
                 if hasattr(tmp_module, 'gui'):
                     try:
                         func = getattr(tmp_module, 'gui')
                     except Exception as e:
-                        return {'return':1, 'error':format(e)}
+                        return {'return': 1, 'error': format(e)}

                     r = func(ii)
-                    if r['return'] > 0 : return r
+                    if r['return'] > 0:
+                        return r

                     extra = r.get('extra', {})
                     skip = r.get('skip', False)

-        ############################################################################################
+        #######################################################################
         # Show official GUI
-        if script_path!='' and not skip:
+        if script_path != '' and not skip:
             import script

             ii = {'st': st,
@@ -285,17 +308,22 @@ def page(st, params, action = ''):
                   'extra': extra}

             rr = script.page(ii)
-            if rr['return']>0: return rr
-
-            end_html += '\n'+rr.get('end_html','')
+            if rr['return'] > 0:
+                return rr

-        ############################################################################################
-        self_url = misc.make_url(bench_meta['uid'], action='howtorun', key='bench_uid', md=False)
+            end_html += '\n' + rr.get('end_html', '')

-        if len(compute_meta)>0:
-            self_url += '&compute_uid='+compute_meta['uid']
+        #######################################################################
+        self_url = misc.make_url(
+            bench_meta['uid'],
+            action='howtorun',
+            key='bench_uid',
+            md=False)

-        end_html='<a href="{}">Self link</a>'.format(self_url)
+        if len(compute_meta) > 0:
+            self_url += '&compute_uid=' + compute_meta['uid']

+        end_html = '<a href="{}">Self link</a>'.format(
+            self_url)

-    return {'return':0, 'end_html':end_html}
+    return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_install.py b/script/gui/playground_install.py
index f5f53ca398..a0fb3a861d 100644
--- a/script/gui/playground_install.py
+++ b/script/gui/playground_install.py
@@ -5,17 +5,17 @@ import datetime

 import misc

+
 def page(st, params, extra):

     end_html = ''

-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'

     if not extra.get('skip_header', False):
         st.markdown('---')
-        st.markdown('**Install [MLCommons Collective Mind automation framework](https://github.com/mlcommons/ck):**')
-
-
+        st.markdown(
+            '**Install [MLCommons Collective Mind automation framework](https://github.com/mlcommons/ck):**')

     md = ''

@@ -33,29 +33,27 @@ def page(st, params, extra):

     host_os = st.selectbox('Select your host OS:',
                            range(len(choices)),
-                           format_func = lambda x: choices[x][0],
-                           index = host_os_selection,
-                           key = 'install_select_host_os')
+                           format_func=lambda x: choices[x][0],
+                           index=host_os_selection,
+                           key='install_select_host_os')

     host_os_index = choices[host_os][1]

-
     cur_script_file = __file__
     cur_script_path = os.path.dirname(cur_script_file)

-
-    notes = os.path.join(cur_script_path, 'install', host_os_index+'.md')
+    notes = os.path.join(cur_script_path, 'install', host_os_index + '.md')

     if os.path.isfile(notes):
         r = cmind.utils.load_txt(notes)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         s = r['string']

         if s != '':
-            show = st.toggle('Show system dependencies?', value = True)
+            show = st.toggle('Show system dependencies?', value=True)
             if show:
                 md += s

-
     need_user = ''
     python = 'python3'
     if host_os_index == 'redhat':
@@ -63,26 +61,24 @@ def page(st, params, extra):
     elif host_os_index == 'windows':
         python = 'python'

-
    ###################################################################
     # Select repository
     choices = [
-               ('Development GitHub version: mlcommons@cm4mlops', 'dev'),
-               ('Stable GitHub version: mlcommons@cm4mlops', 'main'),
-               ('Stable ZIP archive from Zenodo: 20240306', 'zenodo'),
-               ('Stable ZIP archive from GitHub: 20240416', 'zip-github')
-              ]
+        ('Development GitHub version: mlcommons@cm4mlops', 'dev'),
+        ('Stable GitHub version: mlcommons@cm4mlops', 'main'),
+        ('Stable ZIP archive from Zenodo: 20240306', 'zenodo'),
+        ('Stable ZIP archive from GitHub: 20240416', 'zip-github')
+    ]

     repo = st.selectbox('Select repository with [automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts):',
-                         range(len(choices)),
-                         format_func = lambda x: choices[x][0],
-                         index=0,
-                         key='select_repo')
+                        range(len(choices)),
+                        format_func=lambda x: choices[x][0],
+                        index=0,
+                        key='select_repo')

     repo_index = choices[repo][1]

-
     # Add stable repo from Zenodo
     if repo_index == 'dev':
         cm_repo = 'mlcommons@cm4mlops --checkout=dev'
@@ -93,55 +89,59 @@ def page(st, params, extra):
     else:
         cm_repo = 'mlcommons@cm4mlops'

-    x = '{} -m pip install cmind -U {}\n\n'.format(python, need_user)
+    x = '{} -m pip install cmind -U {}\n\n'.format(python, need_user)

     x += 'cm test core \n\n'

     x += 'cm pull repo {}\n\n'.format(cm_repo)

-    clean_cm_cache = st.toggle('Clean CM cache', value=True, key = 'install_clean_cm_cache')
+    clean_cm_cache = st.toggle(
+        'Clean CM cache',
+        value=True,
+        key='install_clean_cm_cache')

     cm_clean_cache = 'cm rm cache -f\n\n' if clean_cm_cache else ''

     x += cm_clean_cache

-
-
-    python_venv_name=params.get('@adr.python.name', '')
-    python_ver_min=params.get('@adr.python.version_min', '')
-    python_ver=params.get('@adr.python.version', '')
+    python_venv_name = params.get('@adr.python.name', '')
+    python_ver_min = params.get('@adr.python.version_min', '')
+    python_ver = params.get('@adr.python.version', '')

     if python_venv_name == '':
-        use_python_venv = st.toggle('Use Python Virtual Environment for CM scripts?', value = False)
+        use_python_venv = st.toggle(
+            'Use Python Virtual Environment for CM scripts?', value=False)
         if use_python_venv:
-            python_venv_name = st.text_input('Enter some CM python venv name for your project:', value = "mlperf-v4.0")
+            python_venv_name = st.text_input(
+                'Enter some CM python venv name for your project:',
+                value="mlperf-v4.0")

         if python_ver_min == '':
-            python_ver_min = st.text_input('[Optional] Specify min version such as 3.8:')
+            python_ver_min = st.text_input(
+                '[Optional] Specify min version such as 3.8:')

     y = ''
-    if python_venv_name!='':# or python_ver!='' or python_ver_min!='':
+    if python_venv_name != '':  # or python_ver!='' or python_ver_min!='':
         y = 'cm run script "get sys-utils-cm"\n'

-    if python_venv_name!='':
-        y+='cm run script "install python-venv" --name='+str(python_venv_name)
+    if python_venv_name != '':
+        y += 'cm run script "install python-venv" --name=' + \
+            str(python_venv_name)
     else:
-        y+='cm run script "get python"'
+        y += 'cm run script "get python"'

-        if python_ver!='':
-            y+=' --version='+str(python_ver)
+        if python_ver != '':
+            y += ' --version=' + str(python_ver)

-        if python_ver_min!='':
-            y+=' --version_min='+str(python_ver_min)
-
-    if y!='':
-        x+=y
+        if python_ver_min != '':
+            y += ' --version_min=' + str(python_ver_min)

+    if y != '':
+        x += y

     md += '```bash\n{}\n```\n'.format(x)

     st.markdown('---')
     st.markdown(md)

-    st.markdown('*Check [more CM installation notes at GitHub](https://github.com/mlcommons/ck/blob/master/docs/installation.md)*.')
-
-
+    st.markdown(
+        '*Check [more CM installation notes at GitHub](https://github.com/mlcommons/ck/blob/master/docs/installation.md)*.')

-    return {'return':0, 'end_html':end_html}
+    return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_reports.py b/script/gui/playground_reports.py
index d11276fb50..78cc7b2c4a 100644
--- a/script/gui/playground_reports.py
+++ b/script/gui/playground_reports.py
@@ -5,34 +5,36 @@ import datetime

 import misc

+
 def page(st, params):

-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'

-    name = params.get('name',[''])[0].strip()
-    tags = params.get('tags',[''])[0].lower()
+    name = params.get('name', [''])[0].strip()
+    tags = params.get('tags', [''])[0].lower()

-    ii = {'action':'find',
-          'automation':'report,6462ecdba2054467'}
+    ii = {'action': 'find',
+          'automation': 'report,6462ecdba2054467'}

-    if name!='':
-        ii['artifact']=name
-    if tags!='':
-        ii['tags']=tags
+    if name != '':
+        ii['artifact'] = name
+    if tags != '':
+        ii['tags'] = tags

     r = cmind.access(ii)
-    if r['return']>0: return r
+    if r['return'] > 0:
+        return r

     lst = r['list']

     end_html = ''

-    ##############################################################################
-    if len(lst)==0:
+    ##########################################################################
+    if len(lst) == 0:
         st.markdown('Reports were not found!')

-    ##############################################################################
-    elif len(lst)==1:
+    ##########################################################################
+    elif len(lst) == 1:
         l = lst[0]

         meta = l.meta

@@ -50,39 +52,45 @@ def page(st, params):
            '''.format(title)
-        st.write(x, unsafe_allow_html = True)
-
-        end_html='<a href="{}">Self link</a>'.format(misc.make_url(meta['uid'], action='reports', md=False))
+        st.write(x, unsafe_allow_html=True)
+        end_html = '<a href="{}">Self link</a>'.format(
+            misc.make_url(meta['uid'], action='reports', md=False))

         # Check basic password
-        password_hash = meta.get('password_hash','')
+        password_hash = meta.get('password_hash', '')

         view = True
-        if password_hash!='':
+        if password_hash != '':
             view = False

-            password = st.text_input("Enter password", type="password", key="password")
+            password = st.text_input(
+                "Enter password",
+                type="password",
+                key="password")

-            if password!='':
+            if password != '':
                 import bcrypt
-                # TBD: temporal hack to demo password protection for experiments
+                # TBD: temporal hack to demo password protection for
+                # experiments
                 password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e'
-                password_hash2 = bcrypt.hashpw(password.encode('utf-8'), password_salt)
+                password_hash2 = bcrypt.hashpw(
+                    password.encode('utf-8'), password_salt)

-                if password_hash.encode('utf-8')==password_hash2:
-                    view=True
+                if password_hash.encode('utf-8') == password_hash2:
+                    view = True
                 else:
                     st.markdown('**Warning:** wrong password')

         if not view:
-            return {'return':0, 'end_html':end_html}
+            return {'return': 0, 'end_html': end_html}

         # Check if has text
         for f in ['README.md']:
             f1 = os.path.join(path, f)
             if os.path.isfile(f1):
                 r = cmind.utils.load_txt(f1)
-                if r['return']>0: return r
+                if r['return'] > 0:
+                    return r

                 s = r['string']

@@ -92,7 +100,7 @@ def page(st, params):
                     y = s.split('\n')
                     ss = ''
                     for x in y:
-                        ss+=x.strip()+'\n'
+                        ss += x.strip() + '\n'

                     st.write(ss, unsafe_allow_html=True)
                 else:
@@ -100,37 +108,37 @@ def page(st, params):

                 break

-
-    ##############################################################################
+    ##########################################################################
     else:
         reports = []

         md = ''

-        for l in sorted(lst, key=lambda x: x.meta.get('date',''), reverse=True):
+        for l in sorted(lst, key=lambda x: x.meta.get(
+                'date', ''), reverse=True):
             meta = l.meta

-            if meta.get('private',False):
+            if meta.get('private', False):
                 continue

             uid = meta['uid']

             title = meta.get('title', meta['alias'])

-            url = meta.get('redirect','')
+            url = meta.get('redirect', '')
             if url == '':
                 url = url_prefix + '?action=reports&name={}'.format(uid)

-            md += '* ['+title+']('+url+')\n'
+            md += '* [' + title + '](' + url + ')\n'

         x = '''

            Community reports

            '''
-    st.write(x, unsafe_allow_html = True)
+    st.write(x, unsafe_allow_html=True)

     st.markdown(md)

-    return {'return':0, 'end_html':end_html}
+    return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_reproduce.py b/script/gui/playground_reproduce.py
index 525a49fd72..9e82a686ee 100644
--- a/script/gui/playground_reproduce.py
+++ b/script/gui/playground_reproduce.py
@@ -12,12 +12,12 @@

 announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...'

-badges={
-    'functional':{'url':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'},
-    'reproduced':{'url':'https://cTuning.org/images/results_reproduced_v1_1_small.png'},
-    'support_docker':{'url':'https://cTuning.org/images/docker_logo2_small.png'},
-    'support_cm':{'url':'https://cTuning.org/images/logo-ck-single-tr4.png'}
-    }
+badges = {
+    'functional': {'url': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'},
+    'reproduced': {'url': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'},
+    'support_docker': {'url': 'https://cTuning.org/images/docker_logo2_small.png'},
+    'support_cm': {'url': 'https://cTuning.org/images/logo-ck-single-tr4.png'}
+}


 def main():
@@ -31,9 +31,7 @@ def main():

     return page(st, params)


-
-
-def page(st, params, action = ''):
+def page(st, params, action=''):

     end_html = ''

@@ -64,68 +62,76 @@ def page(st, params, action = ''):
       '''.format(url_benchmarks, url_challenges)
-    st.write(x, unsafe_allow_html = True)
-
+    st.write(x, unsafe_allow_html=True)

-    return {'return':0}
+    return {'return': 0}

#    st.markdown(announcement)

     # Check if test is selected
     test_uid = ''
-    x = params.get('test_uid',[''])
-    if len(x)>0 and x[0]!='': test_uid = x[0].strip()
+    x = params.get('test_uid', [''])
+    if len(x) > 0 and x[0] != '':
+        test_uid = x[0].strip()

-
-    ############################################################################################
+    ##########################################################################
     # Select target hardware
     compute_uid = ''
     compute_meta = {}
     compute_selection = []

     if test_uid == '':
-        x = params.get('compute_uid',[''])
-        if len(x)>0 and x[0]!='': compute_uid = x[0].strip()
+        x = params.get('compute_uid', [''])
+        if len(x) > 0 and x[0] != '':
+            compute_uid = x[0].strip()

-        ii = {'action':'load_cfg',
-              'automation':'utils',
-              'tags':'benchmark,compute',
-              'skip_files':False}
+        ii = {'action': 'load_cfg',
+              'automation': 'utils',
+              'tags': 'benchmark,compute',
+              'skip_files': False}

-        if compute_uid!='':
-            ii['prune']={'uid':compute_uid}
+        if compute_uid != '':
+            ii['prune'] = {'uid': compute_uid}

         r = cmind.access(ii)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         compute_selection = r['selection']

         if test_uid == '':
-            r = misc.make_selection(st, r['selection'], 'compute', 'target hardware', compute_uid)
-            if r['return']>0: return r
+            r = misc.make_selection(
+                st,
+                r['selection'],
+                'compute',
+                'target hardware',
+                compute_uid)
+            if r['return'] > 0:
+                return r

             compute_meta = r['meta']
-            compute_uid = compute_meta.get('uid','')
-
+            compute_uid = compute_meta.get('uid', '')

-    ############################################################################################
+    ##########################################################################
     # Select benchmark
     bench_meta = {}
     bench_name = ''
-    x = params.get('bench_name',[''])
-    if len(x)>0 and x[0]!='': bench_name = x[0].strip()
+    x = params.get('bench_name', [''])
+    if len(x) > 0 and x[0] != '':
+        bench_name = x[0].strip()

     if test_uid == '':
-        ii = {'action':'load_cfg',
-              'automation':'utils',
-              'tags':'benchmark,run',
-              'skip_files':True}
+        ii = {'action': 'load_cfg',
+              'automation': 'utils',
+              'tags': 'benchmark,run',
+              'skip_files': True}

-        if bench_name!='':
-            ii['artifact']=bench_name
+        if bench_name != '':
+            ii['artifact'] = bench_name

         r = cmind.access(ii)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         # Prune by supported compute
         selection = r['selection']

@@ -137,56 +143,65 @@ def page(st, params, action = ''):
         for s in selection:
             add = True

-            if compute_uid in s.get('supported_compute',[]):
+            if compute_uid in s.get('supported_compute', []):
                 pruned_selection.append(s)

-        r = misc.make_selection(st, pruned_selection, 'benchmark', 'benchmark', bench_name)
-        if r['return']>0: return r
+        r = misc.make_selection(
+            st,
+            pruned_selection,
+            'benchmark',
+            'benchmark',
+            bench_name)
+        if r['return'] > 0:
+            return r

         bench_meta = r['meta']

-    ############################################################################################
+    ##########################################################################
     # Select tests
     if test_uid == '' and compute_uid == '' and len(bench_meta) == 0:
         st.markdown('*Please prune search by device and/or benchmark ...*')
     else:
-        ii = {'action':'load_cfg',
-              'automation':'utils',
-              'tags':'benchmark,run',
-              'key':'run-',
-              'key_end':['-meta.json', '-meta.yaml'],
-              'skip_files':False}
-
-        if len(bench_meta)>0 or bench_name!='':
-            if len(bench_meta)>0:
-                ii['artifact']=bench_meta['uid']
+        ii = {'action': 'load_cfg',
+              'automation': 'utils',
+              'tags': 'benchmark,run',
+              'key': 'run-',
+              'key_end': ['-meta.json', '-meta.yaml'],
+              'skip_files': False}
+
+        if len(bench_meta) > 0 or bench_name != '':
+            if len(bench_meta) > 0:
+                ii['artifact'] = bench_meta['uid']
             else:
-                ii['artifact']=bench_name
-        elif compute_uid !='' :
-            ii['prune']={'meta_key':'supported_compute',
-                         'meta_key_uid':compute_uid}
+                ii['artifact'] = bench_name
+        elif compute_uid != '':
+            ii['prune'] = {'meta_key': 'supported_compute',
+                           'meta_key_uid': compute_uid}

         if compute_uid != '':
-            if 'prune' not in ii: ii['prune']={}
+            if 'prune' not in ii:
+                ii['prune'] = {}
             ii['prune']['key'] = 'compute_uid'
             ii['prune']['key_uid'] = compute_uid

-        if test_uid!='':
-            if 'prune' not in ii: ii['prune']={}
-            ii['prune']['uid']=test_uid
+        if test_uid != '':
+            if 'prune' not in ii:
+                ii['prune'] = {}
+            ii['prune']['uid'] = test_uid

         r = cmind.access(ii)
-        if r['return']>0: return r
+        if r['return'] > 0:
+            return r

         # Prune by supported compute
         selection = r['selection']

-        if len(selection)==0:
+        if len(selection) == 0:
             st.markdown('*WARNING: No tests found!*')
         else:
-            if len(selection)==1:
-                ###################################################################
+            if len(selection) == 1:
+                ###############################################################
                 # Show individual test
                 s = selection[0]

@@ -200,45 +215,48 @@ def page(st, params, action = ''):

                 x = ''
                 for b in badges:
-                    if s.get(b, False) or b=='support_cm':
-                        x += '<img src="{}">\n'.format(badges[b]['url'])
+                    if s.get(b, False) or b == 'support_cm':
+                        x += '<img src="{}">\n'.format(
+                            badges[b]['url'])

-                if x!='':
-                    st.write(x, unsafe_allow_html = True)
+                if x != '':
+                    st.write(x, unsafe_allow_html=True)

                 # Check benchmark
-                bench_uid = s.get('bench_uid','')
+                bench_uid = s.get('bench_uid', '')
                 if bench_uid != '':
-                    url_bench = url_benchmarks + '&bench_uid='+bench_uid
-                    st.markdown('[Link to benchmark GUI]({})'.format(url_bench))
+                    url_bench = url_benchmarks + '&bench_uid=' + bench_uid
+                    st.markdown(
+                        '[Link to benchmark GUI]({})'.format(url_bench))

                 # Check notes
-                test_md = full_path[:-10]+'.md'
+                test_md = full_path[:-10] + '.md'
                 if os.path.isfile(test_md):

                     r = cmind.utils.load_txt(test_md)
-                    if r['return']>0: return r
+                    if r['return'] > 0:
+                        return r

                     x = r['string']

-                    if x!='':
+                    if x != '':
                         st.markdown('**Notes:**')
                         st.markdown(x)

                 inp = {}
-                input_file = full_path[:-10]+'-input'
+                input_file = full_path[:-10] + '-input'
                 r = cmind.utils.load_yaml_and_json(input_file)
-                if r['return']==0:
+                if r['return'] == 0:
                     inp = r['meta']

                 out = {}
-                output_file = full_path[:-10]+'-output'
+                output_file = full_path[:-10] + '-output'
                 r = cmind.utils.load_yaml_and_json(output_file)
-                if r['return']==0:
+                if r['return'] == 0:
                     out = r['meta']

-                cmd = inp.get('cmd',[])
-                if len(cmd)>0:
+                cmd = inp.get('cmd', [])
+                if len(cmd) > 0:
                     xcmd = ' \\\n '.join(cmd)

                     st.markdown("""
@@ -248,7 +266,6 @@ def page(st, params, action = ''):
```
""".format(xcmd))

-
                 st.markdown("""
**CM input dictionary:**
```json
@@ -256,7 +273,6 @@ def page(st, params, action = ''):
```
""".format(json.dumps(inp, indent=2)))

-
                 st.markdown("""
**CM input dictionary:**
```json
@@ -264,7 +280,6 @@ def page(st, params, action = ''):
```
""".format(json.dumps(out, indent=2)))

-
                 st.markdown("""

**Test meta:**
@@ -273,9 +288,8 @@ def page(st, params, action = ''):
```
""".format(json.dumps(s, indent=2)))

-
             else:
-                ###################################################################
+                ###############################################################
                 # Show tables
                 import pandas as pd
                 import numpy as np

@@ -284,11 +298,10 @@ def page(st, params, action = ''):

                 all_data = []

-
                 # TBD: should be taken from a given benchmark
                 dimensions = []

-                if len(bench_meta)>0:
+                if len(bench_meta) > 0:
                     dimensions = bench_meta.get('view_dimensions', [])

                 dimension_values = {}

@@ -309,19 +322,28 @@ def page(st, params, action = ''):

                 # If dimensions, sort by dimensions
                 for d in list(reversed(dimension_keys)):
-                    selection = sorted(selection, key = lambda x: misc.get_with_complex_key_safe(selection, d))
+                    selection = sorted(
+                        selection, key=lambda x: misc.get_with_complex_key_safe(
+                            selection, d))

                 keys += [
-                         ('functional', 'Functional', 80, ''),
-                         ('reproduced', 'Reproduced', 80, ''),
-                         ('support_docker', 'Support Docker', 80, ''),
-                         ('support_cm', 'Has unified CM interface', 80, ''),
-                         ('notes', 'Notes', 200, 'lefAligned'),
-                        ]
+                    ('functional', 'Functional', 80, ''),
+                    (
+                        'reproduced',
+                        'Reproduced',
+                        80,
+                        ''),
+                    ('support_docker', 'Support Docker', 80, ''),
+                    (
+                        'support_cm',
+                        'Has unified CM interface',
+                        80,
+                        ''),
+                    ('notes', 'Notes', 200, 'lefAligned'),
+                ]

                 j = 0
-
                 for s in selection:

                     row = {}

@@ -331,20 +353,21 @@ def page(st, params, action = ''):

                     uid = s['uid']

-                    url_test = misc.make_url(uid, key='test_uid', action='reproduce', md=False)
+                    url_test = misc.make_url(
+                        uid, key='test_uid', action='reproduce', md=False)

                     bench_meta = s['main_meta']

                     inp = {}
-                    input_file = full_path[:-10]+'-input'
+                    input_file = full_path[:-10] + '-input'
                     r = cmind.utils.load_yaml_and_json(input_file)
-                    if r['return']==0:
+                    if r['return'] == 0:
                         inp = r['meta']

                     out = {}
-                    output_file = full_path[:-10]+'-output'
+                    output_file = full_path[:-10] + '-output'
                     r = cmind.utils.load_yaml_and_json(output_file)
-                    if r['return']==0:
+                    if r['return'] == 0:
                         out = r['meta']

                     row_meta = {'dict': s,
@@ -352,50 +375,55 @@ def page(st, params, action = ''):
                                 'output': out}

                     if len(dimensions) == 0:
-                        row['test'] = '<a href="{}">{}</a>'.format(url_test, uid)
+                        row['test'] = '<a href="{}">{}</a>'.format(
+                            url_test, uid)
                     else:
-                        row['test'] = '<a href="{}">View</a>'.format(url_test)
+                        row['test'] = '<a href="{}">View</a>'.format(
+                            url_test)

                         for k in dimensions:
                             kk = k[0]

                             v = misc.get_with_complex_key_safe(row_meta, kk)

-                            if len(k)>2 and k[2]=='tick':
-                                if v!=None and v!='':
+                            if len(k) > 2 and k[2] == 'tick':
+                                if v is not None and v != '':
                                     v = '✅'

                             row[kk] = str(v)

-
                     # Check ACM/IEEE functional badge
                     url = ''

                     x = ''
                     if s.get('functional', False):
-                        x = '<a href="{}"><img src="{}"></a>'.format(url, badges['functional']['url'])
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['functional']['url'])
                     row['functional'] = x

                     # Check ACM/IEEE reproduced badge
                     x = ''
                     if s.get('reproduced', False):
-                        x = '<a href="{}"><img src="{}"></a>'.format(url, badges['reproduced']['url'])
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['reproduced']['url'])
                     row['reproduced'] = x

                     # Check Docker
                     x = ''
                     if s.get('support_docker', False):
-                        x = '<a href="{}"><img src="{}"></a>'.format(url, badges['support_docker']['url'])
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['support_docker']['url'])
                     row['support_docker'] = x

                     x = ''
-                    bench_uid = s.get('bench_uid','')
+                    bench_uid = s.get('bench_uid', '')
                     if bench_uid != '':
-                        url_bench = url_benchmarks + '&bench_uid='+bench_uid
-                        x = '<a href="{}"><img src="{}"></a>'.format(url_bench, badges['support_cm']['url'])
+                        url_bench = url_benchmarks + '&bench_uid=' + bench_uid
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url_bench, badges['support_cm']['url'])
                     row['support_cm'] = x

                     # Check misc notes
-                    row['notes']=''+s.get('notes','')+''
+                    row['notes'] = '' + s.get('notes', '') + ''

                     # Finish row
                     all_data.append(row)

@@ -405,32 +433,28 @@ def page(st, params, action = ''):
                 pd_key_names = [v[1] for v in keys]

                 pd_all_data = []
-                for row in sorted(all_data, key=lambda row: (row.get('x1',0))):
-                    pd_row=[]
+                for row in sorted(
+                        all_data, key=lambda row: (row.get('x1', 0))):
+                    pd_row = []
                     for k in pd_keys:
                         pd_row.append(row.get(k))

                     pd_all_data.append(pd_row)

-                df = pd.DataFrame(pd_all_data, columns = pd_key_names)
-
-                df.index+=1
-
-                html=df.to_html(escape=False, justify='left')
-                st.write(html, unsafe_allow_html = True)
-
-
-
-
+                df = pd.DataFrame(pd_all_data, columns=pd_key_names)
+                df.index += 1

-        if bench_name!='':
-            self_url+='&bench_name='+bench_name
-            if test_uid!='':
-                self_url+='&test_uid='+test_uid
-        elif compute_uid!='':
-            self_url+='&compute_uid='+compute_uid
+                html = df.to_html(escape=False, justify='left')
+                st.write(html, unsafe_allow_html=True)

-        end_html='<a href="{}">Self link</a>'.format(self_url)
+        if bench_name != '':
+            self_url += '&bench_name=' + bench_name
+            if test_uid != '':
+                self_url += '&test_uid=' + test_uid
+        elif compute_uid != '':
+            self_url += '&compute_uid=' + compute_uid

+        end_html = '<a href="{}">Self link</a>'.format(
+            self_url)

     return {'return': 0, 'end_html': end_html}
diff --git a/script/gui/playground_scripts.py b/script/gui/playground_scripts.py
index bf1ce13d9b..71a7becd29 100644
--- a/script/gui/playground_scripts.py
+++ b/script/gui/playground_scripts.py
@@ -5,20 +5,22 @@ import datetime

 import misc

+
 def page(st, params):

-    url_prefix = st.config.get_option('server.baseUrlPath')+'/'
+    url_prefix = st.config.get_option('server.baseUrlPath') + '/'
     url_prefix_script = url_prefix + '?action=scripts'

     script_name = ''
-    x = params.get('name',[''])
-    if len(x)>0 and x[0]!='': script_name = x[0].strip()
+    x = params.get('name', [''])
+    if len(x) > 0 and x[0] != '':
+        script_name = x[0].strip()

     script_tags = ''
     if script_name == '':
-        x = params.get('tags',[''])
-        if len(x)>0 and x[0]!='': script_tags = x[0].strip()
-
+        x = params.get('tags', [''])
+        if len(x) > 0 and x[0] != '':
+            script_tags = x[0].strip()

     if script_tags == 'modular,app':
         x = '''
''' - st.write(x, unsafe_allow_html = True) - + st.write(x, unsafe_allow_html=True) - script_tags = st.text_input('Search open-source automation recipes by tags:', value=script_tags, key='script_tags').strip() + script_tags = st.text_input( + 'Search open-source automation recipes by tags:', + value=script_tags, + key='script_tags').strip() # Searching automation recipes - ii = {'action':'find', - 'automation':'script,5b4e0237da074764'} + ii = {'action': 'find', + 'automation': 'script,5b4e0237da074764'} - if script_tags!='': - script_tags=script_tags.replace(' ',',') - ii['tags']=script_tags - elif script_name!='': - ii['artifact']=script_name + if script_tags != '': + script_tags = script_tags.replace(' ', ',') + ii['tags'] = script_tags + elif script_name != '': + ii['artifact'] = script_name # Check variations for later: variations = [v for v in script_tags.split(',') if v.startswith('_')] r = cmind.access(ii) - if r['return']>0: return r + if r['return'] > 0: + return r lst2 = r['list'] @@ -79,12 +84,12 @@ def page(st, params): end_html = '' - if len(lst)==0: + if len(lst) == 0: st.markdown('CM scripts were not found!') else: artifact = None - if len(lst)==1: + if len(lst) == 1: # Individual script recipe = lst[0] @@ -94,20 +99,21 @@ def page(st, params): uid = meta['uid'] use_gui = False - x = params.get('gui',['']) - if len(x)>0 and (x[0].lower()=='true' or x[0].lower()=='yes'): + x = params.get('gui', ['']) + if len(x) > 0 and (x[0].lower() == + 'true' or x[0].lower() == 'yes'): import script script_path = recipe.path script_alias = alias # script_tags = script_tags - if script_tags=='': - script_tags = meta.get('tags_help','') - if script_tags !='': - script_tags=script_tags.replace(' ',',') + if script_tags == '': + script_tags = meta.get('tags_help', '') + if script_tags != '': + script_tags = script_tags.replace(' ', ',') else: - script_tags = ','.join(meta.get('tags',[])) + script_tags = ','.join(meta.get('tags', [])) ii = {'st': st, 'params': params, @@ -127,20 +133,31 @@ def page(st, params): repo_meta = recipe.repo_meta # Basic run - tags = meta['tags_help'] if meta.get('tags_help','')!='' else ' '.join(meta['tags']) - - x1 = misc.make_url(tags.replace(' ',','), key = 'tags', action='scripts', md=False, skip_url_quote=True) + tags = meta['tags_help'] if meta.get( + 'tags_help', '') != '' else ' '.join( + meta['tags']) + + x1 = misc.make_url( + tags.replace( + ' ', + ','), + key='tags', + action='scripts', + md=False, + skip_url_quote=True) x2 = misc.make_url(meta['alias'], action='scripts', md=False) x3 = misc.make_url(meta['uid'], action='scripts', md=False) - end_html='
Self links: tags or alias or UID
'.format(x1,x2,x3) + end_html = '
Self links: tags or alias or UID
'.format( + x1, x2, x3) - extra_repo = '' if repo_meta['alias']=='mlcommons@cm4mlops' else '\ncm pull repo '+repo_meta['alias'] + extra_repo = '' if repo_meta['alias'] == 'mlcommons@cm4mlops' else '\ncm pull repo ' + \ + repo_meta['alias'] xtags = tags - if len(variations)>0: - if xtags!='': - xtags+=' ' - xtags+=' '.join(variations) + if len(variations) > 0: + if xtags != '': + xtags += ' ' + xtags += ' '.join(variations) x = ''' ```bash @@ -160,136 +177,146 @@ def page(st, params): cm gui script "{}" ``` - '''.format(extra_repo,xtags,xtags,xtags,xtags,xtags,xtags) - - - + '''.format(extra_repo, xtags, xtags, xtags, xtags, xtags, xtags) # Check original link - url = repo_meta.get('url','') - if url=='' and repo_meta.get('git', False): - url = 'https://github.com/'+repo_meta['alias'].replace('@','/') + url = repo_meta.get('url', '') + if url == '' and repo_meta.get('git', False): + url = 'https://github.com/' + \ + repo_meta['alias'].replace('@', '/') url_readme = '' url_readme_extra = '' url_meta_description = '' url_customize = '' - if url!='': + if url != '': # Recreate GitHub path - if not url.endswith('/'): url=url+'/' + if not url.endswith('/'): + url = url + '/' url += 'tree/master/' - if repo_meta.get('prefix','')!='': + if repo_meta.get('prefix', '') != '': url += repo_meta['prefix'] - if not url.endswith('/'): url=url+'/' + if not url.endswith('/'): + url = url + '/' - url += 'script/'+alias + url += 'script/' + alias # Check README.md z = os.path.join(recipe.path, 'README.md') if os.path.isfile(z): - url_readme = url+'/README.md' + url_readme = url + '/README.md' # Check README.extra.md z = os.path.join(recipe.path, 'README-extra.md') if os.path.isfile(z): - url_readme_extra = url+'/README-extra.md' + url_readme_extra = url + '/README-extra.md' # Check customize.py z = os.path.join(recipe.path, 'customize.py') if os.path.isfile(z): - url_customize = url+'/customize.py' + url_customize = url + '/customize.py' # Check _cm.yaml or _cm.json for z in ['_cm.yaml', '_cm.json']: y = os.path.join(recipe.path, z) if os.path.isfile(y): - url_meta_description = url+'/'+z + url_meta_description = url + '/' + z - url_gui = url_prefix_script+'&name='+alias+','+uid+'&gui=true' + url_gui = url_prefix_script + '&name=' + alias + ',' + uid + '&gui=true' - z = '* ***Check [open source code (Apache 2.0 license)]({}) at GitHub.***\n'.format(url) - z += '* ***Check [detailed auto-generated README on GitHub]({}).***\n'.format(url_readme) - z += '* ***Check [experimental GUI]({}) to run this script.***\n'.format(url_gui) + z = '* ***Check [open source code (Apache 2.0 license)]({}) at GitHub.***\n'.format( + url) + z += '* ***Check [detailed auto-generated README on GitHub]({}).***\n'.format( + url_readme) + z += '* ***Check [experimental GUI]({}) to run this script.***\n'.format( + url_gui) z += '---\n' st.markdown(z) - st.markdown('Default run on Linux, Windows, MacOS and any other OS (check [CM installation guide]({}) for more details):\n{}\n'.format(url_prefix + '?action=install', x)) + st.markdown( + 'Default run on Linux, Windows, MacOS and any other OS (check [CM installation guide]({}) for more details):\n{}\n'.format( + url_prefix + + '?action=install', + x)) st.markdown('*The [Collective Mind concept](https://doi.org/10.5281/zenodo.8105339) is to gradually improve portability and reproducibility of common automation recipes based on user feedback' - ' while keeping the same human-friendly interface. 
If you encounter issues, please report them [here](https://github.com/mlcommons/ck/issues) ' - ' to help this community project!*') - - + ' while keeping the same human-friendly interface. If you encounter issues, please report them [here](https://github.com/mlcommons/ck/issues) ' + ' to help this community project!*') - if url_readme_extra!='': - st.markdown('* See [extra README]({}) for this automation recipe at GitHub.'.format(url_readme_extra)) + if url_readme_extra != '': + st.markdown( + '* See [extra README]({}) for this automation recipe at GitHub.'.format(url_readme_extra)) - if url_meta_description!='': - st.markdown('* See [meta description]({}) for this automation recipe at GitHub.'.format(url_meta_description)) + if url_meta_description != '': + st.markdown( + '* See [meta description]({}) for this automation recipe at GitHub.'.format(url_meta_description)) - if url_customize!='': - st.markdown('* See [customization python code]({}) for this automation recipe at GitHub.'.format(url_customize)) + if url_customize != '': + st.markdown( + '* See [customization python code]({}) for this automation recipe at GitHub.'.format(url_customize)) # Check dependencies - r = misc.get_all_deps_tags({'meta':meta, 'st':st}) - if r['return']>0: return r + r = misc.get_all_deps_tags({'meta': meta, 'st': st}) + if r['return'] > 0: + return r all_deps_tags = r['all_deps_tags'] - if len(all_deps_tags)>0: + if len(all_deps_tags) > 0: st.markdown('**Dependencies on other CM scripts:**') - x='' + x = '' for t in sorted(all_deps_tags): # Test that it's not just extending tags: if t.startswith('_') or ',' not in t: continue - url_deps = url_prefix_script+'&tags='+t + url_deps = url_prefix_script + '&tags=' + t - x+='* [{}]({})\n'.format(t, url_deps) + x += '* [{}]({})\n'.format(t, url_deps) st.markdown(x) - else: - categories={} + categories = {} for l in sorted(lst, key=lambda x: ( - x.meta.get('alias','') - )): + x.meta.get('alias', '') + )): - category = l.meta.get('category','') - if category == '': category = 'Unsorted' + category = l.meta.get('category', '') + if category == '': + category = 'Unsorted' if category not in categories: - categories[category]=[] + categories[category] = [] categories[category].append(l) - if len(categories)>1: - category_selection = [''] + sorted(list(categories.keys()), key = lambda v: v.upper()) + if len(categories) > 1: + category_selection = [ + ''] + sorted(list(categories.keys()), key=lambda v: v.upper()) # Creating compute selector category_id = st.selectbox('Prune by category:', range(len(category_selection)), format_func=lambda x: category_selection[x], - index = 0, - key = 'category') + index=0, + key='category') - if category_id>0: + if category_id > 0: category_key = category_selection[category_id] - categories = {category_key:categories[category_key]} + categories = {category_key: categories[category_key]} # Check number of recipes recipes = 0 - for category in sorted(categories, key = lambda v: v.upper()): + for category in sorted(categories, key=lambda v: v.upper()): recipes += len(categories[category]) x = ''' @@ -297,11 +324,10 @@ def page(st, params): Found {} automation recipes: '''.format(str(recipes)) - st.write(x, unsafe_allow_html = True) - + st.write(x, unsafe_allow_html=True) - for category in sorted(categories, key = lambda v: v.upper()): - md = '### {}'.format(category)+'\n' + for category in sorted(categories, key=lambda v: v.upper()): + md = '### {}'.format(category) + '\n' for recipe in categories[category]: meta = recipe.meta @@ -309,10 
+335,10 @@ def page(st, params): alias = meta['alias'] uid = meta['uid'] - url = url_prefix_script+'&name='+alias+','+uid + url = url_prefix_script + '&name=' + alias + ',' + uid - md += '* [{}]({})'.format(alias, url)+'\n' + md += '* [{}]({})'.format(alias, url) + '\n' st.markdown(md) - return {'return':0, 'end_html':end_html} + return {'return': 0, 'end_html': end_html} diff --git a/script/gui/script.py b/script/gui/script.py index 0a279b3352..ededecbecd 100644 --- a/script/gui/script.py +++ b/script/gui/script.py @@ -6,6 +6,7 @@ import misc + def page(i): st = i['st'] @@ -22,53 +23,54 @@ def page(i): no_run = os.environ.get('CM_GUI_NO_RUN', '') - gui_meta = meta.get('gui',{}) + gui_meta = meta.get('gui', {}) gui_func = gui_meta.get('use_customize_func', '') - if gui_func!='': - ii = {'streamlit_module':st, - 'meta':meta} - return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy') , + if gui_func != '': + ii = {'streamlit_module': st, + 'meta': meta} + return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy'), 'customize', gui_func, ii) st.markdown("""---""") - if gui_meta.get('title','')!='': + if gui_meta.get('title', '') != '': title = gui_meta['title'] - # Set title # st.title('[Collective Mind](https://github.com/mlcommons/ck)') url_script = 'https://github.com/mlcommons/ck' - if repo_meta != None and script_alias!='': - url = repo_meta.get('url','') - if url=='' and repo_meta.get('git', False): - url = 'https://github.com/'+repo_meta['alias'].replace('@','/') + if repo_meta is not None and script_alias != '': + url = repo_meta.get('url', '') + if url == '' and repo_meta.get('git', False): + url = 'https://github.com/' + repo_meta['alias'].replace('@', '/') - if url!='': + if url != '': # Recreate GitHub path - if not url.endswith('/'): url=url+'/' + if not url.endswith('/'): + url = url + '/' url += 'tree/master/' - if repo_meta.get('prefix','')!='': + if repo_meta.get('prefix', '') != '': url += repo_meta['prefix'] - if not url.endswith('/'): url=url+'/' + if not url.endswith('/'): + url = url + '/' - url += 'script/'+script_alias + url += 'script/' + script_alias url_script = url hide = params.get('hide_script_customization', False) - if script_alias!='': - show_customize = st.toggle('**Customize input for the CM script "[{}]({})"**'.format(script_alias, url_script), value = not hide) + if script_alias != '': + show_customize = st.toggle( + '**Customize input for the CM script "[{}]({})"**'.format( + script_alias, url_script), value=not hide) hide = not show_customize - - # Check if found path and there is meta # TBD (Grigori): need to cache it using @st.cache variation_groups = {} @@ -80,23 +82,23 @@ def page(i): st_variations = {} - if len(meta)>0: - variations = meta.get('variations',{}) + if len(meta) > 0: + variations = meta.get('variations', {}) - default_variation = meta.get('default_variation','') + default_variation = meta.get('default_variation', '') variation_keys = sorted(list(variations.keys())) for variation_key in sorted(variation_keys): variation = variations[variation_key] - alias = variation.get('alias','').strip() + alias = variation.get('alias', '').strip() - if alias!='': + if alias != '': aliases = variation_alias.get(alias, []) if variation_key not in aliases: aliases.append(variation_key) - variation_alias[alias]=aliases + variation_alias[alias] = aliases # Do not continue this loop if alias continue @@ -119,7 +121,7 @@ def page(i): default_variations.append(variation_key) - group = variation.get('group','') + 
group = variation.get('group', '') if variation_key.endswith('_'): group = '*internal*' @@ -127,22 +129,23 @@ def page(i): group = '*no-group*' if group not in variation_groups: - variation_groups[group]=[] + variation_groups[group] = [] variation_groups[group].append(variation_key) # Prepare variation_groups - if len(variations)>0: + if len(variations) > 0: if not hide: - st.markdown('**Select variations to update multiple flags and environment variables:**') + st.markdown( + '**Select variations to update multiple flags and environment variables:**') - variation_groups_order = meta.get('variation_groups_order',[]) + variation_groups_order = meta.get('variation_groups_order', []) for variation in sorted(variation_groups): if variation not in variation_groups_order: variation_groups_order.append(variation) for group_key in variation_groups_order: - group_key_cap = group_key.replace('-',' ').capitalize() + group_key_cap = group_key.replace('-', ' ').capitalize() if not group_key.startswith('*'): y = [''] @@ -152,12 +155,12 @@ def page(i): index += 1 y.append(variation_key) if variation_key in default_variations: - selected_index=index + selected_index = index - key2 = '~~'+group_key + key2 = '~~' + group_key x = params.get(key2, None) - if x!=None and len(x)>0 and x[0]!=None: + if x != None and len(x) > 0 and x[0] != None: x = x[0] if x in y: selected_index = y.index(x) if x in y else 0 @@ -165,50 +168,52 @@ def page(i): if hide: st_variations[key2] = sorted(y)[selected_index] else: - st_variations[key2] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key=key2) + st_variations[key2] = st.selectbox( + group_key_cap, sorted(y), index=selected_index, key=key2) elif group_key == '*no-group*': for variation_key in sorted(variation_groups[group_key]): v = False if variation_key in default_variations: - v=True + v = True - key2 = '~'+variation_key + key2 = '~' + variation_key x = params.get(key2, None) - if x!=None and len(x)>0 and x[0]!=None: - if x[0].lower()=='true': + if x != None and len(x) > 0 and x[0] != None: + if x[0].lower() == 'true': v = True - elif x[0].lower()=='false': + elif x[0].lower() == 'false': v = False if hide: st_variations[key2] = v else: - st_variations[key2] = st.checkbox(variation_key.capitalize(), key=key2, value=v) - + st_variations[key2] = st.checkbox( + variation_key.capitalize(), key=key2, value=v) # Prepare inputs - input_desc=meta.get('input_description',{}) + input_desc = meta.get('input_description', {}) - if len(input_desc)>0: + if len(input_desc) > 0: sort_desc = {} sort_keys = [] for k in input_desc: - sort = input_desc[k].get('sort',0) - if sort>0: - sort_desc[k]=sort - if len(sort_desc)>0: - sort_keys = sorted(sort_desc, key = lambda k: sort_desc[k]) + sort = input_desc[k].get('sort', 0) + if sort > 0: + sort_desc[k] = sort + if len(sort_desc) > 0: + sort_keys = sorted(sort_desc, key=lambda k: sort_desc[k]) - other_keys = sorted([k for k in input_desc if input_desc[k].get('sort',0)==0]) + other_keys = sorted( + [k for k in input_desc if input_desc[k].get('sort', 0) == 0]) - all_keys = [] if len(sort_keys)==0 else sort_keys + all_keys = [] if len(sort_keys) == 0 else sort_keys all_keys += other_keys if not hide: - if len(sort_keys)>0: + if len(sort_keys) > 0: st.markdown('**Select main flags:**') else: st.markdown('**Select all flags:**') @@ -217,23 +222,25 @@ def page(i): for key in all_keys: value = input_desc[key] - if len(sort_keys)>0 and value.get('sort',0)==0 and not other_flags: + if len(sort_keys) > 0 and value.get( + 'sort', 0) == 0 
and not other_flags:
             if not hide:
                 st.markdown('**Select other flags:**')
             other_flags = True
 
-        ii={'key':key,
-            'desc':value,
-            'params':params,
-            'st':st,
-            'st_inputs':st_inputs,
-            'hide':hide}
+        ii = {'key': key,
+              'desc': value,
+              'params': params,
+              'st': st,
+              'st_inputs': st_inputs,
+              'hide': hide}
 
         r2 = misc.make_selector(ii)
-        if r2['return']>0: return r2
+        if r2['return'] > 0:
+            return r2
 
     # Check tags
 
-    selected_variations=[]
+    selected_variations = []
     for k in st_variations:
         v = st_variations[k]
 
@@ -242,63 +249,61 @@ def page(i):
         elif k.startswith('~'):
             k2 = k[1:]
 
-            if type(v)==bool:
-                if v:
-                    selected_variations.append('_'+k2)
-            elif v!='':
-                selected_variations.append('_'+v)
+            if isinstance(v, bool):
+                if v:
+                    selected_variations.append('_' + k2)
+            elif v != '':
+                selected_variations.append('_' + v)
 
     x = script_tags
     if ' ' in script_tags:
-        if len(selected_variations)>0:
-            x+=' '+' '.join(selected_variations)
+        if len(selected_variations) > 0:
+            x += ' ' + ' '.join(selected_variations)
         tags = '"{}"'.format(x)
     else:
-        if len(selected_variations)>0:
-            x+=','+','.join(selected_variations)
+        if len(selected_variations) > 0:
+            x += ',' + ','.join(selected_variations)
         tags = '--tags={}'.format(x)
 
-
-
-
     # Add extras to inputs
-    add_to_st_inputs = extra.get('add_to_st_inputs',{})
-    if len(add_to_st_inputs)>0:
+    add_to_st_inputs = extra.get('add_to_st_inputs', {})
+    if len(add_to_st_inputs) > 0:
         st_inputs.update(add_to_st_inputs)
 
-
-    ############################################################################
+    ##########################################################################
     st.markdown("""---""")
 
     st.markdown('**Run this CM script (Linux/MacOS/Windows):**')
 
-
     x = ''
 
     extra_notes_online = extra.get('extra_notes_online', '')
-    if extra_notes_online != '': x+=' [ '+extra_notes_online+' ] '
+    if extra_notes_online != '':
+        x += ' [ ' + extra_notes_online + ' ] '
 
     extra_faq_online = extra.get('extra_faq_online', '')
-    if extra_faq_online != '': x+=' [ '+extra_faq_online+' ] '
-
-    if x !='':
-        st.markdown('*'+x.strip()+'*')
-
+    if extra_faq_online != '':
+        x += ' [ ' + extra_faq_online + ' ] '
+    if x != '':
+        st.markdown('*' + x.strip() + '*')
 
     host_os_windows = False if os.name != 'nt' else True
-    host_os_use_windows = st.toggle('Run on Windows?', value = host_os_windows)
+    host_os_use_windows = st.toggle('Run on Windows?', value=host_os_windows)
 
     if host_os_use_windows:
         var1 = '^'
         host_os_flag = 'windows'
-#        st.markdown('*Check how to install [a few dependencies](https://github.com/mlcommons/ck/blob/master/docs/installation.md#windows) on Windows.*')
+# st.markdown('*Check how to install [a few
+# dependencies](https://github.com/mlcommons/ck/blob/master/docs/installation.md#windows)
+# on Windows.*')
     else:
         var1 = '\\'
         host_os_flag = 'linux'
 
-
-    show_cm_install = st.toggle('Install MLCommons Collective Mind', value=False)
+    show_cm_install = st.toggle(
+        'Install MLCommons Collective Mind',
+        value=False)
 
     if show_cm_install:
 
@@ -306,18 +311,17 @@ def page(i):
         extra = {'skip_header': True, 'run_on_windows': host_os_use_windows}
 
         r = playground_install.page(st, params, extra)
-        if r['return']>0: return r
-
+        if r['return'] > 0:
+            return r
         st.markdown('---')
 
-
-    ############################################################################
+    ##########################################################################
     shell = st.toggle('Open shell after executing CM script?', value=False)
    if shell:
         st_inputs['~shell'] = True
 
-    ############################################################################
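[Editor's note] The tag-assembly branch reformatted above is the core of how this GUI turns variation selections into a `cm run` command. A minimal standalone sketch of that behaviour, with a hypothetical helper name that is not part of this diff:

```python
def assemble_tags(script_tags, selected_variations):
    # Mirrors the block above: tag strings containing spaces are quoted
    # verbatim, comma-separated ones are passed through a --tags= flag.
    x = script_tags
    if ' ' in script_tags:
        if len(selected_variations) > 0:
            x += ' ' + ' '.join(selected_variations)
        return '"{}"'.format(x)
    if len(selected_variations) > 0:
        x += ',' + ','.join(selected_variations)
    return '--tags={}'.format(x)

# assemble_tags('detect,os', ['_linux'])  -> '--tags=detect,os,_linux'
# assemble_tags('detect os', ['_linux'])  -> '"detect os _linux"'
```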
+    ##########################################################################
     flags_dict = {}
     flags = ''
 
@@ -325,81 +329,82 @@ def page(i):
         value = st_inputs[key]
         key2 = key[1:]
 
-        if value!='' and (type(value)!=bool or value==True):
-            flags+=' '+var1+'\n   --'+key2
+        if value != '' and (type(value) != bool or value == True):
+            flags += ' ' + var1 + '\n   --' + key2
 
             z = True
-            if type(value)!=bool:
-                x = str(value)
+            if not isinstance(value, bool):
+                x = str(value)
                 z = x
 
                 if ' ' in x or ':' in x or '/' in x or '\\' in x:
-                    x='"'+x+'"'
-                flags+='='+x
-
-            flags_dict[key2]=z
-
-
-
-
+                    x = '"' + x + '"'
+                flags += '=' + x
+            flags_dict[key2] = z
 
-    ############################################################################
+    ##########################################################################
     run_via_docker = False
-    if not extra.get('skip_script_docker_func', False) and len(meta.get('docker',{}))>0:
-        run_via_docker = st.toggle('Use Docker', key='run_via_docker', value=False)
+    if not extra.get('skip_script_docker_func', False) and len(meta.get('docker', {})) > 0:
+        run_via_docker = st.toggle(
+            'Use Docker',
+            key='run_via_docker',
+            value=False)
 
         if run_via_docker:
-            st.markdown("*WARNING: CM automatically generates containers for a give script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*")
+            st.markdown(
+                "*WARNING: CM automatically generates containers for a given script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*")
 
     action = 'docker' if run_via_docker else 'run'
     cli = 'cm {} script {} {}\n'.format(action, tags, flags)
 
-
-    ############################################################################
+    ##########################################################################
     use_experiment_from_extra = extra.get('use_experiment', False)
 
-    use_experiment = st.toggle('Use CM experiment for reproducibility', key='use_cm_experiment', value=use_experiment_from_extra)
+    use_experiment = st.toggle(
+        'Use CM experiment for reproducibility',
+        key='use_cm_experiment',
+        value=use_experiment_from_extra)
 
     extra_cm_prefix = ''
     if use_experiment:
-        cli = 'cm run experiment --tags={} -- {}\n  '.format("repro,"+script_tags, var1) + cli
+        cli = 'cm run experiment --tags={} -- {}\n  '.format(
+            "repro," + script_tags, var1) + cli
 
-    ############################################################################
+    ##########################################################################
 
-    extra_setup = extra.get('extra_setup','').strip()
-    if len(extra_setup)>2:
-        show_extra_setup_notes = st.toggle('Show extra setup notes?', value = True)
+    extra_setup = extra.get('extra_setup', '').strip()
+    if len(extra_setup) > 2:
+        show_extra_setup_notes = st.toggle(
+            'Show extra setup notes?', value=True)
 
         if show_extra_setup_notes:
-#            st.markdown('---')
+            # st.markdown('---')
             st.markdown(extra_setup)
 #            st.markdown('---')
 
-
     show_python_api = st.toggle('Run via Python API', value=False)
 
     # Python API
     if show_python_api:
         final_script_tags = script_tags
-        if len(selected_variations)>0:
+        if len(selected_variations) > 0:
             for sv in selected_variations:
-                final_script_tags += ' '+sv
-            final_script_tags = final_script_tags.replace(' ',',')
+                final_script_tags += ' ' + sv
+            final_script_tags = final_script_tags.replace(' ', ',')
 
         if use_experiment:
             dd = {
-                  'action': 'run',
-                  'automation': 'experiment,a0a2d123ef064bcb',
-                  'tags': 
script_tags, 'out': 'con' - } + } unparsed_cmd = ['cm', 'run', 'script,5b4e0237da074764', - '--tags='+final_script_tags] + '--tags=' + final_script_tags] for flag in flags_dict: value = flags_dict[flag] @@ -409,25 +414,29 @@ def page(i): else: dd = { - 'action':action, - 'automation':'script,5b4e0237da074764', - } + 'action': action, + 'automation': 'script,5b4e0237da074764', + } - dd['tags']=final_script_tags + dd['tags'] = final_script_tags - dd['out']='con' + dd['out'] = 'con' dd.update(flags_dict) import json - dd_json=json.dumps(dd, indent=2) - dd_json=dd_json.replace(': true', ': True').replace(': false', ': False') + dd_json = json.dumps(dd, indent=2) + dd_json = dd_json.replace( + ': true', + ': True').replace( + ': false', + ': False') y = 'import cmind\n' - y+= 'r = cmind.access('+dd_json+')\n' - y+= 'if r[\'return\']>0: print (r[\'error\'])\n' + y += 'r = cmind.access(' + dd_json + ')\n' + y += 'if r[\'return\']>0: print (r[\'error\'])\n' - x=''' + x = ''' ```python {} '''.format(y) @@ -436,34 +445,33 @@ def page(i): st.markdown(x) - - - ############################################################################ - show_cli = st.toggle('Run from the command line', value = True) + ########################################################################## + show_cli = st.toggle('Run from the command line', value=True) if show_cli: # Add explicit button "Run" cli = st.text_area('', cli, height=600) - if no_run=='' and st.button("Run in the new terminal"): - cli = cli+var1+'--pause\n' + if no_run == '' and st.button("Run in the new terminal"): + cli = cli + var1 + '--pause\n' - cli = cli.replace(var1, ' ').replace('\n',' ') + cli = cli.replace(var1, ' ').replace('\n', ' ') if os.name == 'nt': cmd2 = 'start cmd /c {}'.format(cli) else: cli2 = cli.replace('"', '\\"') - prefix = os.environ.get('CM_GUI_SCRIPT_PREFIX_LINUX','') - if prefix!='': prefix+=' ' + prefix = os.environ.get('CM_GUI_SCRIPT_PREFIX_LINUX', '') + if prefix != '': + prefix += ' ' cmd2 = prefix + 'bash -c "{}"'.format(cli2) - print ('Running command:') - print ('') - print (' {}'.format(cmd2)) - print ('') + print('Running command:') + print('') + print(' {}'.format(cmd2)) + print('') os.system(cmd2) @@ -476,9 +484,10 @@ def page(i): please don't hesitate report issues or suggest features at CM GitHub! 
''' - st.write(x, unsafe_allow_html = True) + st.write(x, unsafe_allow_html=True) + + return {'return': 0} - return {'return':0} if __name__ == "__main__": main() diff --git a/script/gui/tests/generate_password.py b/script/gui/tests/generate_password.py index 145a46dbd3..a29f7cd637 100644 --- a/script/gui/tests/generate_password.py +++ b/script/gui/tests/generate_password.py @@ -1,8 +1,8 @@ import bcrypt -#salt = bcrypt.gensalt() +# salt = bcrypt.gensalt() # TBD: temporal hack to demo password protection for experiments -#salt = bcrypt.gensalt() +# salt = bcrypt.gensalt() pwd = input('Password: ') pwd = pwd.strip() @@ -10,4 +10,4 @@ password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' password_hash2 = bcrypt.hashpw(pwd.encode('utf-8'), password_salt) -print ('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) +print('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) diff --git a/script/import-mlperf-inference-to-experiment/customize.py b/script/import-mlperf-inference-to-experiment/customize.py index fcacf3412f..38825892ee 100644 --- a/script/import-mlperf-inference-to-experiment/customize.py +++ b/script/import-mlperf-inference-to-experiment/customize.py @@ -13,26 +13,27 @@ file_result = 'cm-result.json' model2task = { - "resnet":"image-classification", - "retinanet":"object-detection", - "ssd-small":"object-detection", - "ssd-large": "object-detection", - "rnnt":"speech-recognition", - "bert-99":"language-processing", - "bert-99.9":"language-processing", - "gptj-99":"language-processing", - "gptj-99.9":"language-processing", - "llama2-70b-99":"language-processing", - "llama2-70b-99.9":"language-processing", - "dlrm-99":"recommendation", - "dlrm-v2-99":"recommendation", - "dlrm-99.9":"recommendation", - "dlrm-v2-99.9":"recommendation", - "3d-unet-99":"image-segmentation", - "3d-unet-99.9":"image-segmentation", - "stable-diffusion-xl":"text-to-image" + "resnet": "image-classification", + "retinanet": "object-detection", + "ssd-small": "object-detection", + "ssd-large": "object-detection", + "rnnt": "speech-recognition", + "bert-99": "language-processing", + "bert-99.9": "language-processing", + "gptj-99": "language-processing", + "gptj-99.9": "language-processing", + "llama2-70b-99": "language-processing", + "llama2-70b-99.9": "language-processing", + "dlrm-99": "recommendation", + "dlrm-v2-99": "recommendation", + "dlrm-99.9": "recommendation", + "dlrm-v2-99.9": "recommendation", + "3d-unet-99": "image-segmentation", + "3d-unet-99.9": "image-segmentation", + "stable-diffusion-xl": "text-to-image" } + def preprocess(i): env = i['env'] @@ -40,10 +41,11 @@ def preprocess(i): cur_dir = os.getcwd() # Query cache for results dirs - r = cm.access({'action':'find', - 'automation':'cache,541d6f712a6b464e', - 'tags':'get,repo,mlperf-inference-results'}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-inference-results'}) + if r['return'] > 0: + return r lst = r['list'] @@ -59,24 +61,27 @@ def preprocess(i): version = '' for t in tags: if t.startswith('version-'): - version = 'v'+t[8:] + version = 'v' + t[8:] break - skip_submission_checker = env.get('CM_SKIP_SUBMISSION_CHECKER','') in ['yes','True'] + skip_submission_checker = env.get( + 'CM_SKIP_SUBMISSION_CHECKER', '') in [ + 'yes', 'True'] - print ('') - print ('Processing results in path: {}'.format(path)) - print ('Version: {}'.format(version)) - print ('') + print('') + print('Processing results in path: {}'.format(path)) + print('Version: 
{}'.format(version)) + print('') if skip_submission_checker: if not os.path.isfile(file_summary): - return {'return':1, 'error':'{} not found'.format(file_summary)} + return {'return': 1, + 'error': '{} not found'.format(file_summary)} else: if os.path.isfile(file_summary): os.remove(file_summary) - print ('* Running submission checker ...') + print('* Running submission checker ...') xenv = {} @@ -84,56 +89,60 @@ def preprocess(i): if submitter != '': xenv['CM_MLPERF_SUBMITTER'] = submitter - ii = {'action':'run', - 'automation':'script', - 'tags':'run,mlperf,inference,submission,checker', - 'extra_args':' --skip-extra-files-in-root-check', - 'submission_dir':path} + ii = {'action': 'run', + 'automation': 'script', + 'tags': 'run,mlperf,inference,submission,checker', + 'extra_args': ' --skip-extra-files-in-root-check', + 'submission_dir': path} - if len(xenv)>0: + if len(xenv) > 0: ii['env'] = xenv - if version!='': - print (' Version detected from cache tags: {}'.format(version)) - ii['version']=version + if version != '': + print( + ' Version detected from cache tags: {}'.format(version)) + ii['version'] = version r = cm.access(ii) # Ignore if script fails for now (when some results are wrong) - if r['return']>0 and r['return']!=2: + if r['return'] > 0 and r['return'] != 2: return r - if r['return']>0: - print ('') - print ('WARNING: script returned non-zero value - possible issue - please check!') - print ('') - input ('Press Enter to continue') - print ('') + if r['return'] > 0: + print('') + print( + 'WARNING: script returned non-zero value - possible issue - please check!') + print('') + input('Press Enter to continue') + print('') r = convert_summary_csv_to_experiment(path, version, env) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} def convert_summary_csv_to_experiment(path, version, env): - print ('* Processing MLPerf repo in cache path: {}'.format(path)) + print('* Processing MLPerf repo in cache path: {}'.format(path)) cur_dir = os.getcwd() # Get Git URL os.chdir(path) - burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print (' Git URL: {}'.format(url)) + print(' Git URL: {}'.format(url)) os.chdir(cur_dir) if os.path.isfile(file_summary): summary = [] - with open (file_summary, encoding = 'utf-8') as fcsv: + with open(file_summary, encoding='utf-8') as fcsv: csv_reader = csv.DictReader(fcsv) for rows in csv_reader: @@ -145,42 +154,43 @@ def convert_summary_csv_to_experiment(path, version, env): v = rows[k] if v == 'False': - v=False + v = False elif v == 'True': - v=True + v = True else: try: - v=float(v) + v = float(v) - if v==int(v): - v=int(v) + if v == int(v): + v = int(v) except ValueError: pass result[k] = v # Add extra tags - if url!='': - result['git_url']=url + if url != '': + result['git_url'] = url - location = result.get('Location','') + location = result.get('Location', '') if location != '': - result['url']=url+'/tree/master/'+location + result['url'] = url + '/tree/master/' + location accuracy = result.get('Accuracy', 0.0) # # print (accuracy, type(accuracy)) - if accuracy!=None and accuracy!='None' and accuracy>0: - result['Accuracy_div_100'] = float('{:.5f}'.format(result['Accuracy']/100)) + if accuracy is not None and accuracy != 'None' and accuracy > 0: + result['Accuracy_div_100'] = float( + '{:.5f}'.format(result['Accuracy'] / 100)) # Add ratios - 
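[Editor's note] The summary-CSV coercion above (the 'False'/'True'-to-bool and numeric-string-to-float/int loop) is easy to get subtly wrong, so here is a standalone sketch of the same rules; the helper name is hypothetical and not part of this diff:

```python
def coerce_csv_value(v):
    # Booleans first, then numbers; integral floats collapse to int,
    # anything unparseable stays a string (mirrors the loop above).
    if v == 'False':
        return False
    if v == 'True':
        return True
    try:
        f = float(v)
        return int(f) if f.is_integer() else f
    except ValueError:
        return v

# coerce_csv_value('99.9') -> 99.9;  coerce_csv_value('80') -> 80
# coerce_csv_value('True') -> True;  coerce_csv_value('N/A') -> 'N/A'
```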
# Append to summary summary.append(result) - r=utils.save_json(file_summary_json.format(version), summary) - if r['return']>0: return r + r = utils.save_json(file_summary_json.format(version), summary) + if r['return'] > 0: + return r # Create virtual experiment entries experiment = {} @@ -203,48 +213,54 @@ def convert_summary_csv_to_experiment(path, version, env): if has_power: xdivision += '-power' - # If datacenter,edge - remove ,edge to be consistent with https://mlcommons.org/en/inference-datacenter-21/ - j=system_type.find(',') - if j>=0: - system_type=system_type[:j] + # If datacenter,edge - remove ,edge to be consistent with + # https://mlcommons.org/en/inference-datacenter-21/ + j = system_type.find(',') + if j >= 0: + system_type = system_type[:j] scenario = result['Scenario'].lower() - name = 'mlperf-inference--{}--'+system_type+'--'+xdivision+'--'+task+'--'+scenario + name = 'mlperf-inference--{}--' + system_type + \ + '--' + xdivision + '--' + task + '--' + scenario name_all = name.format('all') name_ver = name.format(version) for name in [name_all, name_ver]: - if name not in experiment: experiment[name]=[] + if name not in experiment: + experiment[name] = [] experiment[name].append(result) # Checking experiment - env_target_repo=env.get('CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO','').strip() - target_repo='' if env_target_repo=='' else env_target_repo+':' - + env_target_repo = env.get( + 'CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO', '').strip() + target_repo = '' if env_target_repo == '' else env_target_repo + ':' - print ('') + print('') for name in experiment: - print (' Preparing experiment artifact "{}"'.format(name)) + print(' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') - if 'mlperf' not in tags: tags.insert(0, 'mlperf') + if 'mlperf' not in tags: + tags.insert(0, 'mlperf') # Checking if experiment already exists - r = cm.access({'action':'find', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - r = cm.access({'action':'add', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name, - 'tags':tags}) - if r['return']>0: return r + if len(lst) == 0: + r = cm.access({'action': 'add', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name, + 'tags': tags}) + if r['return'] > 0: + return r path = r['path'] else: @@ -262,12 +278,14 @@ def convert_summary_csv_to_experiment(path, version, env): path2 = dd break - if path2=='': + if path2 == '': r = utils.get_current_date_time({}) - if r['return']>0: return r + if r['return'] > 0: + return r - date_time = r['iso_datetime'].replace(':','-').replace('T','.') + date_time = r['iso_datetime'].replace( + ':', '-').replace('T', '.') path2 = os.path.join(path, date_time) @@ -277,8 +295,9 @@ def convert_summary_csv_to_experiment(path, version, env): fresult = os.path.join(path2, file_result) if os.path.isfile(fresult): - r=utils.load_json(fresult) - if r['return']>0: return r + r = utils.load_json(fresult) + if r['return'] > 0: + return r existing_results = r['meta'] @@ -290,10 +309,11 @@ def convert_summary_csv_to_experiment(path, version, env): for result2 in results: matched = True - # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + # Need to iterate over keys in the new results 
since + # old results can have more keys (derivates, etc) for k in result2: - if k!='uid': - if k not in result or result2[k]!=result[k]: + if k != 'uid': + if k not in result or result2[k] != result[k]: matched = False break @@ -305,28 +325,30 @@ def convert_summary_csv_to_experiment(path, version, env): results.append(result) # Check extra keys - final_results=[] + final_results = [] for result in results: # Generate UID if 'uid' not in result: - r=utils.gen_uid() - if r['return']>0: return r + r = utils.gen_uid() + if r['return'] > 0: + return r result['uid'] = r['uid'] # Get Result and Units together if 'Result' in result and 'Units' in result: - result['Result_Units']=result['Units'] + result['Result_Units'] = result['Units'] # Temporal hack for Power to separate power from the graph - units = result.get('Units','') + units = result.get('Units', '') if units == 'Watts' or 'joules' in units: if 'Result_Power' not in result: - result['Result_Power']=result['Result'] - result['Result']=None + result['Result_Power'] = result['Result'] + result['Result'] = None # Write results - r=utils.save_json(fresult, results) - if r['return']>0: return r + r = utils.save_json(fresult, results) + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} diff --git a/script/import-mlperf-tiny-to-experiment/customize.py b/script/import-mlperf-tiny-to-experiment/customize.py index bb31698f8c..5fee2ca853 100644 --- a/script/import-mlperf-tiny-to-experiment/customize.py +++ b/script/import-mlperf-tiny-to-experiment/customize.py @@ -8,10 +8,11 @@ file_summary_json = 'mlperf-inference-summary.json' file_result = 'cm-result.json' -fix_benchmark_names = {'anomaly_detection':'ad', - 'image_classification':'ic', - 'keyword_spotting':'kws', - 'visual_wake_words':'vww'} +fix_benchmark_names = {'anomaly_detection': 'ad', + 'image_classification': 'ic', + 'keyword_spotting': 'kws', + 'visual_wake_words': 'vww'} + def preprocess(i): @@ -20,10 +21,11 @@ def preprocess(i): cur_dir = os.getcwd() # Query cache for results dirs - r = cm.access({'action':'find', - 'automation':'cache,541d6f712a6b464e', - 'tags':'get,repo,mlperf-tiny-results'}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-tiny-results'}) + if r['return'] > 0: + return r lst = r['list'] @@ -38,31 +40,33 @@ def preprocess(i): version = '' for t in tags: if t.startswith('version-'): - version = 'v'+t[8:] + version = 'v' + t[8:] break r = convert_repo_to_experiment(path, version, env) - if r['return']>0: return r + if r['return'] > 0: + return r - print ('') + print('') - return {'return':0} + return {'return': 0} def convert_repo_to_experiment(path, version, env): - print ('') - print ('Processing MLPerf repo from CM cache path: {}'.format(path)) - print ('* Version: {}'.format(version)) + print('') + print('Processing MLPerf repo from CM cache path: {}'.format(path)) + print('* Version: {}'.format(version)) cur_dir = os.getcwd() # Get Git URL os.chdir(path) - burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print ('* Git URL: {}'.format(url)) + print('* Git URL: {}'.format(url)) # Create virtual experiment entries experiments = {} @@ -70,20 +74,21 @@ def convert_repo_to_experiment(path, version, env): for division in ['closed', 'open']: p1 = os.path.join(path, division) if os.path.isdir(p1): - print (' * 
Processing division: {}'.format(division)) + print(' * Processing division: {}'.format(division)) companies = os.listdir(p1) for company in companies: - p2 = os.path.join (p1, company) + p2 = os.path.join(p1, company) if os.path.isdir(p2): - print (' * Processing company: {}'.format(company)) + print(' * Processing company: {}'.format(company)) presults = os.path.join(p2, 'results') psystems = os.path.join(p2, 'systems') pcode = os.path.join(p2, 'code') - if os.path.isdir(presults) and os.path.isdir(psystems) and os.path.isdir(pcode): + if os.path.isdir(presults) and os.path.isdir( + psystems) and os.path.isdir(pcode): # Exception for OctoML presults2 = [presults] @@ -101,95 +106,128 @@ def convert_repo_to_experiment(path, version, env): for system in systems: psystem = os.path.join(presult, system) if os.path.isdir(psystem): - print (' * Processing result for system: {}'.format(system)) + print( + ' * Processing result for system: {}'.format(system)) # Check system file - psystem_desc = os.path.join(psystems, system+'.json') + psystem_desc = os.path.join( + psystems, system + '.json') psystem_dict = {} - print (' File: {}'.format(psystem_desc)) + print( + ' File: {}'.format(psystem_desc)) # Check exceptions if version == 'v1.0': if company == 'OctoML': x = os.path.basename(presult) - psystem_desc = os.path.join(psystems, 'system_description_'+system.replace('-','')+'_'+x+'.json') + psystem_desc = os.path.join( + psystems, + 'system_description_' + + system.replace( + '-', + '') + + '_' + + x + + '.json') elif company == 'STMicroelectronics': - psystem_desc = os.path.join(psystems, system, system+'_system_description.json') - if not os.path.isfile(psystem_desc): - psystem_desc = os.path.join(psystems, system, system.replace('-','_')+'_system_description.json') + psystem_desc = os.path.join( + psystems, system, system + '_system_description.json') + if not os.path.isfile( + psystem_desc): + psystem_desc = os.path.join( + psystems, system, system.replace( + '-', '_') + '_system_description.json') elif company == 'syntiant': - psystem_desc = os.path.join(psystems, system, system+'.json') + psystem_desc = os.path.join( + psystems, system, system + '.json') elif company == 'hls4ml': - psystem_desc = os.path.join(psystems, 'system_description_pynq.json') + psystem_desc = os.path.join( + psystems, 'system_description_pynq.json') elif version == 'v0.7': if company == 'renesas': - psystem_desc = os.path.join(psystems, system+'_System_Description.json') + psystem_desc = os.path.join( + psystems, system + '_System_Description.json') elif company == 'STMicroelectronics': - psystem_desc = os.path.join(psystems, system, system+'_system_description.json') - if not os.path.isfile(psystem_desc): - psystem_desc = os.path.join(psystems, system, system.replace('-','_')+'_system_description.json') + psystem_desc = os.path.join( + psystems, system, system + '_system_description.json') + if not os.path.isfile( + psystem_desc): + psystem_desc = os.path.join( + psystems, system, system.replace( + '-', '_') + '_system_description.json') elif company == 'syntiant': - psystem_desc = os.path.join(psystems, system, system+'.json') + psystem_desc = os.path.join( + psystems, system, system + '.json') elif company == 'hls4ml-finn': - psystem_desc = os.path.join(psystems, 'system_description_'+system[:4]+'.json') - + psystem_desc = os.path.join( + psystems, 'system_description_' + system[:4] + '.json') if os.path.isfile(psystem_desc): x = '' if version == 'v1.0': if company == 'OctoML': - x='}\n\t"' + x = '}\n\t"' elif 
company == 'syntiant': - x='"\n\t"' + x = '"\n\t"' elif company == 'hls4ml': - x='dummy' + x = 'dummy' elif version == 'v0.7': if company == 'syntiant': - x='"\n\t"' + x = '"\n\t"' - if x!='': + if x != '': r = utils.load_txt(psystem_desc) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] j = s.find(x) - if j>=0: - s=s[:j+1]+','+s[j+1:] + if j >= 0: + s = s[:j + 1] + ',' + s[j + 1:] if s.endswith(',\n'): - s=s[:-2]+'}' + s = s[:-2] + '}' psystem_dict = json.loads(s) else: r = utils.load_json(psystem_desc) - if r['return']>0: return r + if r['return'] > 0: + return r psystem_dict = r['meta'] else: - print (' * Warning: system description not found in {}'.format(psystem_desc)) - input (' Press to continue') + print( + ' * Warning: system description not found in {}'.format(psystem_desc)) + input( + ' Press to continue') for benchmark in os.listdir(psystem): - pbenchmark = os.path.join(psystem, benchmark) + pbenchmark = os.path.join( + psystem, benchmark) if os.path.isdir(pbenchmark): - print (' * Processing benchmark: {}'.format(benchmark)) + print( + ' * Processing benchmark: {}'.format(benchmark)) models = [''] # May have retrained models - pperf = os.path.join(pbenchmark, 'performance', 'results.txt') + pperf = os.path.join( + pbenchmark, 'performance', 'results.txt') if not os.path.isfile(pperf): - pperf = os.path.join(pbenchmark, 'performance', 'performance_results.txt') + pperf = os.path.join( + pbenchmark, 'performance', 'performance_results.txt') if not os.path.isfile(pperf): # likely models models = [] - for model in os.listdir(pbenchmark): - pmodel = os.path.join(pbenchmark, model) + for model in os.listdir( + pbenchmark): + pmodel = os.path.join( + pbenchmark, model) if os.path.isdir(pmodel): models.append(model) @@ -197,190 +235,239 @@ def convert_repo_to_experiment(path, version, env): results = {} - if model!='': - print (' * Processing model: {}'.format(model)) - pbenchmark = os.path.join(psystem, benchmark, model) + if model != '': + print( + ' * Processing model: {}'.format(model)) + pbenchmark = os.path.join( + psystem, benchmark, model) - perf_file_type=0 - pperf = os.path.join(pbenchmark, 'performance', 'results.txt') + perf_file_type = 0 + pperf = os.path.join( + pbenchmark, 'performance', 'results.txt') if not os.path.isfile(pperf): - pperf = os.path.join(pbenchmark, 'performance', 'performance_results.txt') - perf_file_type=1 # outdated/weird - - paccuracy = os.path.join(pbenchmark, 'accuracy', 'results.txt') - if not os.path.isfile(paccuracy): - paccuracy = os.path.join(pbenchmark, 'accuracy', 'accuracy_results.txt') - - penergy = os.path.join(pbenchmark, 'energy', 'results.txt') - - if os.path.isfile(pperf) and os.path.isfile(paccuracy): + pperf = os.path.join( + pbenchmark, 'performance', 'performance_results.txt') + perf_file_type = 1 # outdated/weird + + paccuracy = os.path.join( + pbenchmark, 'accuracy', 'results.txt') + if not os.path.isfile( + paccuracy): + paccuracy = os.path.join( + pbenchmark, 'accuracy', 'accuracy_results.txt') + + penergy = os.path.join( + pbenchmark, 'energy', 'results.txt') + + if os.path.isfile( + pperf) and os.path.isfile(paccuracy): r = utils.load_txt(pperf) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] - median_throughput=0 + median_throughput = 0 - x1='Median throughput is ' if perf_file_type==0 else 'Throughput :' - x2=21 if perf_file_type==0 else 18 + x1 = 'Median throughput is ' if perf_file_type == 0 else 'Throughput :' + x2 = 21 if perf_file_type == 0 else 18 j = 
s.find(x1) - if j>=0: - j1 = s.find(' inf./sec.', j) - if j1>=0: - median_throughput=float(s[j+x2:j1].strip()) - results['median_throughput']=median_throughput - results['median_throughput_metric']='inf./sec.' - results['Result']=median_throughput - results['_Result']=median_throughput - - if median_throughput==0: - print (' * Warning: median_throughput was not detected in {}'.format(pperf)) - input (' Press to continue') - - r = utils.load_txt(paccuracy, split=True) - if r['return']>0: return r + if j >= 0: + j1 = s.find( + ' inf./sec.', j) + if j1 >= 0: + median_throughput = float( + s[j + x2:j1].strip()) + results['median_throughput'] = median_throughput + results['median_throughput_metric'] = 'inf./sec.' + results['Result'] = median_throughput + results['_Result'] = median_throughput + + if median_throughput == 0: + print( + ' * Warning: median_throughput was not detected in {}'.format(pperf)) + input( + ' Press to continue') + + r = utils.load_txt( + paccuracy, split=True) + if r['return'] > 0: + return r lines = r['list'] - found=False + found = False for line in lines: - j = line.find('ulp-mlperf: ') - if j>=0: - j1 = line.find(':', j+12) - if j1>=0: - accuracy_key = 'accuracy_'+line[j+12:j1] - value = line[j1+2:] - - if value.endswith('%'): + j = line.find( + 'ulp-mlperf: ') + if j >= 0: + j1 = line.find( + ':', j + 12) + if j1 >= 0: + accuracy_key = 'accuracy_' + \ + line[j + 12:j1] + value = line[j1 + 2:] + + if value.endswith( + '%'): value = value[:-1] - results[accuracy_key+'_metric']='%' + results[accuracy_key + + '_metric'] = '%' - value = float(value) + value = float( + value) results[accuracy_key] = value if not found: - # first value + # first + # value results['Accuracy'] = value results['_Accuracy'] = value - found = True if not found: - print (' * Warning: accuracy not found in the file {}'.format(paccuracy)) - input (' Press to continue') + print( + ' * Warning: accuracy not found in the file {}'.format(paccuracy)) + input( + ' Press to continue') else: - print (' * Warning: performance or accuracy files are not present in this submission') - input (' Press to continue') + print( + ' * Warning: performance or accuracy files are not present in this submission') + input( + ' Press to continue') if os.path.isfile(penergy): r = utils.load_txt(penergy) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] - median_throughput=0 - - j = s.find('Median throughput is ') - if j>=0: - j1 = s.find(' inf./sec.', j) - if j1>=0: - median_throughput=float(s[j+21:j1]) - - results['median_energy_median_throughput']=median_throughput - results['median_energy_median_throughput_metric']='inf./sec.' - - if median_throughput==0: - print (' * Warning: median_throughput was not detected in {}'.format(penergy)) - input (' Press to continue') + median_throughput = 0 + + j = s.find( + 'Median throughput is ') + if j >= 0: + j1 = s.find( + ' inf./sec.', j) + if j1 >= 0: + median_throughput = float( + s[j + 21:j1]) + + results['median_energy_median_throughput'] = median_throughput + results['median_energy_median_throughput_metric'] = 'inf./sec.' 
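[Editor's note] The throughput parsing above relies on find() plus hard-coded character offsets (21 and 18 past the marker), which is brittle if the results layout shifts. A hedged regex sketch of the same extraction, covering the two phrasings the `perf_file_type` switch targets; the helper name is hypothetical and not part of this diff:

```python
import re

# 'Median throughput is 123.4 inf./sec.' (perf_file_type == 0) or the
# older 'Throughput : 123.4 inf./sec.' layout (perf_file_type == 1).
_THROUGHPUT_RE = re.compile(
    r'(?:Median throughput is|Throughput\s*:)\s*([0-9][0-9.]*)\s*inf\./sec\.')

def parse_median_throughput(text):
    # Returns 0.0 when no throughput line is found, mirroring the
    # importer's warning path above.
    m = _THROUGHPUT_RE.search(text)
    return float(m.group(1)) if m else 0.0
```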
+ + if median_throughput == 0: + print( + ' * Warning: median_throughput was not detected in {}'.format(penergy)) + input( + ' Press to continue') else: - median_energy_cost=0 + median_energy_cost = 0 - j = s.find('Median energy cost is ') - if j>=0: - j1 = s.find(' uJ/inf.', j) - if j1>=0: - median_energy_cost=float(s[j+22:j1]) + j = s.find( + 'Median energy cost is ') + if j >= 0: + j1 = s.find( + ' uJ/inf.', j) + if j1 >= 0: + median_energy_cost = float( + s[j + 22:j1]) - results['median_energy_cost']=median_energy_cost - results['median_energy_cost_metric']='uj/inf.' + results['median_energy_cost'] = median_energy_cost + results['median_energy_cost_metric'] = 'uj/inf.' - if median_energy_cost==0: - print (' * Warning: median_energy_cost was not detected in {}'.format(penergy)) - input (' Press to continue') + if median_energy_cost == 0: + print( + ' * Warning: median_energy_cost was not detected in {}'.format(penergy)) + input( + ' Press to continue') - print (' * Results dict: {}'.format(results)) + print( + ' * Results dict: {}'.format(results)) # Finalizing keys results.update(psystem_dict) - xbenchmark = benchmark if benchmark not in fix_benchmark_names else fix_benchmark_names[benchmark] + xbenchmark = benchmark if benchmark not in fix_benchmark_names else fix_benchmark_names[ + benchmark] - results['git_url']=url+'/tree/master/'+division+'/'+company + results['git_url'] = url + \ + '/tree/master/' + division + '/' + company - results['version']=version - results['__version']=version - results['Organization']=company - results['__Organization']=company - results['Division']=division - results['Benchmark']=xbenchmark - results['__System']=system - - if model!='': - results['Model']=model - results['__Model']=model + results['version'] = version + results['__version'] = version + results['Organization'] = company + results['__Organization'] = company + results['Division'] = division + results['Benchmark'] = xbenchmark + results['__System'] = system + if model != '': + results['Model'] = model + results['__Model'] = model # Prepare experiment name - cm_name = 'mlperf-tiny--{}--'+division+'--'+xbenchmark - print (' * CM experiment name: {}'.format(cm_name)) - - name_all = cm_name.format('all') - name_ver = cm_name.format(version) - - for name in [name_all, name_ver]: - if name not in experiments: experiments[name]=[] - experiments[name].append(results) - + cm_name = 'mlperf-tiny--{}--' + division + '--' + xbenchmark + print( + ' * CM experiment name: {}'.format(cm_name)) + + name_all = cm_name.format( + 'all') + name_ver = cm_name.format( + version) + + for name in [ + name_all, name_ver]: + if name not in experiments: + experiments[name] = [] + experiments[name].append( + results) else: - print (' * Warning: some directories are not present in this submission') - input (' Press to continue') + print( + ' * Warning: some directories are not present in this submission') + input(' Press to continue') os.chdir(cur_dir) - r=utils.save_json(file_summary_json, experiments) - if r['return']>0: return r + r = utils.save_json(file_summary_json, experiments) + if r['return'] > 0: + return r - env_target_repo=env.get('CM_IMPORT_TINYMLPERF_TARGET_REPO','').strip() - target_repo='' if env_target_repo=='' else env_target_repo+':' + env_target_repo = env.get('CM_IMPORT_TINYMLPERF_TARGET_REPO', '').strip() + target_repo = '' if env_target_repo == '' else env_target_repo + ':' # Checking experiment - print ('') + print('') for name in experiments: - print (' Preparing experiment artifact 
"{}"'.format(name)) + print(' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') - if 'mlperf' not in tags: tags.insert(0, 'mlperf') + if 'mlperf' not in tags: + tags.insert(0, 'mlperf') # Checking if experiment already exists - r = cm.access({'action':'find', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - r = cm.access({'action':'add', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name, - 'tags':tags}) - if r['return']>0: return r + if len(lst) == 0: + r = cm.access({'action': 'add', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name, + 'tags': tags}) + if r['return'] > 0: + return r path = r['path'] else: @@ -398,12 +485,13 @@ def convert_repo_to_experiment(path, version, env): path2 = dd break - if path2=='': + if path2 == '': r = utils.get_current_date_time({}) - if r['return']>0: return r + if r['return'] > 0: + return r - date_time = r['iso_datetime'].replace(':','-').replace('T','.') + date_time = r['iso_datetime'].replace(':', '-').replace('T', '.') path2 = os.path.join(path, date_time) @@ -413,8 +501,9 @@ def convert_repo_to_experiment(path, version, env): fresult = os.path.join(path2, file_result) if os.path.isfile(fresult): - r=utils.load_json(fresult) - if r['return']>0: return r + r = utils.load_json(fresult) + if r['return'] > 0: + return r existing_results = r['meta'] @@ -426,10 +515,11 @@ def convert_repo_to_experiment(path, version, env): for result2 in results: matched = True - # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + # Need to iterate over keys in the new results since old + # results can have more keys (derivates, etc) for k in result2: - if k!='uid': - if k not in result or result2[k]!=result[k]: + if k != 'uid': + if k not in result or result2[k] != result[k]: matched = False break @@ -441,18 +531,19 @@ def convert_repo_to_experiment(path, version, env): results.append(result) # Check extra keys - final_results=[] + final_results = [] for result in results: # Generate UID if 'uid' not in result: - r=utils.gen_uid() - if r['return']>0: return r + r = utils.gen_uid() + if r['return'] > 0: + return r result['uid'] = r['uid'] # Write results - r=utils.save_json(fresult, results) - if r['return']>0: return r - + r = utils.save_json(fresult, results) + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} diff --git a/script/import-mlperf-training-to-experiment/customize.py b/script/import-mlperf-training-to-experiment/customize.py index 18130ba86d..e46cb24b45 100644 --- a/script/import-mlperf-training-to-experiment/customize.py +++ b/script/import-mlperf-training-to-experiment/customize.py @@ -14,56 +14,57 @@ file_result = 'cm-result.json' model2task = { - "resnet":"image-classification", - "maskrcnn":"object-detection-heavy-weight", - "ssd":"object-detection-light-weight", - "minigo": "reinforcement-learning", - "rnnt":"speech-recognition", - "bert":"language-processing", - "dlrm":"recommendation", - "3dunet":"image-segmentation" + "resnet": "image-classification", + "maskrcnn": "object-detection-heavy-weight", + "ssd": "object-detection-light-weight", + "minigo": "reinforcement-learning", + "rnnt": "speech-recognition", + "bert": "language-processing", + "dlrm": 
"recommendation", + "3dunet": "image-segmentation" } model2dataset = { - "resnet":"ImageNet", - "maskrcnn":"COCO", - "ssd":"OpenImages", - "minigo": "Go", - "rnnt":"LibriSpeech", - "bert":"Wikipedia", - "dlrm":"1TB Clickthrough", - "3dunet":"KiTS19" + "resnet": "ImageNet", + "maskrcnn": "COCO", + "ssd": "OpenImages", + "minigo": "Go", + "rnnt": "LibriSpeech", + "bert": "Wikipedia", + "dlrm": "1TB Clickthrough", + "3dunet": "KiTS19" } model2accuracy = { - "resnet":75.9, - "maskrcnn":0.377, - "ssd":34.0, - "minigo": 50, - "rnnt":0.058, - "bert":0.72, - "dlrm":0.8025, - "3dunet":0.908 + "resnet": 75.9, + "maskrcnn": 0.377, + "ssd": 34.0, + "minigo": 50, + "rnnt": 0.058, + "bert": 0.72, + "dlrm": 0.8025, + "3dunet": 0.908 } model2accuracy_metric = { - "resnet":"% classification", - "maskrcnn":"Box min AP", - "ssd":"% mAP", - "minigo": "% win rate vs. checkpoint", - "rnnt":"Word Error Rate", - "bert":"Mask-LM accuracy", - "dlrm":"AUC", - "3dunet":"Mean DICE score" + "resnet": "% classification", + "maskrcnn": "Box min AP", + "ssd": "% mAP", + "minigo": "% win rate vs. checkpoint", + "rnnt": "Word Error Rate", + "bert": "Mask-LM accuracy", + "dlrm": "AUC", + "3dunet": "Mean DICE score" } + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -75,10 +76,11 @@ def preprocess(i): os.remove(f) # Query cache for results dirs - r = cm.access({'action':'find', - 'automation':'cache,541d6f712a6b464e', - 'tags':'get,repo,mlperf-training-results'}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-training-results'}) + if r['return'] > 0: + return r lst = r['list'] @@ -105,43 +107,46 @@ def preprocess(i): env['CM_MLPERF_TRAINING_CURRENT_DIR'] = cur_dir env['CM_MLPERF_TRAINING_REPO_VERSION'] = version - print ('') - print ('Repo path: {}'.format(path)) - print ('Repo version: {}'.format(version)) + print('') + print('Repo path: {}'.format(path)) + print('Repo version: {}'.format(version)) - r = automation.run_native_script({'run_script_input':run_script_input, - 'env':env, - 'script_name':'run_mlperf_logger'}) - if r['return']>0: + r = automation.run_native_script({'run_script_input': run_script_input, + 'env': env, + 'script_name': 'run_mlperf_logger'}) + if r['return'] > 0: return r r = convert_summary_csv_to_experiment(path, version, env) - if r['return']>0: return r + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} def convert_summary_csv_to_experiment(path, version, env): - print ('* Processing MLPerf training results repo in cache path: {}'.format(path)) + print('* Processing MLPerf training results repo in cache path: {}'.format(path)) cur_dir = os.getcwd() # Get Git URL os.chdir(path) - burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print (' Git URL: {}'.format(url)) + print(' Git URL: {}'.format(url)) os.chdir(cur_dir) if not os.path.isfile(file_summary): - return {'return':1, 'error':'{} was not created'.format(file_summary)} + return {'return': 1, + 'error': '{} was not created'.format(file_summary)} else: summary = [] - with open (file_summary, encoding = 'utf-8') as fcsv: + with open(file_summary, encoding='utf-8') as fcsv: csv_reader 
= csv.DictReader(fcsv) for rows in csv_reader: @@ -153,39 +158,40 @@ def convert_summary_csv_to_experiment(path, version, env): v = rows[k] if v == 'False': - v=False + v = False elif v == 'True': - v=True + v = True else: try: - v=float(v) + v = float(v) - if v==int(v): - v=int(v) + if v == int(v): + v = int(v) except ValueError: pass result[k] = v # Add extra tags - if url!='': - result['git_url']=url + if url != '': + result['git_url'] = url - location = result.get('Location','') + location = result.get('Location', '') if location != '': - result['url']=url+'/tree/master/'+location + result['url'] = url + '/tree/master/' + location - if result.get('Accuracy',0)>0: - result['Accuracy_div_100'] = float('{:.5f}'.format(result['Accuracy']/100)) + if result.get('Accuracy', 0) > 0: + result['Accuracy_div_100'] = float( + '{:.5f}'.format(result['Accuracy'] / 100)) # Add ratios - # Append to summary summary.append(result) - r=utils.save_json(file_summary_json.format(version), summary) - if r['return']>0: return r + r = utils.save_json(file_summary_json.format(version), summary) + if r['return'] > 0: + return r # Create virtual experiment entries experiment = {} @@ -193,7 +199,7 @@ def convert_summary_csv_to_experiment(path, version, env): for result in summary: for model in model2task: - if result.get(model, '')!='': + if result.get(model, '') != '': result1 = {} result1['Result'] = result[model] @@ -213,52 +219,58 @@ def convert_summary_csv_to_experiment(path, version, env): result1['_Dataset'] = model2dataset[model] result1['_Model_ID'] = model - result1['version']=version - result1['_version']=version - result1['Organization']=result['submitter'] - result1['_Organization']=result['submitter'] - result1['_System']=result['system'] + result1['version'] = version + result1['_version'] = version + result1['Organization'] = result['submitter'] + result1['_Organization'] = result['submitter'] + result1['_System'] = result['system'] for k in result: - if k==model or k not in model2task: - result1[k]=result[k] + if k == model or k not in model2task: + result1[k] = result[k] xdivision = result['division'] - name = 'mlperf-training--{}--'+xdivision+'--'+model2task[model] + name = 'mlperf-training--{}--' + \ + xdivision + '--' + model2task[model] name_all = name.format('all') name_ver = name.format(version) for name in [name_all, name_ver]: - if name not in experiment: experiment[name]=[] + if name not in experiment: + experiment[name] = [] experiment[name].append(result1) # Checking experiment - env_target_repo=env.get('CM_IMPORT_MLPERF_TRAINING_TARGET_REPO','').strip() - target_repo='' if env_target_repo=='' else env_target_repo+':' + env_target_repo = env.get( + 'CM_IMPORT_MLPERF_TRAINING_TARGET_REPO', '').strip() + target_repo = '' if env_target_repo == '' else env_target_repo + ':' - print ('') + print('') for name in experiment: - print (' Preparing experiment artifact "{}"'.format(name)) + print(' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') - if 'mlperf' not in tags: tags.insert(0, 'mlperf') + if 'mlperf' not in tags: + tags.insert(0, 'mlperf') # Checking if experiment already exists - r = cm.access({'action':'find', - 'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name}) - if r['return']>0: return r + r = cm.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - r = cm.access({'action':'add', - 
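The summary-CSV loop above normalizes each cell: literal 'True'/'False' become booleans, numeric strings become floats (ints when whole), and everything else stays a string. A compact sketch of that coercion with an invented row:

```python
def coerce(v):
    # Same normalization as the CSV loop: booleans, then numbers, else string.
    if v == 'False':
        return False
    if v == 'True':
        return True
    try:
        f = float(v)
        return int(f) if f == int(f) else f
    except ValueError:
        return v


row = {'submitter': 'ACME', 'resnet': '23.4', 'epochs': '8', 'closed': 'True'}
print({k: coerce(v) for k, v in row.items()})
# {'submitter': 'ACME', 'resnet': 23.4, 'epochs': 8, 'closed': True}
```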
'automation':'experiment,a0a2d123ef064bcb', - 'artifact':target_repo+name, - 'tags':tags}) - if r['return']>0: return r + if len(lst) == 0: + r = cm.access({'action': 'add', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name, + 'tags': tags}) + if r['return'] > 0: + return r path = r['path'] else: @@ -276,12 +288,14 @@ def convert_summary_csv_to_experiment(path, version, env): path2 = dd break - if path2=='': + if path2 == '': r = utils.get_current_date_time({}) - if r['return']>0: return r + if r['return'] > 0: + return r - date_time = r['iso_datetime'].replace(':','-').replace('T','.') + date_time = r['iso_datetime'].replace( + ':', '-').replace('T', '.') path2 = os.path.join(path, date_time) @@ -291,8 +305,9 @@ def convert_summary_csv_to_experiment(path, version, env): fresult = os.path.join(path2, file_result) if os.path.isfile(fresult): - r=utils.load_json(fresult) - if r['return']>0: return r + r = utils.load_json(fresult) + if r['return'] > 0: + return r existing_results = r['meta'] @@ -304,10 +319,11 @@ def convert_summary_csv_to_experiment(path, version, env): for result2 in results: matched = True - # Need to iterate over keys in the new results since old results can have more keys (derivates, etc) + # Need to iterate over keys in the new results since + # old results can have more keys (derivates, etc) for k in result2: - if k!='uid': - if k not in result or result2[k]!=result[k]: + if k != 'uid': + if k not in result or result2[k] != result[k]: matched = False break @@ -319,17 +335,19 @@ def convert_summary_csv_to_experiment(path, version, env): results.append(result) # Check extra keys - final_results=[] + final_results = [] for result in results: # Generate UID if 'uid' not in result: - r=utils.gen_uid() - if r['return']>0: return r + r = utils.gen_uid() + if r['return'] > 0: + return r result['uid'] = r['uid'] # Write results - r=utils.save_json(fresult, results) - if r['return']>0: return r + r = utils.save_json(fresult, results) + if r['return'] > 0: + return r - return {'return':0} + return {'return': 0} diff --git a/script/install-apt-package/customize.py b/script/install-apt-package/customize.py index b8e750ecb9..21e27eedaa 100644 --- a/script/install-apt-package/customize.py +++ b/script/install-apt-package/customize.py @@ -2,6 +2,7 @@ import os import re + def preprocess(i): os_info = i['os_info'] @@ -12,13 +13,15 @@ def preprocess(i): install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') if not install_cmd: - return {'return': 1, 'error': 'Package manager installation command not detected for the given OS'} + return { + 'return': 1, 'error': 'Package manager installation command not detected for the given OS'} sudo = env.get('CM_SUDO', '') - env['CM_APT_INSTALL_CMD'] = sudo + ' ' +install_cmd + ' ' + package_name + env['CM_APT_INSTALL_CMD'] = sudo + ' ' + install_cmd + ' ' + package_name - if env.get('CM_APT_CHECK_CMD', '') != '' and env['CM_APT_INSTALL_CMD'] != '': + if env.get('CM_APT_CHECK_CMD', + '') != '' and env['CM_APT_INSTALL_CMD'] != '': env['CM_APT_INSTALL_CMD'] = f"""{env['CM_APT_CHECK_CMD']} || {env['CM_APT_INSTALL_CMD']}""" - return {'return':0} + return {'return': 0} diff --git a/script/install-aws-cli/customize.py b/script/install-aws-cli/customize.py index df2744ac4d..032a4f3bf2 100644 --- a/script/install-aws-cli/customize.py +++ b/script/install-aws-cli/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return 
{'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,4 +15,4 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} diff --git a/script/install-bazel/customize.py b/script/install-bazel/customize.py index d94ba83c81..4bd79760b8 100644 --- a/script/install-bazel/customize.py +++ b/script/install-bazel/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,11 +12,12 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) # if 'CM_GIT_CHECKOUT' not in env: # env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version @@ -31,14 +33,14 @@ def preprocess(i): platform = env['CM_HOST_PLATFORM_FLAVOR'] ext = '.sh' - filename = 'bazel-{}-{}{}-{}{}'.format(need_version, prefix, xos, platform, ext) - url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format(need_version, filename) + url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format( + need_version, filename) cur_dir = os.getcwd() @@ -55,6 +57,6 @@ def preprocess(i): env['CM_BAZEL_INSTALLED_PATH'] = path env['CM_BAZEL_BIN_WITH_PATH'] = os.path.join(path, bazel_bin) - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() - return {'return':0} + return {'return': 0} diff --git a/script/install-cmake-prebuilt/customize.py b/script/install-cmake-prebuilt/customize.py index 85596e6e9f..2f5c4c4f1a 100644 --- a/script/install-cmake-prebuilt/customize.py +++ b/script/install-cmake-prebuilt/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,11 +12,12 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) version_split = need_version.split(".") while len(version_split) < 3: @@ -26,12 +28,13 @@ def preprocess(i): host_os_bits = env['CM_HOST_OS_BITS'] if os_info['platform'] != 'windows': - host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI # Prepare package name if os_info['platform'] == 'darwin': if host_os_bits != '64': - return {'return':1, 'error':'this package doesn\'t support non 64-bit MacOS'} + return {'return': 1, + 'error': 'this package doesn\'t support non 64-bit MacOS'} package_name = 'cmake-' + need_version + '-macos-universal.tar.gz' @@ -46,54 +49,58 @@ def preprocess(i): package_name += '.zip' else: - package_name='cmake-' + need_version + '-linux-' + package_name = 'cmake-' + need_version + '-linux-' - if host_os_machine.startswith('arm') or 
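The bazel hunk above only rewraps the release-URL construction; the filename is assembled from version, OS and platform fields. A sketch with example values (the prefix and OS strings here are illustrative, not read from the real env):

```python
need_version = '6.4.0'                       # illustrative values throughout
prefix, xos, platform, ext = 'installer-', 'linux', 'x86_64', '.sh'

filename = 'bazel-{}-{}{}-{}{}'.format(need_version, prefix, xos, platform, ext)
url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format(
    need_version, filename)

print(url)
# .../download/6.4.0/bazel-6.4.0-installer-linux-x86_64.sh
```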
host_os_machine.startswith('aarch'): - if host_os_bits=='64': + if host_os_machine.startswith( + 'arm') or host_os_machine.startswith('aarch'): + if host_os_bits == '64': package_name += 'aarch64' else: - return {'return':1, 'error':'this script doesn\'t support armv7'} + return {'return': 1, 'error': 'this script doesn\'t support armv7'} else: package_name += 'x86_64' - package_name += '.tar.gz' - + package_name += '.tar.gz' - package_url = 'https://github.com/Kitware/CMake/releases/download/v' + need_version + '/' + package_name + package_url = 'https://github.com/Kitware/CMake/releases/download/v' + \ + need_version + '/' + package_name - print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + print(recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) - print ('') - print ('Downloading from {} ...'.format(package_url)) + print('') + print('Downloading from {} ...'.format(package_url)) cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':package_url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r filename = r['filename'] # Check what to do with this file depending on OS if os_info['platform'] == 'windows': - print ('Unzipping file {}'.format(filename)) + print('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', - 'strip_folders':1, - 'filename':filename}) - if r['return']>0: return r + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'strip_folders': 1, + 'filename': filename}) + if r['return'] > 0: + return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + print('Removing file {}'.format(filename)) os.remove(filename) path_bin = os.path.join(os.getcwd(), 'bin') path_include = os.path.join(os.getcwd(), 'include') elif os_info['platform'] == 'darwin': path_bin = os.path.join(os.getcwd(), 'CMake.app', 'Contents', 'bin') - path_include = os.path.join(os.getcwd(), 'CMake.app', 'Contents', 'include') + path_include = os.path.join( + os.getcwd(), 'CMake.app', 'Contents', 'include') else: path_bin = os.path.join(os.getcwd(), 'bin') path_include = os.path.join(os.getcwd(), 'include') @@ -101,16 +108,17 @@ def preprocess(i): env['CM_CMAKE_PACKAGE'] = filename env['CM_CMAKE_INSTALLED_PATH'] = path_bin - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() bin_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' env['CM_CMAKE_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) - # We don't need to check default paths here because we force install to cache + # We don't need to check default paths here because we force install to + # cache env['+PATH'] = [env['CM_CMAKE_INSTALLED_PATH']] if os.path.isdir(path_include): - env['+C_INCLUDE_PATH'] = [ path_include ] + env['+C_INCLUDE_PATH'] = [path_include] - return {'return':0} + return {'return': 0} diff --git a/script/install-cuda-package-manager/customize.py b/script/install-cuda-package-manager/customize.py index 002e85e2ef..fb84652e3d 100644 --- a/script/install-cuda-package-manager/customize.py +++ b/script/install-cuda-package-manager/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -10,7 +11,6 @@ def preprocess(i): automation = i['automation'] recursion_spaces = 
i['recursion_spaces'] - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() - + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() - return {'return':0} + return {'return': 0} diff --git a/script/install-cuda-prebuilt/customize.py b/script/install-cuda-prebuilt/customize.py index ac20aca719..db49f81b32 100644 --- a/script/install-cuda-prebuilt/customize.py +++ b/script/install-cuda-prebuilt/customize.py @@ -1,13 +1,14 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - if str(env.get('CUDA_SKIP_SUDO','')).lower() == 'true': + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': env['CM_SUDO'] = '' meta = i['meta'] @@ -16,7 +17,8 @@ def preprocess(i): if version not in env.get('CM_CUDA_LINUX_FILENAME', ''): supported_versions = list(meta['versions'].keys()) - return {'return': 1, 'error': "Only CUDA versions {} are supported now".format(', '.join(supported_versions))} + return {'return': 1, 'error': "Only CUDA versions {} are supported now".format( + ', '.join(supported_versions))} install_prefix = env.get('CM_CUDA_INSTALL_PREFIX', os.getcwd()) @@ -30,20 +32,24 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] nvcc_bin = "nvcc" - env['WGET_URL']="https://developer.download.nvidia.com/compute/cuda/"+env['CM_VERSION']+"/local_installers/"+env['CM_CUDA_LINUX_FILENAME'] + env['WGET_URL'] = "https://developer.download.nvidia.com/compute/cuda/" + \ + env['CM_VERSION'] + "/local_installers/" + \ + env['CM_CUDA_LINUX_FILENAME'] extra_options = env.get('CUDA_ADDITIONAL_INSTALL_OPTIONS', '') - if env.get('CM_CUDA_INSTALL_DRIVER','') == "yes": + if env.get('CM_CUDA_INSTALL_DRIVER', '') == "yes": extra_options += " --driver" env['CUDA_ADDITIONAL_INSTALL_OPTIONS'] = extra_options env['CM_CUDA_INSTALLED_PATH'] = os.path.join(install_prefix, 'install') - env['CM_NVCC_BIN_WITH_PATH'] = os.path.join(install_prefix, 'install', 'bin', nvcc_bin) - env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVCC_BIN_WITH_PATH'] + env['CM_NVCC_BIN_WITH_PATH'] = os.path.join( + install_prefix, 'install', 'bin', nvcc_bin) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVCC_BIN_WITH_PATH'] env['CM_CUDA_EXTRA_INSTALL_ARGS'] = extra_install_args - # Set CUDA_RUN_FILE_LOCAL_PATH to empty if not set for backwards compatibility in download file - env['CUDA_RUN_FILE_LOCAL_PATH'] = env.get('CUDA_RUN_FILE_LOCAL_PATH','') + # Set CUDA_RUN_FILE_LOCAL_PATH to empty if not set for backwards + # compatibility in download file + env['CUDA_RUN_FILE_LOCAL_PATH'] = env.get('CUDA_RUN_FILE_LOCAL_PATH', '') - return {'return':0} + return {'return': 0} diff --git a/script/install-diffusers-from-src/customize.py b/script/install-diffusers-from-src/customize.py index 0e1ca24f5c..6d5e7f9db3 100644 --- a/script/install-diffusers-from-src/customize.py +++ b/script/install-diffusers-from-src/customize.py @@ -1,21 +1,22 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - automation = i['automation'] recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/install-gcc-src/customize.py b/script/install-gcc-src/customize.py index caff463edc..a368cdfb8d 100644 --- a/script/install-gcc-src/customize.py +++ b/script/install-gcc-src/customize.py @@ -1,12 
+1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,15 +15,16 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) if 'CM_GIT_CHECKOUT' not in env: env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version env['CM_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') - return {'return':0} + return {'return': 0} diff --git a/script/install-generic-conda-package/customize.py b/script/install-generic-conda-package/customize.py index 2e6486a54d..5310923d66 100644 --- a/script/install-generic-conda-package/customize.py +++ b/script/install-generic-conda-package/customize.py @@ -2,6 +2,7 @@ import os import cmind as cm + def preprocess(i): os_info = i['os_info'] @@ -15,22 +16,22 @@ def preprocess(i): install_cmd = env['CM_CONDA_BIN_WITH_PATH'] + " install -y " if env.get('CM_CONDA_PKG_SRC', '') != '': - install_cmd += " -c "+env['CM_CONDA_PKG_SRC'] + " " + install_cmd += " -c " + env['CM_CONDA_PKG_SRC'] + " " install_cmd += package_name install_cmd += version_string env['CM_CONDA_PKG_INSTALL_CMD'] = install_cmd + return {'return': 0} - return {'return':0} def detect_version(i): # TBD - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) - return {'return':0, 'version':version} + return {'return': 0, 'version': version} def postprocess(i): @@ -39,6 +40,7 @@ def postprocess(i): version = env.get('CM_VERSION', '') if env['CM_CONDA_PKG_NAME'] == "python": - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']), "python") + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']), "python") - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/install-gflags-from-src/customize.py b/script/install-gflags-from-src/customize.py index 216af841e8..030b0b8b45 100644 --- a/script/install-gflags-from-src/customize.py +++ b/script/install-gflags-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,13 +15,14 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] env['CM_GFLAGS_BUILD_PATH'] = os.path.join(os.getcwd(), "gflags", "build") - env['CM_DEPENDENT_CACHED_PATH' ] = env['CM_GFLAGS_BUILD_PATH'] + env['CM_DEPENDENT_CACHED_PATH'] = env['CM_GFLAGS_BUILD_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/install-gflags/customize.py b/script/install-gflags/customize.py index 65872c79a0..d9caffb60c 100644 --- 
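The generic-conda-package hunk above builds the install command from the conda binary, an optional channel and the package spec. A sketch with placeholder values (the version-string format is an assumption); note the doubled space before `-c` comes straight from the original concatenation:

```python
env = {'CM_CONDA_BIN_WITH_PATH': '/opt/conda/bin/conda',
       'CM_CONDA_PKG_SRC': 'conda-forge'}
package_name, version_string = 'numpy', '=1.26.4'   # placeholder pkg/version

install_cmd = env['CM_CONDA_BIN_WITH_PATH'] + ' install -y '
if env.get('CM_CONDA_PKG_SRC', '') != '':
    install_cmd += ' -c ' + env['CM_CONDA_PKG_SRC'] + ' '
install_cmd += package_name + version_string

print(install_cmd)
# /opt/conda/bin/conda install -y  -c conda-forge numpy=1.26.4
```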
a/script/install-gflags/customize.py +++ b/script/install-gflags/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,15 +15,17 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + return {'return': 0} - return {'return':0} def postprocess(i): inp = i['input'] env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/install-github-cli/customize.py b/script/install-github-cli/customize.py index cd7d65a35b..5079c4d741 100644 --- a/script/install-github-cli/customize.py +++ b/script/install-github-cli/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -10,4 +11,4 @@ def preprocess(i): env['CM_TMP_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' - return {'return':0} + return {'return': 0} diff --git a/script/install-intel-neural-speed-from-src/customize.py b/script/install-intel-neural-speed-from-src/customize.py index c40b31af25..9ac64a4f8a 100644 --- a/script/install-intel-neural-speed-from-src/customize.py +++ b/script/install-intel-neural-speed-from-src/customize.py @@ -1,16 +1,18 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_BIN_PATH'], "python") + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_BIN_PATH'], "python") automation = i['automation'] @@ -18,4 +20,4 @@ def preprocess(i): env['+PATH'] = [] - return {'return':0} + return {'return': 0} diff --git a/script/install-ipex-from-src/customize.py b/script/install-ipex-from-src/customize.py index d146b70552..d257493046 100644 --- a/script/install-ipex-from-src/customize.py +++ b/script/install-ipex-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -15,19 +16,25 @@ def preprocess(i): if env.get('CM_USE_LLVM_FOR_IPEX', '') == 'yes': env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] - env['LLVM_DIR'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + env['LLVM_DIR'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") - run_cmd="python setup.py clean && python setup.py install" + run_cmd = "python setup.py clean && python setup.py install" env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} + def postprocess(i): env = 
i['env'] env['CM_IPEX_BUILD_PATH'] = os.path.join(os.getcwd(), "ipex_src", "build") - env['CM_IPEX_INSTALLED_PATH'] = os.path.join(env['CM_IPEX_BUILD_PATH'], "Release", "packages", "intel_extension_for_pytorch") + env['CM_IPEX_INSTALLED_PATH'] = os.path.join( + env['CM_IPEX_BUILD_PATH'], + "Release", + "packages", + "intel_extension_for_pytorch") env['CM_DEPENDENT_CACHED_PATH'] = env['CM_IPEX_INSTALLED_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/install-llvm-prebuilt/customize.py b/script/install-llvm-prebuilt/customize.py index 17e9746925..23c116ee99 100644 --- a/script/install-llvm-prebuilt/customize.py +++ b/script/install-llvm-prebuilt/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -11,42 +12,49 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') clang_file_name = "clang" if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) host_os_bits = env['CM_HOST_OS_BITS'] if os_info['platform'] != 'windows': - host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI # Prepare package name # First check if it is forced by external environment - package_name = env.get('CM_LLVM_PACKAGE','').strip() + package_name = env.get('CM_LLVM_PACKAGE', '').strip() if package_name == '': need_version_split = need_version.split('.') - # If package_name is not forced, attempt to synthesize it based on OS and arch + # If package_name is not forced, attempt to synthesize it based on OS + # and arch if os_info['platform'] == 'darwin': - force_arch = env.get('CM_LLVM_PACKAGE_FORCE_ARCH','') # To allow x86_64 if needed - if force_arch == '': force_arch = 'arm64' - force_darwin_version = env.get('CM_LLVM_PACKAGE_FORCE_DARWIN_VERSION','') + force_arch = env.get( + 'CM_LLVM_PACKAGE_FORCE_ARCH', + '') # To allow x86_64 if needed + if force_arch == '': + force_arch = 'arm64' + force_darwin_version = env.get( + 'CM_LLVM_PACKAGE_FORCE_DARWIN_VERSION', '') if force_darwin_version == '': - if len(need_version_split)>0: + if len(need_version_split) > 0: hver = 0 try: hver = int(need_version_split[0]) - except: + except BaseException: pass - if hver>0 and hver<16: + if hver > 0 and hver < 16: force_darwin_version = '21.0' else: force_darwin_version = '22.0' - package_name = 'clang+llvm-' + need_version + '-'+force_arch+'-apple-darwin'+force_darwin_version+'.tar.xz' + package_name = 'clang+llvm-' + need_version + '-' + force_arch + \ '-apple-darwin' + force_darwin_version + '.tar.xz' elif os_info['platform'] == 'windows': package_name = 'LLVM-' + need_version + '-win' + host_os_bits + '.exe' @@ -61,8 +69,9 @@ def preprocess(i): input('Press Enter to continue!') else: - if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): - if host_os_bits=='64': + if host_os_machine.startswith( + 'arm') or host_os_machine.startswith('aarch'): + if host_os_bits == '64': package_name = 'clang+llvm-' + need_version + '-aarch64-linux-gnu.tar.xz' else: package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' @@ -80,18 +89,18 @@ def preprocess(i): if True: default_os = '22.04' - if 
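The darwin branch above synthesizes a prebuilt-LLVM package name, picking the apple-darwin suffix from the major version. Condensed into a function (inputs are examples; the version cutoff mirrors the logic shown):

```python
def darwin_llvm_package(need_version, force_arch='arm64'):
    # Major versions below 16 map to the darwin21.0 build, newer to darwin22.0.
    try:
        hver = int(need_version.split('.')[0])
    except ValueError:
        hver = 0
    darwin = '21.0' if 0 < hver < 16 else '22.0'
    return ('clang+llvm-' + need_version + '-' + force_arch +
            '-apple-darwin' + darwin + '.tar.xz')


print(darwin_llvm_package('15.0.6'))  # clang+llvm-15.0.6-arm64-apple-darwin21.0.tar.xz
print(darwin_llvm_package('17.0.6'))  # clang+llvm-17.0.6-arm64-apple-darwin22.0.tar.xz
```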
len(need_version_split)>0: + if len(need_version_split) > 0: hver = 0 try: hver = int(need_version_split[0]) - except: + except BaseException: pass - if hver>0: - if hver<16: - default_os='18.04' + if hver > 0: + if hver < 16: + default_os = '18.04' else: - default_os='22.04' + default_os = '22.04' if need_version == '10.0.1': default_os = '16.04' @@ -111,7 +120,7 @@ def preprocess(i): elif need_version == '12.0.1': default_os = '16.04' - #if host_os_version.startswith('18') or host_os_version.startswith('20'): + # if host_os_version.startswith('18') or host_os_version.startswith('20'): # default_os = '18.04' elif need_version == '13.0.0': @@ -149,39 +158,44 @@ def preprocess(i): elif need_version == '17.0.6': default_os = '22.04' - package_name = 'clang+llvm-' + need_version + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' - + package_name = 'clang+llvm-' + need_version + \ + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' - package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + need_version + '/' + package_name + package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + \ + need_version + '/' + package_name - print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + print(recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) - print ('') - print ('Downloading from {} ...'.format(package_url)) + print('') + print('Downloading from {} ...'.format(package_url)) cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':package_url}) - if r['return']>0: return r + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r - filename = r['filename'] # 'clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz' # f['filename'] + # 'clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz' # f['filename'] + filename = r['filename'] env['CM_LLVM_PACKAGE'] = filename env['CM_LLVM_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'bin') - env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(os.getcwd(), 'bin', clang_file_name) + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + os.getcwd(), 'bin', clang_file_name) env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] - # We don't need to check default paths here because we force install to cache + # We don't need to check default paths here because we force install to + # cache env['+PATH'] = [env['CM_LLVM_INSTALLED_PATH']] path_include = os.path.join(os.getcwd(), 'include') if os.path.isdir(path_include): - env['+C_INCLUDE_PATH'] = [ path_include ] + env['+C_INCLUDE_PATH'] = [path_include] + return {'return': 0} - return {'return':0} def postprocess(i): @@ -204,5 +218,4 @@ def postprocess(i): # if cur_dir_include not in env['+CPLUS_INCLUDE_PATH']: # env['+CPLUS_INCLUDE_PATH'].append(cur_dir_include) - - return {'return':0, 'version': version} + return {'return': 0, 'version': version} diff --git a/script/install-llvm-src/customize.py b/script/install-llvm-src/customize.py index b1bb23250f..5a359ac7e0 100644 --- a/script/install-llvm-src/customize.py +++ b/script/install-llvm-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -23,8 +24,8 
@@ def preprocess(i): env['CM_REQUIRE_INSTALL'] = 'yes' i['run_script_input']['script_name'] = "install-llvm-16-intel-mlperf-inference" clang_file_name = "llvm-link" - #env['USE_LLVM'] = install_prefix - #env['LLVM_DIR'] = os.path.join(env['USE_LLVM'], "lib", "cmake", "llvm") + # env['USE_LLVM'] = install_prefix + # env['LLVM_DIR'] = os.path.join(env['USE_LLVM'], "lib", "cmake", "llvm") else: if env.get('CM_LLVM_ENABLE_RUNTIMES', '') != '': enable_runtimes = env['CM_LLVM_ENABLE_RUNTIMES'].replace(":", ";") @@ -38,19 +39,24 @@ def preprocess(i): llvm_build_type = env['CM_LLVM_BUILD_TYPE'] - cmake_cmd = "cmake " + os.path.join(env["CM_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE="+llvm_build_type + " -DLLVM_ENABLE_PROJECTS="+ enable_projects+ " -DLLVM_ENABLE_RUNTIMES='"+enable_runtimes + "' -DCMAKE_INSTALL_PREFIX=" + install_prefix + " -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86 " + extra_cmake_options + cmake_cmd = "cmake " + os.path.join(env["CM_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE=" + llvm_build_type + " -DLLVM_ENABLE_PROJECTS=" + enable_projects + " -DLLVM_ENABLE_RUNTIMES='" + \ + enable_runtimes + "' -DCMAKE_INSTALL_PREFIX=" + install_prefix + \ + " -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86 " + \ + extra_cmake_options env['CM_LLVM_CMAKE_CMD'] = cmake_cmd - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') - #print(cmake_cmd) + # print(cmake_cmd) env['CM_LLVM_INSTALLED_PATH'] = install_prefix - env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin", clang_file_name) + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "bin", clang_file_name) + + # env['+PATH'] = [] + return {'return': 0} - #env['+PATH'] = [] - return {'return':0} def postprocess(i): @@ -59,11 +65,12 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] if env.get('CM_LLVM_CONDA_ENV', '') != "yes": - # We don't need to check default paths here because we force install to cache - env['+PATH'] = [ os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin") ] + # We don't need to check default paths here because we force install to + # cache + env['+PATH'] = [os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin")] path_include = os.path.join(env['CM_LLVM_INSTALLED_PATH'], 'include') if os.path.isdir(path_include): - env['+C_INCLUDE_PATH'] = [ path_include ] + env['+C_INCLUDE_PATH'] = [path_include] - return {'return':0} + return {'return': 0} diff --git a/script/install-mlperf-logging-from-src/customize.py b/script/install-mlperf-logging-from-src/customize.py index d12f9b3e1d..273999d460 100644 --- a/script/install-mlperf-logging-from-src/customize.py +++ b/script/install-mlperf-logging-from-src/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,10 +14,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/install-nccl-libs/customize.py b/script/install-nccl-libs/customize.py index d12f9b3e1d..273999d460 100644 --- a/script/install-nccl-libs/customize.py +++ b/script/install-nccl-libs/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,10 +14,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - 
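The `cmake_cmd` above is one long concatenation. The same invocation built from placeholder parts, to make the flag set easier to see (all paths and option values here are assumed, not taken from a real run):

```python
import os

src_repo = '/work/llvm-project'     # placeholder for CM_LLVM_SRC_REPO_PATH
llvm_build_type = 'Release'
enable_projects = 'clang'
enable_runtimes = 'libcxx;libcxxabi'
install_prefix = '/work/llvm-install'
extra_cmake_options = ''

cmake_cmd = ('cmake ' + os.path.join(src_repo, 'llvm') +
             ' -GNinja -DCMAKE_BUILD_TYPE=' + llvm_build_type +
             ' -DLLVM_ENABLE_PROJECTS=' + enable_projects +
             " -DLLVM_ENABLE_RUNTIMES='" + enable_runtimes + "'" +
             ' -DCMAKE_INSTALL_PREFIX=' + install_prefix +
             ' -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON' +
             ' -DLLVM_TARGETS_TO_BUILD=X86 ' + extra_cmake_options)

print(cmake_cmd)
```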
return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/install-numactl-from-src/customize.py b/script/install-numactl-from-src/customize.py index 5c1ee2674f..5605e99b00 100644 --- a/script/install-numactl-from-src/customize.py +++ b/script/install-numactl-from-src/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - run_cmd="python setup.py install" + run_cmd = "python setup.py install" env['CM_RUN_CMD'] = run_cmd @@ -20,4 +21,4 @@ def preprocess(i): env['+PATH'] = [] - return {'return':0} + return {'return': 0} diff --git a/script/install-onednn-from-src/customize.py b/script/install-onednn-from-src/customize.py index 95d18ff02e..facd107df1 100644 --- a/script/install-onednn-from-src/customize.py +++ b/script/install-onednn-from-src/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - run_cmd="" + run_cmd = "" env['CM_RUN_CMD'] = run_cmd env['CM_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn") @@ -22,10 +23,11 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/install-onnxruntime-from-src/customize.py b/script/install-onnxruntime-from-src/customize.py index be854b226b..0f11967a0e 100644 --- a/script/install-onnxruntime-from-src/customize.py +++ b/script/install-onnxruntime-from-src/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - run_cmd="./build.sh --config RelWithDebInfo --build_wheel --parallel --allow_running_as_root --skip_tests " + run_cmd = "./build.sh --config RelWithDebInfo --build_wheel --parallel --allow_running_as_root --skip_tests " if env.get('CM_ONNXRUNTIME_GPU', '') == "yes": cuda_home = env['CUDA_HOME'] @@ -19,4 +20,4 @@ def preprocess(i): env['CM_RUN_DIR'] = env['CM_ONNXRUNTIME_SRC_REPO_PATH'] env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} diff --git a/script/install-opencv-from-src/customize.py b/script/install-opencv-from-src/customize.py index 0e5521f943..bc4ebf61fc 100644 --- a/script/install-opencv-from-src/customize.py +++ b/script/install-opencv-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,13 +15,14 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] env['CM_OPENCV_BUILD_PATH'] = os.path.join(os.getcwd(), "opencv", 
"build") - env['CM_DEPENDENT_CACHED_PATH' ] = env['CM_OPENCV_BUILD_PATH'] + env['CM_DEPENDENT_CACHED_PATH'] = env['CM_OPENCV_BUILD_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/install-openssl/customize.py b/script/install-openssl/customize.py index e6163a0f5e..23ece8afb5 100644 --- a/script/install-openssl/customize.py +++ b/script/install-openssl/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,20 +15,27 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + return {'return': 0} - return {'return':0} def postprocess(i): inp = i['input'] env = i['env'] tags = inp['tags'] tag_list = tags.split(",") - install_path = os.path.join(os.getcwd(), 'openssl-'+env['CM_VERSION']+'g', 'install') + install_path = os.path.join( + os.getcwd(), + 'openssl-' + + env['CM_VERSION'] + + 'g', + 'install') path_lib = os.path.join(install_path, 'lib') if '+LD_LIBRARY_PATH' not in env: env['+LD_LIBRARY_PATH'] = [] @@ -36,4 +44,4 @@ def postprocess(i): path_bin = os.path.join(install_path, 'bin') env['CM_OPENSSL_INSTALLED_PATH'] = path_bin env['CM_OPENSSL_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) - return {'return':0} + return {'return': 0} diff --git a/script/install-pip-package-for-cmind-python/customize.py b/script/install-pip-package-for-cmind-python/customize.py index 97c88f8402..05960de84e 100644 --- a/script/install-pip-package-for-cmind-python/customize.py +++ b/script/install-pip-package-for-cmind-python/customize.py @@ -3,9 +3,11 @@ import subprocess import sys + def install(package): additional_install_options = [] - r = subprocess.run([sys.executable, "-m", "pip", "--version"], check=True, capture_output=True) + r = subprocess.run([sys.executable, "-m", "pip", + "--version"], check=True, capture_output=True) r = r.stdout.decode("utf-8") if "pip" in r: out_split = r.split(" ") @@ -18,7 +20,8 @@ def install(package): run_cmd += additional_install_options r = subprocess.run(run_cmd, check=True) - return {'return':0} + return {'return': 0} + def preprocess(i): @@ -29,10 +32,11 @@ def preprocess(i): if r['return'] > 0: return r - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/install-python-src/customize.py b/script/install-python-src/customize.py index a7025a6cf2..25edf22f49 100644 --- a/script/install-python-src/customize.py +++ b/script/install-python-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,17 +15,19 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - need_version = 
env.get('CM_VERSION','') + need_version = env.get('CM_VERSION', '') if need_version == '': - return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) path_bin = os.path.join(os.getcwd(), 'install', 'bin') env['CM_PYTHON_INSTALLED_PATH'] = path_bin - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -32,15 +35,17 @@ def postprocess(i): variation_tags = i['variation_tags'] path_lib = os.path.join(os.getcwd(), 'install', 'lib') - env['+LD_LIBRARY_PATH'] = [ path_lib ] + env['+LD_LIBRARY_PATH'] = [path_lib] - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_PYTHON_INSTALLED_PATH'], 'python3') + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_PYTHON_INSTALLED_PATH'], 'python3') - # We don't need to check default paths here because we force install to cache + # We don't need to check default paths here because we force install to + # cache env['+PATH'] = [env['CM_PYTHON_INSTALLED_PATH']] path_include = os.path.join(os.getcwd(), 'install', 'include') - env['+C_INCLUDE_PATH'] = [ path_include ] + env['+C_INCLUDE_PATH'] = [path_include] - return {'return':0} + return {'return': 0} diff --git a/script/install-python-venv/customize.py b/script/install-python-venv/customize.py index e6b5993ed3..a9a411524d 100644 --- a/script/install-python-venv/customize.py +++ b/script/install-python-venv/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -16,16 +17,18 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] # Add extra tags to python - add_extra_cache_tags = [] # for this script - add_python_extra_cache_tags = ['virtual'] # for get-python script + add_extra_cache_tags = [] # for this script + add_python_extra_cache_tags = ['virtual'] # for get-python script - name = env.get('CM_NAME','') + name = env.get('CM_NAME', '') if not quiet and name == '': - print ('') - x = input('Enter some tag to describe this virtual env (mlperf-inf,octoml-bench,etc): ') + print('') + x = input( + 'Enter some tag to describe this virtual env (mlperf-inf,octoml-bench,etc): ') x = x.strip() - if x != '': name = x + if x != '': + name = x directory_name = 'venv' if name != '': @@ -39,20 +42,22 @@ def preprocess(i): env['CM_VIRTUAL_ENV_PATH'] = os.path.join(os.getcwd(), directory_name) s = 'Scripts' if os_info['platform'] == 'windows' else 'bin' - env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join(env['CM_VIRTUAL_ENV_PATH'], s) + env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join( + env['CM_VIRTUAL_ENV_PATH'], s) env['CM_TMP_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' - - r = automation.update_deps({'deps':meta['post_deps'], - 'update_deps':{'register-python': - {'extra_cache_tags':','.join(add_python_extra_cache_tags)}}}) - if r['return']>0: return r + r = automation.update_deps({'deps': meta['post_deps'], + 'update_deps': {'register-python': + {'extra_cache_tags': ','.join(add_python_extra_cache_tags)}}}) + if r['return'] > 0: + return r env['CM_PYTHON_INSTALLED_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] - return {'return':0, 'add_extra_cache_tags':add_extra_cache_tags} + return {'return': 0, 'add_extra_cache_tags': 
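The venv hunk above selects `Scripts` on Windows and `bin` everywhere else for the interpreter directory. As a small helper (the platform string and path are illustrative):

```python
import os


def venv_scripts_path(venv_path, platform):
    # 'Scripts' on Windows, 'bin' on other platforms, as in the hunk above.
    s = 'Scripts' if platform == 'windows' else 'bin'
    return os.path.join(venv_path, s)


print(venv_scripts_path('/work/venv', 'linux'))    # /work/venv/bin
print(venv_scripts_path('/work/venv', 'windows'))  # /work/venv/Scripts
```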
add_extra_cache_tags} + def postprocess(i): @@ -62,15 +67,16 @@ def postprocess(i): state = i['state'] - script_prefix = state.get('script_prefix',[]) + script_prefix = state.get('script_prefix', []) - path_to_activate = os.path.join(env['CM_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') + path_to_activate = os.path.join( + env['CM_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') # If windows, download here otherwise use run.sh if os_info['platform'] == 'windows': path_to_activate += '.bat' - s = os_info['run_bat'].replace('${bat_file}', '"'+path_to_activate+'"') + s = os_info['run_bat'].replace('${bat_file}', '"' + path_to_activate + '"') script_prefix.append(s) state['script_prefix'] = script_prefix @@ -78,6 +84,7 @@ def postprocess(i): python_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' # Will be passed to get-python to finalize registering of the new python - env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_PYTHON_INSTALLED_PATH'], python_name) + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_PYTHON_INSTALLED_PATH'], python_name) - return {'return':0} + return {'return': 0} diff --git a/script/install-pytorch-from-src/customize.py b/script/install-pytorch-from-src/customize.py index 67f2673d33..d44872d7a8 100644 --- a/script/install-pytorch-from-src/customize.py +++ b/script/install-pytorch-from-src/customize.py @@ -1,23 +1,24 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] if env.get('CM_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL', '') == "yes": i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" - run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " + run_cmd = "CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " env['CM_RUN_CMD'] = run_cmd - elif env.get('CM_MLPERF_INFERENCE_INTEL_MODEL', '') in [ "resnet50", "retinanet" ]: + elif env.get('CM_MLPERF_INFERENCE_INTEL_MODEL', '') in ["resnet50", "retinanet"]: i['run_script_input']['script_name'] = "run-intel-mlperf-inference-vision" - run_cmd=f"CC={env['CM_C_COMPILER_WITH_PATH']} CXX={env['CM_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . " + run_cmd = f"CC={env['CM_C_COMPILER_WITH_PATH']} CXX={env['CM_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . 
" env['CM_RUN_CMD'] = run_cmd @@ -26,10 +27,12 @@ def preprocess(i): if not env.get('+ CXXFLAGS', []): env['+ CXXFLAGS'] = [] - env['+ CFLAGS'] += [ "-Wno-error=uninitialized", "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing" ] - env['+ CXXFLAGS'] += [ "-Wno-error=uninitialized", "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing" ] + env['+ CFLAGS'] += ["-Wno-error=uninitialized", + "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing"] + env['+ CXXFLAGS'] += ["-Wno-error=uninitialized", + "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing"] automation = i['automation'] recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} diff --git a/script/install-pytorch-kineto-from-src/customize.py b/script/install-pytorch-kineto-from-src/customize.py index df2744ac4d..032a4f3bf2 100644 --- a/script/install-pytorch-kineto-from-src/customize.py +++ b/script/install-pytorch-kineto-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,4 +15,4 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} diff --git a/script/install-qaic-compute-sdk-from-src/customize.py b/script/install-qaic-compute-sdk-from-src/customize.py index 12deb42c17..fec123a6be 100644 --- a/script/install-qaic-compute-sdk-from-src/customize.py +++ b/script/install-qaic-compute-sdk-from-src/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -24,20 +25,26 @@ def preprocess(i): ''' quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - #env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + # env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: env['+PATH'] = [] - env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join(os.getcwd(), "src", "install", "qaic-compute-"+env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) + env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join( + os.getcwd(), + "src", + "install", + "qaic-compute-" + + env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) env['QAIC_COMPUTE_INSTALL_DIR'] = env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] env['+PATH'].append(os.path.join(env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'], "exec")) - return {'return':0} + return {'return': 0} diff --git a/script/install-rapidjson-from-src/customize.py b/script/install-rapidjson-from-src/customize.py index 895731add7..6d5e7f9db3 100644 --- a/script/install-rapidjson-from-src/customize.py +++ b/script/install-rapidjson-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,7 +15,8 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/install-rocm/customize.py b/script/install-rocm/customize.py index 3ae7daafb6..2762f81389 
100644 --- a/script/install-rocm/customize.py +++ b/script/install-rocm/customize.py @@ -1,12 +1,14 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): @@ -14,6 +16,6 @@ def postprocess(i): installed_path = "/opt/rocm/bin" env['CM_ROCM_INSTALLED_PATH'] = installed_path env['CM_ROCM_BIN_WITH_PATH'] = os.path.join(installed_path, "rocminfo") - env['+PATH'] = [ installed_path ] + env['+PATH'] = [installed_path] - return {'return':0} + return {'return': 0} diff --git a/script/install-tensorflow-for-c/customize.py b/script/install-tensorflow-for-c/customize.py index a0a8a42148..d950482c4e 100644 --- a/script/install-tensorflow-for-c/customize.py +++ b/script/install-tensorflow-for-c/customize.py @@ -1,27 +1,32 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] - env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) - env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'install', 'include')) + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) lib_path = os.path.join(os.getcwd(), 'install', 'lib') env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0} + return {'return': 0} diff --git a/script/install-tensorflow-from-src/customize.py b/script/install-tensorflow-from-src/customize.py index 3ddee9a900..f591d55227 100644 --- a/script/install-tensorflow-from-src/customize.py +++ b/script/install-tensorflow-from-src/customize.py @@ -1,34 +1,61 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] env['CC'] = env['CM_C_COMPILER_WITH_PATH'] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] bazel_install_root = 
os.path.join(os.getcwd(), "src", "bazel-out") bazel_install_bin = os.path.join(os.getcwd(), "src", "bazel-bin") inc_paths = [] inc_paths.append(os.path.join(os.getcwd(), "src")) inc_paths.append(bazel_install_bin) - inc_paths.append(os.path.join(bazel_install_bin, "external", "flatbuffers", "_virtual_includes", "flatbuffers")) - inc_paths.append(os.path.join(bazel_install_bin, "external", "FP16", "_virtual_includes", "FP16")) - inc_paths.append(os.path.join(bazel_install_bin, "external", "pthreadpool", "_virtual_includes", "pthreadpool")) - inc_paths.append(os.path.join(bazel_install_bin, "external", "cpuinfo", "_virtual_includes", "cpuinfo")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "flatbuffers", + "_virtual_includes", + "flatbuffers")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "FP16", + "_virtual_includes", + "FP16")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "pthreadpool", + "_virtual_includes", + "pthreadpool")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "cpuinfo", + "_virtual_includes", + "cpuinfo")) env['+C_INCLUDE_PATH'] = inc_paths env['+CPLUS_INCLUDE_PATH'] = inc_paths @@ -41,4 +68,4 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0} + return {'return': 0} diff --git a/script/install-terraform-from-src/customize.py b/script/install-terraform-from-src/customize.py index 84fccf236e..fb46c1192f 100644 --- a/script/install-terraform-from-src/customize.py +++ b/script/install-terraform-from-src/customize.py @@ -1,19 +1,22 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] installed_path = os.path.join(os.getcwd(), 'bin') env['CM_TERRAFORM_INSTALLED_PATH'] = installed_path - env['CM_TERRAFORM_BIN_WITH_PATH'] = os.path.join(installed_path, "terraform") - env['+PATH'] = [ installed_path ] + env['CM_TERRAFORM_BIN_WITH_PATH'] = os.path.join( + installed_path, "terraform") + env['+PATH'] = [installed_path] - return {'return':0} + return {'return': 0} diff --git a/script/install-tflite-from-src/customize.py b/script/install-tflite-from-src/customize.py index ab0055816d..b230459384 100644 --- a/script/install-tflite-from-src/customize.py +++ b/script/install-tflite-from-src/customize.py @@ -1,20 +1,23 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: -# 20221024: we save and restore env in the main script and can clean env here for determinism -# if key not in env: + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: env[key] = [] env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'src')) @@ -24,4 
+27,4 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - return {'return':0} + return {'return': 0} diff --git a/script/install-torchvision-from-src/customize.py b/script/install-torchvision-from-src/customize.py index 895731add7..6d5e7f9db3 100644 --- a/script/install-torchvision-from-src/customize.py +++ b/script/install-torchvision-from-src/customize.py @@ -1,12 +1,13 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] @@ -14,7 +15,8 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/install-tpp-pytorch-extension/customize.py b/script/install-tpp-pytorch-extension/customize.py index 35ee0e05a6..6ee676b687 100644 --- a/script/install-tpp-pytorch-extension/customize.py +++ b/script/install-tpp-pytorch-extension/customize.py @@ -1,23 +1,25 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] env['TPP_PEX_DIR'] = env['CM_TPP_PEX_SRC_REPO_PATH'] env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] - env['LLVM_DIR'] = os.path.join(env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + env['LLVM_DIR'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") - run_cmd="python setup.py clean && python setup.py install" + run_cmd = "python setup.py clean && python setup.py install" env['CM_RUN_DIR'] = env['TPP_PEX_DIR'] env['CM_RUN_CMD'] = run_cmd - return {'return':0} + return {'return': 0} diff --git a/script/install-transformers-from-src/customize.py b/script/install-transformers-from-src/customize.py index 29f69cfc69..c287139fe5 100644 --- a/script/install-transformers-from-src/customize.py +++ b/script/install-transformers-from-src/customize.py @@ -1,16 +1,17 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] - run_cmd="python setup.py install" + run_cmd = "python setup.py install" env['CM_RUN_CMD'] = run_cmd @@ -18,4 +19,4 @@ def preprocess(i): recursion_spaces = i['recursion_spaces'] - return {'return':0} + return {'return': 0} diff --git a/script/launch-benchmark/customize.py b/script/launch-benchmark/customize.py index 285f2889ea..86e7297156 100644 --- a/script/launch-benchmark/customize.py +++ b/script/launch-benchmark/customize.py @@ -2,10 +2,12 @@ import os import copy -base_path={} -base_path_meta={} +base_path = {} +base_path_meta = {} + +########################################################################## + -################################################################################## def preprocess(i): os_info = i['os_info'] @@ -18,41 +20,44 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + 
+########################################################################## + -################################################################################## def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} -################################################################################## +########################################################################## def load_cfg(i): - tags = i.get('tags','') - artifact = i.get('artifact','') + tags = i.get('tags', '') + artifact = i.get('artifact', '') - key = i.get('key','') + key = i.get('key', '') - ii={'action':'find', - 'automation':'cfg'} - if artifact!='': - ii['artifact']=artifact - elif tags!='': - ii['tags']=tags + ii = {'action': 'find', + 'automation': 'cfg'} + if artifact != '': + ii['artifact'] = artifact + elif tags != '': + ii['tags'] = tags - r=cmind.access(ii) - if r['return']>0: return r + r = cmind.access(ii) + if r['return'] > 0: + return r lst = r['list'] - prune = i.get('prune',{}) + prune = i.get('prune', {}) prune_key = prune.get('key', '') prune_key_uid = prune.get('key_uid', '') prune_uid = prune.get('uid', '') - prune_list = prune.get('list',[]) + prune_list = prune.get('list', []) # Checking individual files inside CM entry selection = [] @@ -62,11 +67,11 @@ def load_cfg(i): meta = l.meta full_path = l.path - meta['full_path']=full_path + meta['full_path'] = full_path add = True - if prune_key!='' and prune_key_uid!='': + if prune_key != '' and prune_key_uid != '': if prune_key_uid not in meta.get(prune_key, []): add = False @@ -77,15 +82,16 @@ def load_cfg(i): path = l.path main_meta = l.meta - all_tags = main_meta.get('tags',[]) + all_tags = main_meta.get('tags', []) files = os.listdir(path) for f in files: - if key!='' and not f.startswith(key): + if key != '' and not f.startswith(key): continue - if f.startswith('_') or (not f.endswith('.json') and not f.endswith('.yaml')): + if f.startswith('_') or (not f.endswith( + '.json') and not f.endswith('.yaml')): continue full_path = os.path.join(path, f) @@ -93,14 +99,15 @@ def load_cfg(i): full_path_without_ext = full_path[:-5] r = cmind.utils.load_yaml_and_json(full_path_without_ext) - if r['return']>0: - print ('Warning: problem loading file {}'.format(full_path)) + if r['return'] > 0: + print('Warning: problem loading file {}'.format(full_path)) else: meta = r['meta'] # Check base r = process_base(meta, full_path) - if r['return']>0: return r + if r['return'] > 0: + return r meta = r['meta'] uid = meta['uid'] @@ -108,43 +115,48 @@ def load_cfg(i): # Check pruning add = True - if len(prune)>0: - if prune_uid!='' and uid != prune_uid: + if len(prune) > 0: + if prune_uid != '' and uid != prune_uid: add = False - if add and len(prune_list)>0 and uid not in prune_list: + if add and len( + prune_list) > 0 and uid not in prune_list: add = False - if add and prune_key!='' and prune_key_uid!='' and prune_key_uid != meta.get(prune_key, None): + if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get( + prune_key, None): add = False if add: - meta['full_path']=full_path + meta['full_path'] = full_path add_all_tags = copy.deepcopy(all_tags) - name = meta.get('name','') - if name=='': - name = ' '.join(meta.get('tags',[])) + name = meta.get('name', '') + if name == '': + name = ' '.join(meta.get('tags', [])) name = name.strip() meta['name'] = name file_tags = meta.get('tags', '').strip() - if file_tags=='': - if name!='': - add_all_tags += [v.lower() for v in name.split(' ')] + if file_tags == '': + if name != '': 
+ add_all_tags += [v.lower() + for v in name.split(' ')] else: add_all_tags += file_tags.split(',') - meta['all_tags']=add_all_tags + meta['all_tags'] = add_all_tags - meta['main_meta']=main_meta + meta['main_meta'] = main_meta selection.append(meta) - return {'return':0, 'lst':lst, 'selection':selection} + return {'return': 0, 'lst': lst, 'selection': selection} + +########################################################################## + -################################################################################## def process_base(meta, full_path): global base_path, base_path_meta @@ -157,7 +169,8 @@ def process_base(meta, full_path): full_path_base = os.path.dirname(full_path) if not filename.endswith('.yaml') and not filename.endswith('.json'): - return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} + return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format( + filename, full_path)} if ':' in _base: x = _base.split(':') @@ -167,16 +180,18 @@ def process_base(meta, full_path): if full_path_base == '': # Find artifact - r = cmind.access({'action':'find', - 'automation':'cfg', - 'artifact':name}) - if r['return']>0: return r + r = cmind.access({'action': 'find', + 'automation': 'cfg', + 'artifact': name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: + if len(lst) == 0: if not os.path.isfile(path): - return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)} + return {'return': 1, 'error': '_base artifact {} not found in {}'.format( + name, full_path)} full_path_base = lst[0].path @@ -188,7 +203,8 @@ def process_base(meta, full_path): path = os.path.join(full_path_base, filename) if not os.path.isfile(path): - return {'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)} + return {'return': 1, 'error': '_base file {} not found in {}'.format( + filename, full_path)} if path in base_path_meta: base = copy.deepcopy(base_path_meta[path]) @@ -196,44 +212,42 @@ def process_base(meta, full_path): path_without_ext = path[:-5] r = cmind.utils.load_yaml_and_json(path_without_ext) - if r['return']>0: return r + if r['return'] > 0: + return r base = r['meta'] - base_path_meta[path]=copy.deepcopy(base) + base_path_meta[path] = copy.deepcopy(base) for k in meta: v = meta[k] if k not in base: - base[k]=v + base[k] = v else: if isinstance(v, str): # Only merge a few special keys and overwrite the rest - if k in ['tags','name']: + if k in ['tags', 'name']: base[k] += meta[k] else: base[k] = meta[k] - elif type(v) == list: - for vv in v: + elif isinstance(v, list): + for vv in v: base[k].append(vv) - elif type(v) == dict: - base[k].merge(v) + elif isinstance(v, dict): + base[k].merge(v) meta = base - return {'return':0, 'meta':meta} - + return {'return': 0, 'meta': meta} - -################################################################################## +########################################################################## def get_with_complex_key(meta, key): j = key.find('.') - if j<0: + if j < 0: return meta.get(key) key0 = key[:j] @@ -241,17 +255,20 @@ def get_with_complex_key(meta, key): if key0 not in meta: return None - return get_with_complex_key(meta[key0], key[j+1:]) + return get_with_complex_key(meta[key0], key[j + 1:]) + +########################################################################## -################################################################################## def get_with_complex_key_safe(meta, key): v = 
get_with_complex_key(meta, key) - if v == None: v='' + if v == None: + v = '' return v -################################################################################## +########################################################################## + def prepare_table(i): import pandas as pd @@ -298,24 +315,19 @@ def prepare_table(i): # If dimensions, sort by dimensions for d in list(reversed(dimension_keys)): - selection = sorted(selection, key = lambda x: get_with_complex_key_safe(selection, d)) - + selection = sorted(selection, key=lambda x: get_with_complex_key_safe(selection, d)) keys += [ - ('functional', 'Functional', 80, ''), + ('functional', 'Functional', 80, ''), ('reproduced', 'Reproduced', 80, ''), ('notes', 'Notes', 200, 'lefAligned'), - ] + ] j = 0 - badges_url={'functional':'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png', - 'reproduced':'https://cTuning.org/images/results_reproduced_v1_1_small.png'} - - - - + badges_url = {'functional': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png', + 'reproduced': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'} for s in selection: @@ -327,12 +339,13 @@ def prepare_table(i): url = misc.make_url(uid, key='uid', action='howtorun', md=False) - name = s.get('name','') - if name == '': name = uid - + name = s.get('name', '') + if name == '': + name = uid if len(dimensions) == 0: - row['test'] = '{}'.format(url, name) + row['test'] = '{}'.format( + url, name) else: row['test'] = 'View'.format(url) for k in dimensions: @@ -343,50 +356,45 @@ def prepare_table(i): row[kk] = str(v) - - # Check ACM/IEEE functional badge x = '' if s.get('functional', False): - x = '
'.format(url, badges_url['functional']) + x = '
'.format( + url, badges_url['functional']) row['functional'] = x # Check ACM/IEEE reproduced badge x = '' if s.get('reproduced', False): - x = '
'.format(url, badges_url['reproduced']) + x = '
'.format( + url, badges_url['reproduced']) row['reproduced'] = x # Check misc notes - row['notes']=s.get('notes','') + row['notes'] = s.get('notes', '') # Finish row all_data.append(row) - # Visualize table pd_keys = [v[0] for v in keys] pd_key_names = [v[1] for v in keys] pd_all_data = [] - for row in sorted(all_data, key=lambda row: (row.get('x1',0))): - pd_row=[] + for row in sorted(all_data, key=lambda row: (row.get('x1', 0))): + pd_row = [] for k in pd_keys: pd_row.append(row.get(k)) pd_all_data.append(pd_row) - df = pd.DataFrame(pd_all_data, columns = pd_key_names) - - df.index+=1 - - return {'return':0, 'df':df} + df = pd.DataFrame(pd_all_data, columns=pd_key_names) + df.index += 1 + return {'return': 0, 'df': df} - - -################################################################################## +########################################################################## def gui(i): params = i['params'] @@ -407,60 +415,59 @@ def gui(i): st.markdown('### {}'.format(title)) - - - # Check if test uid is specified uid = '' - x = params.get('uid',['']) - if len(x)>0 and x[0]!='': uid = x[0].strip() + x = params.get('uid', ['']) + if len(x) > 0 and x[0] != '': + uid = x[0].strip() bench_uid = '' - x = params.get('bench_uid',['']) - if len(x)>0 and x[0]!='': bench_uid = x[0].strip() + x = params.get('bench_uid', ['']) + if len(x) > 0 and x[0] != '': + bench_uid = x[0].strip() compute_uid = '' - x = params.get('compute_uid',['']) - if len(x)>0 and x[0]!='': compute_uid = x[0].strip() + x = params.get('compute_uid', ['']) + if len(x) > 0 and x[0] != '': + compute_uid = x[0].strip() ############################################################## # Check the first level of benchmarks - ii = {'tags':'benchmark,run', 'skip_files':True, 'prune':{}} + ii = {'tags': 'benchmark,run', 'skip_files': True, 'prune': {}} if uid != '': ii['skip_files'] = False - ii['prune']['uid']=uid - if bench_uid !='': - ii['artifact']=bench_uid - if compute_uid !='': - ii['prune']['key']='supported_compute' - ii['prune']['key_uid']=compute_uid + ii['prune']['uid'] = uid + if bench_uid != '': + ii['artifact'] = bench_uid + if compute_uid != '': + ii['prune']['key'] = 'supported_compute' + ii['prune']['key_uid'] = compute_uid - r=load_cfg(ii) - if r['return']>0: return r + r = load_cfg(ii) + if r['return'] > 0: + return r lst = r['selection'] - if len(lst)==0: + if len(lst) == 0: st.markdown('Warning: no benchmarks found!') - return {'return':0} + return {'return': 0} test_meta = {} bench_id = 0 - ########################################################################################################### + ########################################################################## if uid != '': - if len(lst)==0: + if len(lst) == 0: st.markdown('CM test with UID "{}" not found!'.format(uid)) - return {'return':0} - elif len(lst)>1: - st.markdown('Warning: More than 1 CM test found with UID "{}" - ambiguity!'.format(uid)) - return {'return':0} + return {'return': 0} + elif len(lst) > 1: + st.markdown( + 'Warning: More than 1 CM test found with UID "{}" - ambiguity!'.format(uid)) + return {'return': 0} test_meta = lst[0] @@ -468,12 +475,11 @@ def gui(i): compute_uid = test_meta['compute_uid'] bench_supported_compute = [compute_uid] - if uid == '': - selection = sorted(lst, key = lambda v: v['name']) - bench_selection = [{'name':''}] + selection + selection = sorted(lst, key=lambda v: v['name']) + bench_selection = [{'name': ''}] + selection - if bench_uid !='': + if bench_uid != '': bench_id_index = 1 else: # Check if want to force 
some benchmark by default @@ -481,131 +487,133 @@ bench_id_index = 0 - j=0 + j = 0 for b in bench_selection: - if b.get('uid','')=='27c06c35bceb4059': - bench_id_index=j + if b.get('uid', '') == '27c06c35bceb4059': + bench_id_index = j break - j+=1 - + j += 1 bench_id = st.selectbox('Select benchmark:', - range(len(bench_selection)), - format_func=lambda x: bench_selection[x]['name'], - index = bench_id_index, - key = 'bench') - + range(len(bench_selection)), + format_func=lambda x: bench_selection[x]['name'], + index=bench_id_index, + key='bench') bench_supported_compute = [] bench_meta = {} - if bench_id>0: + if bench_id > 0: bench_meta = bench_selection[bench_id] - bench_supported_compute = bench_meta.get('supported_compute',[]) + bench_supported_compute = bench_meta.get('supported_compute', []) - urls = bench_meta.get('urls',[]) - if len(urls)>0: + urls = bench_meta.get('urls', []) + if len(urls) > 0: x = '\n' for u in urls: name = u['name'] url = u['url'] - x+=' [ [{}]({}) ] '.format(name, url) - x+='\n' + x += ' [ [{}]({}) ] '.format(name, url) + x += '\n' st.markdown(x) - ########################################################################################################### - if True==True: + ########################################################################## + if True == True: ############################################################## # Check compute - ii = {'tags':'benchmark,compute'} - if bench_id>0: - if compute_uid !='': + ii = {'tags': 'benchmark,compute'} + if bench_id > 0: + if compute_uid != '': x = [compute_uid] else: x = bench_supported_compute if len(x) == 0: st.markdown('Warning: no supported compute selected!') - return {'return':0} + return {'return': 0} - ii['prune']={'list':x} + ii['prune'] = {'list': x} - r=load_cfg(ii) - if r['return']>0: return r + r = load_cfg(ii) + if r['return'] > 0: + return r - selection = sorted(r['selection'], key = lambda v: v['name']) + selection = sorted(r['selection'], key=lambda v: v['name']) - if len(selection) == 0 : + if len(selection) == 0: st.markdown('Warning: no supported compute found!') - return {'return':0} + return {'return': 0} - compute_selection = [{'name':''}] - if len(selection)>0: + compute_selection = [{'name': ''}] + if len(selection) > 0: compute_selection += selection compute_id_index = 0 if compute_uid == '' else 1 if uid == '': compute_id = st.selectbox('Select target hardware to benchmark:', - range(len(compute_selection)), - format_func=lambda x: compute_selection[x]['name'], - index = compute_id_index, - key = 'compute') + range(len(compute_selection)), + format_func=lambda x: compute_selection[x]['name'], + index=compute_id_index, + key='compute') compute = {} - if compute_id>0: + if compute_id > 0: compute = compute_selection[compute_id] compute_uid = compute['uid'] compute_meta = {} for c in compute_selection: - if c.get('uid','')!='': - compute_meta[c['uid']]=c + if c.get('uid', '') != '': + compute_meta[c['uid']] = c - ########################################################################################################### + ########################################################################## if uid == '': ############################################################## # Check tests - ii = {'tags':'benchmark,run'} + ii = {'tags': 'benchmark,run'} - if bench_id>0: + if bench_id > 0: bench_uid = bench_selection[bench_id]['uid'] - ii['artifact']=bench_uid - if compute_uid!='': - ii['prune']={'key':'compute_uid', 'key_uid':compute_uid} + ii['artifact'] = bench_uid + if 
compute_uid != '': + ii['prune'] = {'key': 'compute_uid', 'key_uid': compute_uid} - r=load_cfg(ii) - if r['return']>0: return r + r = load_cfg(ii) + if r['return'] > 0: + return r - selection = sorted(r['selection'], key = lambda v: v['name']) + selection = sorted(r['selection'], key=lambda v: v['name']) # Check how many and prune if len(selection) == 0: st.markdown('No CM tests found') - return {'return':0} + return {'return': 0} for s in selection: - c_uid = s.get('compute_uid','') - if c_uid!='': - c_tags = compute_meta[c_uid].get('tags','') - if c_tags!='': - s['all_tags']+=c_tags.split(',') + c_uid = s.get('compute_uid', '') + if c_uid != '': + c_tags = compute_meta[c_uid].get('tags', '') + if c_tags != '': + s['all_tags'] += c_tags.split(',') - s['compute_meta']=compute_meta[c_uid] + s['compute_meta'] = compute_meta[c_uid] - if len(selection)>1: + if len(selection) > 1: # Update selection with compute tags test_tags = '' - x = params.get('tags',['']) - if len(x)>0 and x[0]!='': test_tags = x[0].strip() + x = params.get('tags', ['']) + if len(x) > 0 and x[0] != '': + test_tags = x[0].strip() - test_tags = st.text_input('Found {} CM tests. Prune them by tags:'.format(str(len(selection))), value=test_tags, key='test_tags').strip() + test_tags = st.text_input('Found {} CM tests. Prune them by tags:'.format( + str(len(selection))), value=test_tags, key='test_tags').strip() - if test_tags!='': - test_tags_list = test_tags.replace(' ',',').split(',') + if test_tags != '': + test_tags_list = test_tags.replace(' ', ',').split(',') pruned_selection = [] @@ -624,35 +632,35 @@ def gui(i): selection = pruned_selection - test_selection = [{'name':''}] + selection + test_selection = [{'name': ''}] + selection - if len(selection)<200: + if len(selection) < 200: # Creating compute selector - test_id_index = 1 if len(selection)==1 else 0 + test_id_index = 1 if len(selection) == 1 else 0 test_id = st.selectbox('Select a test from {}:'.format(str(len(selection))), - range(len(test_selection)), - format_func=lambda x: test_selection[x]['name'], - index = test_id_index, - key = 'test') + range(len(test_selection)), + format_func=lambda x: test_selection[x]['name'], + index=test_id_index, + key='test') - if test_id >0: + if test_id > 0: test_meta = test_selection[test_id] else: - ######################################################################### + ############################################################### # View many (table) - ii = {'selection':selection, - 'misc_module':misc} + ii = {'selection': selection, + 'misc_module': misc} # Check if dimensions in the bench dimensions = bench_meta.get('dimensions', []) - if len(dimensions)>0: + if len(dimensions) > 0: viewer_selection = ['benchmark specific', 'universal'] - viewer = st.selectbox('Viewer:', viewer_selection, key = 'viewer') + viewer = st.selectbox('Viewer:', viewer_selection, key='viewer') if viewer == 'benchmark specific': ii['dimensions'] = dimensions @@ -661,33 +669,28 @@ def gui(i): st.markdown('---') r = prepare_table(ii) - if r['return']>0: return r + if r['return'] > 0: + return r df = r['df'] - html=df.to_html(escape=False, justify='left') - st.write(html, unsafe_allow_html = True) + html = df.to_html(escape=False, justify='left') + st.write(html, unsafe_allow_html=True) # st.dataframe(df, unsafe_allow_html = True) - - - - - ############################################################## # Show individual test - if len(test_meta)>0: + if len(test_meta) > 0: if uid != '': - c_uid = test_meta.get('compute_uid','') - if c_uid!='': - c_tags = 
compute_meta[c_uid].get('tags','') - if c_tags!='': - test_meta['all_tags']+=c_tags.split(',') - - test_meta['compute_meta']=compute_meta[c_uid] + c_uid = test_meta.get('compute_uid', '') + if c_uid != '': + c_tags = compute_meta[c_uid].get('tags', '') + if c_tags != '': + test_meta['all_tags'] += c_tags.split(',') + test_meta['compute_meta'] = compute_meta[c_uid] if uid == '': st.markdown('---') @@ -697,11 +700,12 @@ def gui(i): # First, check if there is a README test_path = test_meta['full_path'] - test_md = test_meta['full_path'][:-5]+'.md' + test_md = test_meta['full_path'][:-5] + '.md' if os.path.isfile(test_md): r = cmind.utils.load_txt(test_md) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] @@ -719,13 +723,9 @@ def gui(i): st.markdown(x) - - - - # Create self link # This misc module is in CM "gui" script x1 = misc.make_url(uid, key='uid', action='howtorun', md=False) - end_html='
Self link
'.format(x1) + end_html = '
Self link
'.format(x1) - return {'return':0, 'end_html': end_html} + return {'return': 0, 'end_html': end_html} diff --git a/script/launch-benchmark/tests/debug.py b/script/launch-benchmark/tests/debug.py index 842003b2c6..7c8bab4b73 100644 --- a/script/launch-benchmark/tests/debug.py +++ b/script/launch-benchmark/tests/debug.py @@ -1,6 +1,6 @@ import cmind -r=cmind.access({'action':'gui', - 'automation':'script', - 'artifact':'launch benchmark'}) -print (r) +r = cmind.access({'action': 'gui', + 'automation': 'script', + 'artifact': 'launch benchmark'}) +print(r) diff --git a/script/plug-prebuilt-cudnn-to-cuda/customize.py b/script/plug-prebuilt-cudnn-to-cuda/customize.py index 912e9cba68..2beb23e1fa 100644 --- a/script/plug-prebuilt-cudnn-to-cuda/customize.py +++ b/script/plug-prebuilt-cudnn-to-cuda/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -11,7 +12,7 @@ def preprocess(i): env = i['env'] - if str(env.get('CUDA_SKIP_SUDO','')).lower() == 'true': + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': env['CM_SUDO'] = '' meta = i['meta'] @@ -21,7 +22,8 @@ def preprocess(i): supported_versions = list(meta['versions'].keys()) if version not in supported_versions: - return {'return': 1, 'error': "Only cuDNN versions {} are supported now".format(', '.join(supported_versions))} + return {'return': 1, 'error': "Only cuDNN versions {} are supported now".format( + ', '.join(supported_versions))} env['CM_CUDNN_VERSION'] = version @@ -39,12 +41,12 @@ def preprocess(i): cudnn_url = f'https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/{filename}' - print ('') - print (f'URL to download cuDNN: {cudnn_url}') + print('') + print(f'URL to download cuDNN: {cudnn_url}') env['CM_CUDNN_TAR_DIR'] = cudnn_dir env['CM_CUDNN_UNTAR_PATH'] = os.path.join(cur_dir, cudnn_dir) env['WGET_URL'] = cudnn_url env['CM_DOWNLOAD_CHECKSUM'] = cudnn_md5sum - return {'return':0} + return {'return': 0} diff --git a/script/plug-prebuilt-cusparselt-to-cuda/customize.py b/script/plug-prebuilt-cusparselt-to-cuda/customize.py index a2d7926d89..9262677055 100644 --- a/script/plug-prebuilt-cusparselt-to-cuda/customize.py +++ b/script/plug-prebuilt-cusparselt-to-cuda/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -11,7 +12,7 @@ def preprocess(i): env = i['env'] - if str(env.get('CUDA_SKIP_SUDO','')).lower() == 'true': + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': env['CM_SUDO'] = '' meta = i['meta'] @@ -21,7 +22,8 @@ def preprocess(i): supported_versions = list(meta['versions'].keys()) if version not in supported_versions: - return {'return': 1, 'error': "Only CUSPARSELT versions {} are supported now".format(', '.join(supported_versions))} + return {'return': 1, 'error': "Only CUSPARSELT versions {} are supported now".format( + ', '.join(supported_versions))} env['CM_CUSPARSELT_VERSION'] = version @@ -39,12 +41,12 @@ def preprocess(i): cusparselt_url = f'https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/{filename}' - print ('') - print (f'URL to download CUSPARSELT: {cusparselt_url}') + print('') + print(f'URL to download CUSPARSELT: {cusparselt_url}') env['CM_CUSPARSELT_TAR_DIR'] = cusparselt_dir env['CM_CUSPARSELT_UNTAR_PATH'] = os.path.join(cur_dir, cusparselt_dir) env['WGET_URL'] = cusparselt_url env['CM_DOWNLOAD_CHECKSUM'] = cusparselt_md5sum - return {'return':0} + return {'return': 0} diff 
--git a/script/prepare-training-data-bert/customize.py b/script/prepare-training-data-bert/customize.py index fbb57ea134..e2c3907e28 100644 --- a/script/prepare-training-data-bert/customize.py +++ b/script/prepare-training-data-bert/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -23,13 +24,25 @@ def preprocess(i): env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") if env.get("CM_TMP_VARIATION", "") == "nvidia": - code_path = os.path.join(env['CM_GIT_REPO_CHECKOUT_PATH'], 'NVIDIA', 'benchmarks', 'bert', 'implementations', 'pytorch-22.09') + code_path = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + 'NVIDIA', + 'benchmarks', + 'bert', + 'implementations', + 'pytorch-22.09') env['CM_RUN_DIR'] = code_path elif env.get("CM_TMP_VARIATION", "") == "reference": - code_path = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], 'language_model', 'tensorflow', 'bert', 'cleanup_scripts') + code_path = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + 'language_model', + 'tensorflow', + 'bert', + 'cleanup_scripts') env['CM_RUN_DIR'] = code_path - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -39,12 +52,15 @@ def postprocess(i): env['CM_MLPERF_TRAINING_BERT_DATA_PATH'] = data_dir if env.get("CM_TMP_VARIATION", "") == "nvidia": - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "hdf5", "eval", "eval_all.hdf5") + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "hdf5", "eval", "eval_all.hdf5") elif env.get("CM_TMP_VARIATION", "") == "reference": - env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "tfrecords", "eval_10k") - env['CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join(data_dir, "tfrecords") + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "tfrecords", "eval_10k") + env['CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join( + data_dir, "tfrecords") env['CM_MLPERF_TRAINING_BERT_VOCAB_PATH'] = env['CM_BERT_VOCAB_FILE_PATH'] env['CM_MLPERF_TRAINING_BERT_CONFIG_PATH'] = env['CM_BERT_CONFIG_FILE_PATH'] - return {'return':0} + return {'return': 0} diff --git a/script/prepare-training-data-resnet/customize.py b/script/prepare-training-data-resnet/customize.py index 494bd894a0..ee4c879cc6 100644 --- a/script/prepare-training-data-resnet/customize.py +++ b/script/prepare-training-data-resnet/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -21,16 +22,24 @@ def preprocess(i): env['CM_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] if env.get("CM_TMP_VARIATION", "") == "nvidia": - code_path = os.path.join(env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], 'MxNet', 'Classification', 'RN50v1.5') + code_path = os.path.join( + env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], + 'MxNet', + 'Classification', + 'RN50v1.5') env['CM_RUN_DIR'] = code_path i['run_script_input']['script_name'] = "run-nvidia" elif env.get("CM_TMP_VARIATION", "") == "reference": - code_path = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], 'image_classification', 'tensorflow2') + code_path = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + 'image_classification', + 'tensorflow2') env['CM_RUN_DIR'] = code_path i['run_script_input']['script_name'] = "run-reference" - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -46,7 +55,9 @@ def postprocess(i): env['CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH'] = data_dir elif env.get("CM_TMP_VARIATION", "") == "reference": 
- env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join(data_dir, "tfrecords") - env['CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join(data_dir, "tfrecords") + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "tfrecords") + env['CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join( + data_dir, "tfrecords") - return {'return':0} + return {'return': 0} diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py index e7768c68fb..fc91b08a88 100644 --- a/script/preprocess-mlperf-inference-submission/customize.py +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -4,6 +4,7 @@ from os.path import exists import shutil + def preprocess(i): os_info = i['os_info'] @@ -12,11 +13,13 @@ def preprocess(i): if submission_dir == "": print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR") - return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} + return {'return': 1, + 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} if not os.path.exists(submission_dir): print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR to a valid submission directory") - return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not existing'} + return {'return': 1, + 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not existing'} submission_dir = submission_dir.rstrip(os.path.sep) submitter = env.get("CM_MLPERF_SUBMITTER", "MLCommons") @@ -27,21 +30,23 @@ def preprocess(i): shutil.rmtree(submission_processed) CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", - "preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + "preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" env['CM_RUN_CMD'] = CMD - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] submission_dir = env["CM_MLPERF_INFERENCE_SUBMISSION_DIR"] import datetime - submission_backup = submission_dir+"_backup_"+'{date:%Y-%m-%d_%H:%M:%S}'.format( date=datetime.datetime.now() ) + submission_backup = submission_dir + "_backup_" + \ + '{date:%Y-%m-%d_%H:%M:%S}'.format(date=datetime.datetime.now()) submission_processed = submission_dir + "_processed" shutil.copytree(submission_dir, submission_backup) shutil.rmtree(submission_dir) os.rename(submission_processed, submission_dir) - return {'return':0} + return {'return': 0} diff --git a/script/print-any-text/customize.py b/script/print-any-text/customize.py index ec28a2c1d8..6437e1c474 100644 --- a/script/print-any-text/customize.py +++ b/script/print-any-text/customize.py @@ -3,6 +3,7 @@ from cmind import utils import os + def postprocess(i): env = i['env'] @@ -11,19 +12,19 @@ def postprocess(i): os_env_keys = env.get('CM_PRINT_ANY_OS_ENV_KEYS', '').strip() printed = False - for k,e,t in [(cm_env_keys, env, 'CM_ENV'), - (os_env_keys, os.environ, 'OS_ENV')]: + for k, e, t in [(cm_env_keys, env, 'CM_ENV'), + (os_env_keys, os.environ, 'OS_ENV')]: - if k!='': + if k != '': for kk in k.split(','): kk = kk.strip() - if kk!='': + if kk != '': vv = e.get(kk) - print ('{}[{}]: {}'.format(t, kk, vv)) + print('{}[{}]: {}'.format(t, kk, vv)) printed = True if printed: - print ('') + print('') - return {'return':0} + return {'return': 0} diff --git a/script/print-croissant-desc/code.py b/script/print-croissant-desc/code.py 
index 480e388bbc..c53ad5fdff 100644 --- a/script/print-croissant-desc/code.py +++ b/script/print-croissant-desc/code.py @@ -3,25 +3,27 @@ import os import mlcroissant as mlc + def main(): url = os.environ.get('CM_PRINT_CROISSANT_URL', '') - if url=='': - print ('Error: --url is not specified') + if url == '': + print('Error: --url is not specified') exit(1) ds = mlc.Dataset(url) metadata = ds.metadata.to_json() - print ('') - print ('Croissant meta data URL: {}'.format(url)) - print ('') - print (f"{metadata['name']}: {metadata['description']}") + print('') + print('Croissant meta data URL: {}'.format(url)) + print('') + print(f"{metadata['name']}: {metadata['description']}") - print ('') + print('') for x in ds.records(record_set="default"): print(x) + if __name__ == '__main__': main() diff --git a/script/print-hello-world-py/app.py b/script/print-hello-world-py/app.py index 9dfd48b33a..12382ac80d 100644 --- a/script/print-hello-world-py/app.py +++ b/script/print-hello-world-py/app.py @@ -1,16 +1,20 @@ def main(): - print ('') + print('') # Import cmind to test break points import cmind.utils import os if os.environ.get('CM_TMP_DEBUG_UID', '') == 'f52670e5f3f345a2': - cmind.utils.debug_here(__file__, port=5678, text='Debugging main.py!').breakpoint() + cmind.utils.debug_here( + __file__, + port=5678, + text='Debugging main.py!').breakpoint() - print ('HELLO WORLD from Python') + print('HELLO WORLD from Python') x = 1 - print (x) + print(x) + if __name__ == '__main__': main() diff --git a/script/print-hello-world-py/customize.py b/script/print-hello-world-py/customize.py index 8308693677..3ee63ecb18 100644 --- a/script/print-hello-world-py/customize.py +++ b/script/print-hello-world-py/customize.py @@ -2,13 +2,14 @@ import os + def preprocess(i): os_info = i['os_info'] env = i['env'] meta = i['meta'] - return {'return':0} + return {'return': 0} def postprocess(i): @@ -16,4 +17,4 @@ def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/process-ae-users/code.py b/script/process-ae-users/code.py index 4bc917ecb8..7f3626fe45 100644 --- a/script/process-ae-users/code.py +++ b/script/process-ae-users/code.py @@ -3,76 +3,78 @@ import json import cmind + def main(): - f = os.environ.get('CM_PROCESS_AE_USERS_INPUT_FILE','') + f = os.environ.get('CM_PROCESS_AE_USERS_INPUT_FILE', '') - print ('Input CSV file: {}'.format(f)) + print('Input CSV file: {}'.format(f)) users = [] with open(f, 'r') as ff: csvreader = csv.DictReader(ff) for row in csvreader: - if len(row)>0: + if len(row) > 0: users.append(row) - print ('') + print('') html = '
    \n' - for user in sorted(users, key = lambda u: (u['last'].lower(), u['first'].lower())): + for user in sorted(users, key=lambda u: ( + u['last'].lower(), u['first'].lower())): - full_name = user['first']+' '+user['last'] + full_name = user['first'] + ' ' + user['last'] - name = full_name + ' ('+user['affiliation']+')' + name = full_name + ' (' + user['affiliation'] + ')' - print (name) + print(name) - html += '
  • '+name+'\n' + html += '
  • ' + name + '\n' # Checking contributor - r = cmind.access({'action':'find', - 'automation':'contributor', - 'artifact':full_name}) - if r['return']>0: return r + r = cmind.access({'action': 'find', + 'automation': 'contributor', + 'artifact': full_name}) + if r['return'] > 0: + return r lst = r['list'] - if len(lst)==0: - print (' CM contributor not found!') + if len(lst) == 0: + print(' CM contributor not found!') meta = { - 'challenges': [ - 'ae-micro2023' - ], - 'last_participation_date': '202309', - 'name': full_name, - 'organization': user['affiliation'] - } - - print (' Adding to mlcommons@ck ...') - r = cmind.access({'out':'con', - 'action':'add', - 'automation':'contributor,68eae17b590d4f8f', # Need UID since using common function - 'artifact':'mlcommons@ck:'+full_name, - 'meta':meta, - 'common':True + 'challenges': [ + 'ae-micro2023' + ], + 'last_participation_date': '202309', + 'name': full_name, + 'organization': user['affiliation'] + } + + print(' Adding to mlcommons@ck ...') + r = cmind.access({'out': 'con', + 'action': 'add', + # Need UID since using common function + 'automation': 'contributor,68eae17b590d4f8f', + 'artifact': 'mlcommons@ck:' + full_name, + 'meta': meta, + 'common': True }) - if r['return']>0: return r - + if r['return'] > 0: + return r html += '
\n' - fo = f+'.html' + fo = f + '.html' - print ('') - print ('Saved HTML to {}'.format(fo)) + print('') + print('Saved HTML to {}'.format(fo)) cmind.utils.save_txt(fo, html) - - - return {'return':0} + return {'return': 0} if __name__ == '__main__': - r=main() - if r['return']>0: + r = main() + if r['return'] > 0: cmind.error(r) diff --git a/script/process-ae-users/customize.py b/script/process-ae-users/customize.py index fef0a3ddfd..e0f6b1ef23 100644 --- a/script/process-ae-users/customize.py +++ b/script/process-ae-users/customize.py @@ -2,9 +2,10 @@ import cmind as cm import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index bb124cc02c..381b1cdcd1 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -2,6 +2,7 @@ import cmind as cm import os + def preprocess(i): os_info = i['os_info'] @@ -13,86 +14,107 @@ def preprocess(i): if results_dir == "": print("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") - return {'return':-1} + return {'return': -1} # In fact, we expect only 1 command line here run_cmds = [] - if env.get('CM_MAX_EXAMPLES', '') != '' and env.get('CM_MLPERF_RUN_STYLE', '') != 'valid': + if env.get('CM_MAX_EXAMPLES', '') != '' and env.get( + 'CM_MLPERF_RUN_STYLE', '') != 'valid': max_examples_string = " --max_examples " + env['CM_MAX_EXAMPLES'] else: max_examples_string = "" results_dir_split = results_dir.split(xsep) dataset = env['CM_DATASET'] - regenerate_accuracy_file = env.get('CM_MLPERF_REGENERATE_ACCURACY_FILE', env.get('CM_RERUN', False)) + regenerate_accuracy_file = env.get( + 'CM_MLPERF_REGENERATE_ACCURACY_FILE', env.get( + 'CM_RERUN', False)) for result_dir in results_dir_split: out_file = os.path.join(result_dir, 'accuracy.txt') - if os.path.exists(out_file) and (os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: + if os.path.exists(out_file) and ( + os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: continue if dataset == "openimages": if env.get('CM_DATASET_PATH_ROOT', '') != '': dataset_dir = env['CM_DATASET_PATH_ROOT'] if 'DATASET_ANNOTATIONS_FILE_PATH' in env: - del(env['DATASET_ANNOTATIONS_FILE_PATH']) + del (env['DATASET_ANNOTATIONS_FILE_PATH']) else: env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - dataset_dir = os.getcwd() # not used, just to keep the script happy - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " "+"'" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ - "accuracy-openimages.py") + "'"+" --mlperf-accuracy-file "+"'" + os.path.join(result_dir, \ - "mlperf_log_accuracy.json") + "'"+" --openimages-dir "+"'" + dataset_dir + "'"+" --verbose > "+"'" + \ + dataset_dir = os.getcwd() # not used, just to keep the script happy + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ out_file + "'" elif dataset == "imagenet": CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", - "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - 
"mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['CM_DATASET_AUX_PATH'], - "val.txt") + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['CM_DATASET_AUX_PATH'], + "val.txt") + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" elif dataset == "squad": CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], - "accuracy-squad.py") + "' --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + \ + "accuracy-squad.py") + "' --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + \ "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ "' --vocab_file '" + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ "' --features_cache_file '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ - "' --output_dtype " + env['CM_ACCURACY_DTYPE'] + env.get('CM_OUTPUT_TRANSPOSED','') + max_examples_string + " > '" + out_file + "'" + "' --output_dtype " + env['CM_ACCURACY_DTYPE'] + env.get( + 'CM_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" elif dataset == "cnndm": if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'intel': accuracy_checker_file = env['CM_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] - env['+PYTHONPATH'] = [os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] - suffix_string = " --model-name-or-path '"+ env['GPTJ_CHECKPOINT_PATH'] +"'" + env['+PYTHONPATH'] = [os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ + os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] + suffix_string = " --model-name-or-path '" + \ + env['GPTJ_CHECKPOINT_PATH'] + "'" else: accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", - "evaluation.py") - suffix_string = " --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + "evaluation.py") + suffix_string = " --dtype " + \ + env.get('CM_ACCURACY_DTYPE', "float32") CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['CM_DATASET_EVAL_PATH'] + "'" +suffix_string +" > '" + out_file + "'" + "' --dataset-file '" + \ + env['CM_DATASET_EVAL_PATH'] + "'" + \ + suffix_string + " > '" + out_file + "'" elif dataset == "openorca": accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", - "evaluate-accuracy.py") - if env.get('CM_VLLM_SERVER_MODEL_NAME','') == '': + "evaluate-accuracy.py") + if env.get('CM_VLLM_SERVER_MODEL_NAME', '') == '': checkpoint_path = env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] else: checkpoint_path = env['CM_VLLM_SERVER_MODEL_NAME'] CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['CM_DATASET_PREPROCESSED_PATH'] + "'"+ " --dtype " + env.get('CM_ACCURACY_DTYPE', "int32") +" > '" + out_file + "'" + "' --dataset-file '" + 
env['CM_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( + 'CM_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" elif dataset == "openorca-gsm8k-mbxp-combined": accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", - "evaluate-accuracy.py") + "evaluate-accuracy.py") CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --dataset-file '" + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'"+ " --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") +" > '" + out_file + "'" - + "' --dataset-file '" + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ + " --dtype " + env.get('CM_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" elif dataset == "coco2014": - env['+PYTHONPATH'] = [ os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") , os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "fid") ] + env['+PYTHONPATH'] = [ + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools"), + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")] extra_options = "" if env.get('CM_SDXL_STATISTICS_FILE_PATH', '') != '': @@ -103,33 +125,39 @@ def preprocess(i): else: extra_options += f""" --compliance-images-path '{os.path.join(result_dir, "images")}' """ - if env.get('CM_COCO2014_SAMPLE_ID_PATH','') != '': + if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': extra_options += f" --ids-path '{env['CM_COCO2014_SAMPLE_ID_PATH']}' " if env.get('CM_SDXL_ACCURACY_RUN_DEVICE', '') != '': extra_options += f" --device '{env['CM_SDXL_ACCURACY_RUN_DEVICE']}' " - - #env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", - "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --caption-path '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "coco2014", "captions", "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" + # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --caption-path '" + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "coco2014", + "captions", + "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" elif dataset == "kits19": CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], - "accuracy_kits.py") + \ + "accuracy_kits.py") + \ "' --preprocessed_data_dir '" + env['CM_DATASET_PREPROCESSED_PATH'] +\ "' --postprocessed_data_dir '" + result_dir +\ "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --output_dtype " + env['CM_ACCURACY_DTYPE'] +" > '" + out_file + "'" + "' --output_dtype " + \ + env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" elif dataset == "librispeech": CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], - "accuracy_eval.py") + \ + "accuracy_eval.py") + \ "' --dataset_dir '" + 
os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") +\ "' --manifest '" + env['CM_DATASET_PREPROCESSED_JSON'] +\ "' --log_dir '" + result_dir + \ - "' --output_dtype " + env['CM_ACCURACY_DTYPE'] +" > '" + out_file + "'" + "' --output_dtype " + \ + env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" elif dataset == "terabyte": extra_options = "" @@ -138,22 +166,28 @@ def preprocess(i): if env.get('CM_DLRM_V2_DAY23_FILE_PATH', '') != '': extra_options += f" --day-23-file '{env['CM_DLRM_V2_DAY23_FILE_PATH']}' " CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", - "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, - "mlperf_log_accuracy.json") + "'" + extra_options + \ - " --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + extra_options + \ + " --dtype " + env.get('CM_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" else: return {'return': 1, 'error': 'Unsupported dataset'} run_cmds.append(CMD) - if os_info['platform'] == 'windows': - env['CM_RUN_CMDS'] = ('\n'.join(run_cmds)).replace("'", '"').replace('>','^>') + env['CM_RUN_CMDS'] = ( + '\n'.join(run_cmds)).replace( + "'", + '"').replace( + '>', + '^>') else: env['CM_RUN_CMDS'] = "??".join(run_cmds) - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -171,15 +205,15 @@ def postprocess(i): accuracy_file = os.path.join(result_dir, "accuracy.txt") if os.path.exists(accuracy_file): - print ('') - print ('Accuracy file: {}'.format(accuracy_file)) - print ('') + print('') + print('Accuracy file: {}'.format(accuracy_file)) + print('') x = '' with open(accuracy_file, "r") as fp: - x=fp.read() + x = fp.read() - if x!='': + if x != '': print(x) # Trying to extract accuracy dict @@ -189,12 +223,12 @@ def postprocess(i): import json try: - z=json.loads(y) - state['app_mlperf_inference_accuracy']=z + z = json.loads(y) + state['app_mlperf_inference_accuracy'] = z break except ValueError as e: pass - print ('') - return {'return':0} + print('') + return {'return': 0} diff --git a/script/prune-bert-models/customize.py b/script/prune-bert-models/customize.py index 34e0810231..ae3a704e98 100644 --- a/script/prune-bert-models/customize.py +++ b/script/prune-bert-models/customize.py @@ -1,44 +1,49 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - ckpt_path = env.get('CM_BERT_PRUNE_CKPT_PATH','') + ckpt_path = env.get('CM_BERT_PRUNE_CKPT_PATH', '') if ckpt_path == '': p = env['CM_ML_MODEL_FILE_WITH_PATH'] x = os.listdir(p) for y in x: if y.startswith('models--'): - z = os.path.join(p,y) + z = os.path.join(p, y) if os.path.isdir(z): z1 = os.path.join(z, 'snapshots') if os.path.isdir(z1): z2 = os.listdir(z1) - if len(z2)>0: - ckpt_path=os.path.join(z1, z2[0]) + if len(z2) > 0: + ckpt_path = os.path.join(z1, z2[0]) env['CM_BERT_PRUNE_CKPT_PATH'] = ckpt_path - out_dir=env.get('CM_BERT_PRUNE_OUTPUT_DIR','') + out_dir = env.get('CM_BERT_PRUNE_OUTPUT_DIR', '') if out_dir == '': out_dir = os.path.join(os.getcwd(), 'pruned-model-output') env['CM_BERT_PRUNE_OUTPUT_DIR'] = out_dir - print ('') - print ('Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) + print('') + print( + 'Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + + 
env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) - print ('') - for k in ["CM_ML_MODEL_FILE_WITH_PATH", "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: - print ('ENV["{}"]: {}'.format(k, env[k])) + print('') + for k in ["CM_ML_MODEL_FILE_WITH_PATH", + "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: + print('ENV["{}"]: {}'.format(k, env[k])) - print ('') + print('') return {'return': 0} + def postprocess(i): env = i['env'] diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py index 104c80131d..2ce02a9dff 100644 --- a/script/publish-results-to-dashboard/code.py +++ b/script/publish-results-to-dashboard/code.py @@ -2,19 +2,20 @@ import os + def main(): # For now quick prototype hardwired to "summary.json" from MLPerf # Later need to clean it and make it universal - print ('') - print ('Reading summary.json ...') - print ('') + print('') + print('Reading summary.json ...') + print('') import json - filename = os.environ.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') - if filename=='': + filename = os.environ.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') + if filename == '': filename = 'summary' - filename+='.json' + filename += '.json' f = open(filename) @@ -22,34 +23,39 @@ def main(): f.close() - print ('=========================================================') - print ('Sending results to W&B dashboard ...') - print ('') + print('=========================================================') + print('Sending results to W&B dashboard ...') + print('') import wandb env = os.environ dashboard_user = env.get('CM_MLPERF_DASHBOARD_WANDB_USER', '') - if dashboard_user == '': dashboard_user = 'cmind' + if dashboard_user == '': + dashboard_user = 'cmind' dashboard_project = env.get('CM_MLPERF_DASHBOARD_WANDB_PROJECT', '') - if dashboard_project == '': dashboard_project = 'cm-mlperf-dse-testing' + if dashboard_project == '': + dashboard_project = 'cm-mlperf-dse-testing' for k in results: - result=results[k] + result = results[k] organization = str(result.get('Organization', '')) - if organization == '': organization = 'anonymous' + if organization == '': + organization = 'anonymous' label = organization - system_name = str(result.get('SystemName','')) - if system_name != '': label += '(' + system_name + ')' + system_name = str(result.get('SystemName', '')) + if system_name != '': + label += '(' + system_name + ')' qps = result.get('Result', 0.0) - # since v4.1 mlperf results return a key:value pairs for accuracy. We are taking only the first key:value here + # since v4.1 mlperf results return a key:value pairs for accuracy. 
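# Per the comment above, v4.1+ summaries report accuracy as a dict of
# key:value pairs and only the first pair is kept. A sketch of that
# extraction (the sample dict is made up for illustration):

result_acc = {"ROUGE1": 42.99, "ROUGE2": 20.10}   # hypothetical sample
accuracy = 0.0
if result_acc:
    # next(iter(...)) yields the first key in insertion order (Python 3.7+)
    accuracy = result_acc[next(iter(result_acc))]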
We + # are taking only the first key:value here result_acc = result.get('Accuracy') accuracy = 0.0 if result_acc: @@ -63,36 +69,37 @@ def main(): # Check extra env variables x = { - "lang": "CM_MLPERF_LANG", - "device": "CM_MLPERF_DEVICE", - "submitter": "CM_MLPERF_SUBMITTER", - "backend": "CM_MLPERF_BACKEND", - "model": "CM_MLPERF_MODEL", - "run_style": "CM_MLPERF_RUN_STYLE", - "rerun": "CM_RERUN", - "hw_name": "CM_HW_NAME", - "max_batchsize": "CM_MLPERF_LOADGEN_MAX_BATCHSIZE", - "num_threads": "CM_NUM_THREADS", - "scenario": "CM_MLPERF_LOADGEN_SCENARIO", - "test_query_count": "CM_TEST_QUERY_COUNT", - "run_checker": "CM_RUN_SUBMISSION_CHECKER", - "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY" + "lang": "CM_MLPERF_LANG", + "device": "CM_MLPERF_DEVICE", + "submitter": "CM_MLPERF_SUBMITTER", + "backend": "CM_MLPERF_BACKEND", + "model": "CM_MLPERF_MODEL", + "run_style": "CM_MLPERF_RUN_STYLE", + "rerun": "CM_RERUN", + "hw_name": "CM_HW_NAME", + "max_batchsize": "CM_MLPERF_LOADGEN_MAX_BATCHSIZE", + "num_threads": "CM_NUM_THREADS", + "scenario": "CM_MLPERF_LOADGEN_SCENARIO", + "test_query_count": "CM_TEST_QUERY_COUNT", + "run_checker": "CM_RUN_SUBMISSION_CHECKER", + "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY" } for k in x: env_key = x[k] - if os.environ.get(env_key,'')!='': - result['cm_misc_input_'+k]=os.environ[env_key] + if os.environ.get(env_key, '') != '': + result['cm_misc_input_' + k] = os.environ[env_key] - wandb.init(entity = dashboard_user, - project = dashboard_project, - name = label) + wandb.init(entity=dashboard_user, + project=dashboard_project, + name=label) wandb.log(result) wandb.finish() - print ('=========================================================') + print('=========================================================') + if __name__ == '__main__': main() diff --git a/script/pull-git-repo/customize.py b/script/pull-git-repo/customize.py index 021d42465e..55a581bb5c 100644 --- a/script/pull-git-repo/customize.py +++ b/script/pull-git-repo/customize.py @@ -2,22 +2,23 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] meta = i['meta'] if 'CM_GIT_CHECKOUT_PATH' not in env: - return {'return':1, 'error': 'CM_GIT_CHECKOUT_PATH is not set'} + return {'return': 1, 'error': 'CM_GIT_CHECKOUT_PATH is not set'} env['CM_GIT_PULL_CMD'] = "git pull --rebase" - return {'return':0} + return {'return': 0} def postprocess(i): @@ -25,4 +26,4 @@ def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/push-csv-to-spreadsheet/customize.py b/script/push-csv-to-spreadsheet/customize.py index e80f262666..213925e2f0 100644 --- a/script/push-csv-to-spreadsheet/customize.py +++ b/script/push-csv-to-spreadsheet/customize.py @@ -2,6 +2,7 @@ import cmind as cm import os + def preprocess(i): os_info = i['os_info'] @@ -9,7 +10,8 @@ def preprocess(i): meta = i['meta'] automation = i['automation'] - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/push-csv-to-spreadsheet/google_api.py b/script/push-csv-to-spreadsheet/google_api.py index d1e7643aa4..24926daed0 100644 --- a/script/push-csv-to-spreadsheet/google_api.py +++ b/script/push-csv-to-spreadsheet/google_api.py @@ -45,7 +45,12 @@ def main(): f = open(csv_file, "r") values = [r for r 
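# The mapping above copies selected CM_* environment variables into the
# result record before it is sent to W&B. The same loop in isolation
# (dict abbreviated to two entries):

import os

extra_inputs = {
    "device": "CM_MLPERF_DEVICE",
    "scenario": "CM_MLPERF_LOADGEN_SCENARIO",
}

result = {}
for label, env_key in extra_inputs.items():
    if os.environ.get(env_key, '') != '':
        result['cm_misc_input_' + label] = os.environ[env_key]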
in csv.reader(f)] - request = service.spreadsheets().values().update(spreadsheetId=DOCUMENT_ID, range=sheet_name, valueInputOption="USER_ENTERED", body={"values": values}).execute() + request = service.spreadsheets().values().update( + spreadsheetId=DOCUMENT_ID, + range=sheet_name, + valueInputOption="USER_ENTERED", + body={ + "values": values}).execute() except HttpError as err: print(err) diff --git a/script/push-mlperf-inference-results-to-github/customize.py b/script/push-mlperf-inference-results-to-github/customize.py index dcdc3c0bfe..f1cfe1ebab 100644 --- a/script/push-mlperf-inference-results-to-github/customize.py +++ b/script/push-mlperf-inference-results-to-github/customize.py @@ -2,6 +2,7 @@ import cmind as cm import os + def preprocess(i): os_info = i['os_info'] @@ -19,17 +20,20 @@ def preprocess(i): else: extra_tags_string = "" - r = automation.update_deps({'deps':meta['prehook_deps'], - 'update_deps':{ - 'get-git-repo':{ - 'tags':"_repo."+repo+extra_tags_string - } - } - }) - if r['return']>0: return r - env['CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get('CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') + r = automation.update_deps({'deps': meta['prehook_deps'], + 'update_deps': { + 'get-git-repo': { + 'tags': "_repo." + repo + extra_tags_string + } + } + }) + if r['return'] > 0: + return r + env['CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get( + 'CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') + + return {'return': 0} - return {'return':0} def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/remote-run-commands/customize.py b/script/remote-run-commands/customize.py index 492fa4b5ca..bd5a0f9dbf 100644 --- a/script/remote-run-commands/customize.py +++ b/script/remote-run-commands/customize.py @@ -1,25 +1,26 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] env = i['env'] - cmd_string='' + cmd_string = '' - #pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) + # pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', []) run_cmds = env.get('CM_SSH_RUN_COMMANDS', []) run_cmds = pre_run_cmds + run_cmds - for i,cmd in enumerate(run_cmds): + for i, cmd in enumerate(run_cmds): if 'cm ' in cmd: - #cmd=cmd.replace(":", "=") - cmd=cmd.replace(";;", ",") + # cmd=cmd.replace(":", "=") + cmd = cmd.replace(";;", ",") run_cmds[i] = cmd cmd_string += " ; ".join(run_cmds) @@ -27,7 +28,7 @@ def preprocess(i): password = env.get('CM_SSH_PASSWORD', None) host = env.get('CM_SSH_HOST') if password: - password_string = " -p "+password + password_string = " -p " + password else: password_string = "" cmd_extra = '' @@ -35,13 +36,15 @@ def preprocess(i): if env.get("CM_SSH_SKIP_HOST_VERIFY"): cmd_extra += " -o StrictHostKeyChecking=no" if env.get("CM_SSH_KEY_FILE"): - cmd_extra += " -i "+env.get("CM_SSH_KEY_FILE") + cmd_extra += " -i " + env.get("CM_SSH_KEY_FILE") - ssh_command = "ssh "+user+"@"+host+password_string+ cmd_extra + " '"+cmd_string + "'" + ssh_command = "ssh " + user + "@" + host + \ + password_string + cmd_extra + " '" + cmd_string + "'" env['CM_SSH_CMD'] = ssh_command - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/reproduce-ieee-acm-micro2023-paper-22/customize.py b/script/reproduce-ieee-acm-micro2023-paper-22/customize.py index d12f9b3e1d..273999d460 100644 --- 
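# The ssh line above is concatenated from user, host, password and key
# options. A sketch of the same call as an argv list (an alternative
# formulation, not this script's code) that avoids most quoting issues.
# Note that plain ssh's -p flag sets the port, so the password string
# above presumably assumes an sshpass-style wrapper:

import subprocess

user, host = "ubuntu", "192.168.0.10"    # illustrative values
cmd_string = "cm version ; echo done"
argv = ["ssh", f"{user}@{host}", "-o", "StrictHostKeyChecking=no", cmd_string]
# subprocess.run(argv, check=True)       # commented out: needs a live host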
a/script/reproduce-ieee-acm-micro2023-paper-22/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-22/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-28/customize.py b/script/reproduce-ieee-acm-micro2023-paper-28/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-28/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-28/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-33/customize.py b/script/reproduce-ieee-acm-micro2023-paper-33/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-33/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-33/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-5/customize.py b/script/reproduce-ieee-acm-micro2023-paper-5/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-5/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-5/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-5/main.py b/script/reproduce-ieee-acm-micro2023-paper-5/main.py
index d851f1450f..caa499bf08 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-5/main.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-5/main.py
@@ -2,9 +2,9 @@
 
 if __name__ == "__main__":
 
-    print ('')
-    print ('Main script:')
-    print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT','')))
-    print ('')
+    print('')
+    print('Main script:')
+    print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+    print('')
 
     exit(0)
diff --git a/script/reproduce-ieee-acm-micro2023-paper-8/customize.py b/script/reproduce-ieee-acm-micro2023-paper-8/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-8/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-8/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-8/main.py b/script/reproduce-ieee-acm-micro2023-paper-8/main.py
index d851f1450f..caa499bf08 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-8/main.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-8/main.py
@@ -2,9 +2,9 @@
 
 if __name__ == "__main__":
 
-    print ('')
-    print ('Main script:')
-    print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT','')))
-    print ('')
+    print('')
+    print('Main script:')
+    print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+    print('')
 
     exit(0)
diff --git a/script/reproduce-ieee-acm-micro2023-paper-85/customize.py b/script/reproduce-ieee-acm-micro2023-paper-85/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-85/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-85/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-96/customize.py b/script/reproduce-ieee-acm-micro2023-paper-96/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-96/customize.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-96/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):
 
     quiet = (env.get('CM_QUIET', False) == 'yes')
 
-    return {'return':0}
+    return {'return': 0}
+
 
 def postprocess(i):
 
     env = i['env']
 
-    return {'return':0}
+    return {'return': 0}
diff --git a/script/reproduce-ieee-acm-micro2023-paper-96/main.py b/script/reproduce-ieee-acm-micro2023-paper-96/main.py
index d851f1450f..caa499bf08 100644
--- a/script/reproduce-ieee-acm-micro2023-paper-96/main.py
+++ b/script/reproduce-ieee-acm-micro2023-paper-96/main.py
@@ -2,9 +2,9 @@
 
 if __name__ == "__main__":
 
-    print ('')
-    print ('Main script:')
-    print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT','')))
-    print ('')
+    print('')
+    print('Main script:')
+    print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+    print('')
 
     exit(0)
diff --git a/script/reproduce-ipol-paper-2022-439/customize.py b/script/reproduce-ipol-paper-2022-439/customize.py
index 6b57ab932f..fca74f12fc 100644
--- a/script/reproduce-ipol-paper-2022-439/customize.py
+++ b/script/reproduce-ipol-paper-2022-439/customize.py
@@ -1,34 +1,40 @@
 from cmind import utils
 import os
 
+
 def preprocess(i):
 
     os_info = i['os_info']
 
     env = i['env']
 
     # Check if input files are empty and add files
-    input_file_1 = env.get('CM_INPUT_1','')
-    if input_file_1 == '': input_file_1 = 'ipol-paper-2024-439-sample-image-1.png'
+    input_file_1 = env.get('CM_INPUT_1', '')
+    if input_file_1 == '':
+        input_file_1 = 'ipol-paper-2024-439-sample-image-1.png'
 
     if not os.path.isfile(input_file_1):
-        return {'return':1, 'error':'input file 1 "{}" not found'.format(input_file_1)}
+        return {'return': 1,
+                'error': 'input file 1 "{}" not found'.format(input_file_1)}
 
-    env['CM_INPUT_1']=os.path.abspath(input_file_1)
+    env['CM_INPUT_1'] = os.path.abspath(input_file_1)
 
-    input_file_2 = env.get('CM_INPUT_2','')
-    if input_file_2 == '': input_file_2 = 'ipol-paper-2024-439-sample-image-2.png'
+    input_file_2 = env.get('CM_INPUT_2', '')
+    if input_file_2 == '':
+        input_file_2 = 'ipol-paper-2024-439-sample-image-2.png'
 
     if not os.path.isfile(input_file_2):
-        return {'return':1, 'error':'input file 2 "{}" not found'.format(input_file_2)}
+        return {'return': 1,
+                'error': 'input 
file 2 "{}" not found'.format(input_file_2)} + + env['CM_INPUT_2'] = os.path.abspath(input_file_2) - env['CM_INPUT_2']=os.path.abspath(input_file_2) + return {'return': 0} - return {'return':0} def postprocess(i): - print ('') - print ('Please check "diff.png"') - print ('') + print('') + print('Please check "diff.png"') + print('') - return {'return':0} + return {'return': 0} diff --git a/script/reproduce-mlperf-octoml-tinyml-results/customize.py b/script/reproduce-mlperf-octoml-tinyml-results/customize.py index f63d95abaa..7e9eac6901 100644 --- a/script/reproduce-mlperf-octoml-tinyml-results/customize.py +++ b/script/reproduce-mlperf-octoml-tinyml-results/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -12,12 +13,13 @@ def preprocess(i): env['CM_TINY_MODEL'] = 'ic' if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env['+C_INCLUDE_PATH'] = [] - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/reproduce-mlperf-training-nvidia/customize.py b/script/reproduce-mlperf-training-nvidia/customize.py index 25e9929d82..cdc98cc7c8 100644 --- a/script/reproduce-mlperf-training-nvidia/customize.py +++ b/script/reproduce-mlperf-training-nvidia/customize.py @@ -2,17 +2,19 @@ import os import shutil + def preprocess(i): os_info = i['os_info'] if os_info['platform'] == 'windows': - return {'return':1, 'error': 'Windows is not supported in this script yet'} + return {'return': 1, 'error': 'Windows is not supported in this script yet'} env = i['env'] conf = env.get('CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME', '') if conf == "": - return {'return':1, 'error': 'Please provide --system_conf_name='} + return {'return': 1, + 'error': 'Please provide --system_conf_name='} if not conf.endswith(".sh"): conf = conf + ".sh" @@ -23,10 +25,11 @@ def preprocess(i): env['CONFIG_FILE'] = conf # print(env) - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/run-all-mlperf-models/customize.py b/script/run-all-mlperf-models/customize.py index fda731f9d7..7887602c08 100644 --- a/script/run-all-mlperf-models/customize.py +++ b/script/run-all-mlperf-models/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -29,18 +30,22 @@ def preprocess(i): power = env.get('POWER', '') - if str(power).lower() in [ "yes", "true" ]: - POWER_STRING = " --power yes --adr.mlperf-power-client.power_server=" + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + env.get('POWER_SERVER_PORT', '4950') + " " + if str(power).lower() in ["yes", "true"]: + POWER_STRING = " --power yes --adr.mlperf-power-client.power_server=" + \ + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + \ + env.get('POWER_SERVER_PORT', '4950') + " " else: POWER_STRING = "" if not devices: - return {'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + return { + 'return': 1, 'error': 'No device specified. 
Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} for model in models: env['MODEL'] = model cmds = [] - run_script_content = '#!/bin/bash\nsource '+ os.path.join(script_path, "run-template.sh") + run_script_content = '#!/bin/bash\nsource ' + \ + os.path.join(script_path, "run-template.sh") if not backends: if implementation == "reference": @@ -65,10 +70,17 @@ def preprocess(i): for backend in backends: for device in devices: - offline_target_qps = (((state.get(model, {})).get(device, {})).get(backend, {})).get('offline_target_qps') + offline_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('offline_target_qps') if offline_target_qps: pass - else: #try to do a test run with reasonable number of samples to get and record the actual system performance + else: # try to do a test run with reasonable number of samples to get and record the actual system performance if device == "cpu": if model == "resnet50": test_query_count = 1000 @@ -81,23 +93,20 @@ def preprocess(i): test_query_count = 1000 cmd = f'run_test "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' cmds.append(cmd) - #second argument is unused for submission_cmd + # second argument is unused for submission_cmd cmd = f'run_test "{backend}" "100" "{implementation}" "{device}" "$submission_cmd"' cmds.append(cmd) - run_file_name = 'tmp-'+model+'-run' - run_script_content += "\n\n" +"\n\n".join(cmds) - with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: + run_file_name = 'tmp-' + model + '-run' + run_script_content += "\n\n" + "\n\n".join(cmds) + with open(os.path.join(script_path, run_file_name + ".sh"), 'w') as f: f.write(run_script_content) print(cmds) + return {'return': 0} - - - return {'return':0} - def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index 188cd72d90..9703080d0e 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -4,28 +4,33 @@ import subprocess from os.path import exists + def preprocess(i): os_info = i['os_info'] env = i['env'] - interactive = env.get('CM_DOCKER_INTERACTIVE_MODE','') + interactive = env.get('CM_DOCKER_INTERACTIVE_MODE', '') - if str(interactive).lower() in ['yes', 'true', '1' ]: - env['CM_DOCKER_DETACHED_MODE']='no' + if str(interactive).lower() in ['yes', 'true', '1']: + env['CM_DOCKER_DETACHED_MODE'] = 'no' if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: env['CM_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" - CM_RUN_CMD="cm version" + CM_RUN_CMD = "cm version" else: - CM_RUN_CMD="cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + CM_RUN_CMD = "cm run script --tags=" + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' - r = cm.access({'action':'search', - 'automation':'script', + r = cm.access({'action': 'search', + 'automation': 'script', 'tags': env['CM_DOCKER_RUN_SCRIPT_TAGS']}) if len(r['list']) < 1: - raise Exception('CM script with tags '+ env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' not found!') + raise Exception( + 'CM script with tags ' + + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + + ' not found!') PATH = r['list'][0].path os.chdir(PATH) @@ -40,26 +45,30 @@ def preprocess(i): docker_image_name = env['CM_DOCKER_IMAGE_NAME'] docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] - DOCKER_CONTAINER = docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + 
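# The chained .get() calls above walk state[model][device][backend] with
# empty-dict defaults to reach 'offline_target_qps'. A generic sketch of
# that lookup (helper name illustrative):

def dig(d, *keys, default=None):
    # Descend through nested dicts, returning default on any missing key.
    for k in keys:
        if not isinstance(d, dict) or k not in d:
            return default
        d = d[k]
    return d


# dig(state, model, device, backend, 'offline_target_qps')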
DOCKER_CONTAINER = docker_image_repo + "/" + \ + docker_image_name + ":" + docker_image_tag - print ('') - print ('Checking existing Docker container:') - print ('') + print('') + print('Checking existing Docker container:') + print('') CMD = f"""docker ps --filter "ancestor={DOCKER_CONTAINER}" """ if os_info['platform'] == 'windows': CMD += " 2> nul" else: CMD += " 2> /dev/null" - print (' '+CMD) - print ('') + print(' ' + CMD) + print('') try: - docker_container = subprocess.check_output(CMD, shell=True).decode("utf-8") + docker_container = subprocess.check_output( + CMD, shell=True).decode("utf-8") except Exception as e: - return {'return':1, 'error':'Docker is either not installed or not started:\n{}'.format(e)} + return { + 'return': 1, 'error': 'Docker is either not installed or not started:\n{}'.format(e)} output_split = docker_container.split("\n") - if len(output_split) > 1 and str(env.get('CM_DOCKER_REUSE_EXISTING_CONTAINER', '')).lower() in [ "1", "true", "yes" ]: #container exists + if len(output_split) > 1 and str(env.get('CM_DOCKER_REUSE_EXISTING_CONTAINER', + '')).lower() in ["1", "true", "yes"]: # container exists out = output_split[1].split(" ") existing_container_id = out[0] print(f"Reusing existing container {existing_container_id}") @@ -67,25 +76,27 @@ def preprocess(i): else: if env.get('CM_DOCKER_CONTAINER_ID', '') != '': - del(env['CM_DOCKER_CONTAINER_ID']) #not valid ID + del (env['CM_DOCKER_CONTAINER_ID']) # not valid ID - CMD = "docker images -q " + DOCKER_CONTAINER + CMD = "docker images -q " + DOCKER_CONTAINER if os_info['platform'] == 'windows': CMD += " 2> nul" else: CMD += " 2> /dev/null" - print ('') - print ('Checking Docker images:') - print ('') - print (' '+CMD) - print ('') + print('') + print('Checking Docker images:') + print('') + print(' ' + CMD) + print('') try: - docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8") + docker_image = subprocess.check_output( + CMD, shell=True).decode("utf-8") except Exception as e: - return {'return':1, 'error':'Docker is either not installed or not started:\n{}'.format(e)} + return { + 'return': 1, 'error': 'Docker is either not installed or not started:\n{}'.format(e)} recreate_image = env.get('CM_DOCKER_IMAGE_RECREATE', '') @@ -97,8 +108,8 @@ def preprocess(i): # elif recreate_image == "yes": # env['CM_DOCKER_IMAGE_RECREATE'] = "no" + return {'return': 0} - return {'return':0} def postprocess(i): @@ -106,7 +117,6 @@ def postprocess(i): env = i['env'] - # Updating Docker info update_docker_info(env) @@ -120,16 +130,15 @@ def postprocess(i): port_map_cmds = [] run_opts = '' - #not completed as su command breaks the execution sequence + # not completed as su command breaks the execution sequence # - #if env.get('CM_DOCKER_PASS_USER_ID', '') != '': + # if env.get('CM_DOCKER_PASS_USER_ID', '') != '': # run_opts += " --user 0 " # run_cmds.append(f"(usermod -u {os.getuid()} cmuser || echo pass)") # run_cmds.append(f"(chown -R {os.getuid()}:{os.getuid()} /home/cmuser || echo pass)") # run_cmds.append(" ( su cmuser )") # run_cmds.append('export PATH="/home/cmuser/venv/cm/bin:$PATH"') - if env.get('CM_DOCKER_PRE_RUN_COMMANDS', []): for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: run_cmds.append(pre_run_cmd) @@ -142,7 +151,7 @@ def postprocess(i): run_opts += " --group-add $(id -g $USER) " if env.get('CM_DOCKER_ADD_DEVICE', '') != '': - run_opts += " --device="+env['CM_DOCKER_ADD_DEVICE'] + run_opts += " --device=" + env['CM_DOCKER_ADD_DEVICE'] if env.get('CM_DOCKER_PRIVILEGED_MODE', '') == 'yes': 
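# The container check above shells out to `docker ps` filtered by image
# ancestry; anything past the header row is a running container. The same
# probe in isolation (image name illustrative):

import subprocess

image = "ubuntu:22.04"
try:
    out = subprocess.check_output(
        f'docker ps --filter "ancestor={image}"', shell=True).decode("utf-8")
    running = len(out.strip().split("\n")) > 1
except Exception:
    running = False   # Docker missing or the daemon is not started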
run_opts += " --privileged " @@ -162,7 +171,8 @@ def postprocess(i): for ports in env['CM_DOCKER_PORT_MAPS']: port_map_cmds.append(ports) - run_cmd = env['CM_DOCKER_RUN_CMD'] + " " +env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":","=") + run_cmd = env['CM_DOCKER_RUN_CMD'] + " " + \ + env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") run_cmds.append(run_cmd) if 'CM_DOCKER_POST_RUN_COMMANDS' in env: for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: @@ -172,18 +182,20 @@ def postprocess(i): run_cmd = run_cmd.replace("--docker_run_deps", "") if mount_cmds: - for i,mount_cmd in enumerate(mount_cmds): + for i, mount_cmd in enumerate(mount_cmds): # Since windows may have 2 :, we search from the right j = mount_cmd.rfind(':') - if j>0: - mount_parts = [mount_cmd[:j], mount_cmd[j+1:]] + if j > 0: + mount_parts = [mount_cmd[:j], mount_cmd[j + 1:]] else: - return {'return':1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount_cmd)} + return {'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format( + mount_cmd)} # mount_parts = mount_cmd.split(":") # if len(mount_parts) != 2: -# return {'return': 1, 'error': 'Invalid mount {} specified'.format(mount_parts)} +# return {'return': 1, 'error': 'Invalid mount {} +# specified'.format(mount_parts)} host_mount = mount_parts[0] @@ -192,7 +204,8 @@ def postprocess(i): abs_host_mount = os.path.abspath(mount_parts[0]) - if abs_host_mount != host_mount or " " in abs_host_mount and not host_mount.startswith('"'): + if abs_host_mount != host_mount or " " in abs_host_mount and not host_mount.startswith( + '"'): mount_cmds[i] = f"\"{abs_host_mount}\":{mount_parts[1]}" mount_cmd_string = " -v " + " -v ".join(mount_cmds) @@ -208,37 +221,51 @@ def postprocess(i): run_opts += port_map_cmd_string # Currently have problem running Docker in detached mode on Windows: - detached = str(env.get('CM_DOCKER_DETACHED_MODE','')).lower() in ['yes', 'true', "1"] + detached = str( + env.get( + 'CM_DOCKER_DETACHED_MODE', + '')).lower() in [ + 'yes', + 'true', + "1"] # if detached and os_info['platform'] != 'windows': if detached: if os_info['platform'] == 'windows': - return {'return':1, 'error':'Currently we don\'t support running Docker containers in detached mode on Windows - TBD'} + return { + 'return': 1, 'error': 'Currently we don\'t support running Docker containers in detached mode on Windows - TBD'} existing_container_id = env.get('CM_DOCKER_CONTAINER_ID', '') if existing_container_id: CMD = f"ID={existing_container_id} && docker exec $ID bash -c '" + run_cmd + "'" else: - CONTAINER=f"docker run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash" + CONTAINER = f"docker run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash" CMD = f"ID=`{CONTAINER}` && docker exec $ID bash -c '{run_cmd}'" - if False and str(env.get('CM_KEEP_DETACHED_CONTAINER', '')).lower() not in [ 'yes', "1", 'true' ]: - CMD += " && docker kill $ID >/dev/null" + if False and str(env.get('CM_KEEP_DETACHED_CONTAINER', '')).lower() not in [ + 'yes', "1", 'true']: + CMD += " && docker kill $ID >/dev/null" CMD += ' && echo "ID=$ID"' - print ('=========================') - print ("Container launch command:") - print ('') - print (CMD) - print ('') - print ("Running "+run_cmd+" inside docker container") + print('=========================') + print("Container launch command:") + print('') + print(CMD) + print('') + print("Running " + run_cmd + " inside docker container") - record_script({'cmd':CMD, 
'env': env}) + record_script({'cmd': CMD, 'env': env}) - print ('') + print('') # Execute the command try: - result = subprocess.run(CMD, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + CMD, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) print("Command Output:", result.stdout) except subprocess.CalledProcessError as e: print("Error Occurred!") @@ -248,7 +275,7 @@ def postprocess(i): return {'return': 1, 'error': e.stderr} docker_out = result.stdout - #if docker_out != 0: + # if docker_out != 0: # return {'return': docker_out, 'error': 'docker run failed'} lines = docker_out.split("\n") @@ -273,23 +300,24 @@ def postprocess(i): x1 = '-it' x2 = " && bash ) || bash" + CONTAINER = "docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + \ + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + CMD = CONTAINER + " bash -c " + x + run_cmd_prefix + run_cmd + x2 + x - CONTAINER="docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag - CMD = CONTAINER + " bash -c " + x + run_cmd_prefix + run_cmd + x2 + x + print('') + print("Container launch command:") + print('') + print(CMD) - print ('') - print ("Container launch command:") - print ('') - print (CMD) + record_script({'cmd': CMD, 'env': env}) - record_script({'cmd':CMD, 'env': env}) - - print ('') + print('') docker_out = os.system(CMD) if docker_out != 0: return {'return': docker_out, 'error': 'docker run failed'} - return {'return':0} + return {'return': 0} + def record_script(i): @@ -308,14 +336,15 @@ def record_script(i): if save_script.endswith('.bat') or save_script.endswith('.sh'): files.append(save_script) else: - files.append(save_script+'.bat') - files.append(save_script+'.sh') + files.append(save_script + '.bat') + files.append(save_script + '.sh') for filename in files: - with open (filename, 'w') as f: + with open(filename, 'w') as f: f.write(cmd + '\n') - return {'return':0} + return {'return': 0} + def update_docker_info(env): @@ -326,7 +355,8 @@ def update_docker_info(env): docker_image_base = env.get('CM_DOCKER_IMAGE_BASE') if not docker_image_base: if env.get("CM_DOCKER_OS", '') != '': - docker_image_base = env["CM_DOCKER_OS"]+":"+env["CM_DOCKER_OS_VERSION"] + docker_image_base = env["CM_DOCKER_OS"] + \ + ":" + env["CM_DOCKER_OS_VERSION"] else: docker_image_base = "ubuntu:22.04" @@ -335,12 +365,15 @@ def update_docker_info(env): if env.get('CM_DOCKER_IMAGE_NAME', '') != '': docker_image_name = env['CM_DOCKER_IMAGE_NAME'] else: - docker_image_name = 'cm-script-'+env['CM_DOCKER_RUN_SCRIPT_TAGS'].replace(',', '-').replace('_','-').replace('+','plus') + docker_image_name = 'cm-script-' + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'].replace( + ',', '-').replace('_', '-').replace('+', 'plus') env['CM_DOCKER_IMAGE_NAME'] = docker_image_name docker_image_tag_extra = env.get('CM_DOCKER_IMAGE_TAG_EXTRA', '-latest') - docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace(':','-').replace('_','').replace("/","-") + docker_image_tag_extra) + docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace( + ':', '-').replace('_', '').replace("/", "-") + docker_image_tag_extra) env['CM_DOCKER_IMAGE_TAG'] = docker_image_tag return diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index d2559d8b6a..67f3493448 100644 --- 
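# The launch above runs the composed line through subprocess.run with
# check=True and captured output, so stderr can be surfaced on failure.
# The same pattern in isolation (command illustrative):

import subprocess

try:
    result = subprocess.run("echo hello", shell=True, check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            text=True)
    print("Command Output:", result.stdout)
except subprocess.CalledProcessError as e:
    print("Error Occurred!", e.stderr)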
a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -9,7 +9,9 @@ summary_ext = ['.csv', '.json', '.xlsx'] -################################################################################## +########################################################################## + + def preprocess(i): os_info = i['os_info'] @@ -21,9 +23,9 @@ def preprocess(i): script_path = i['run_script_input']['path'] if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": - return {'return':0} + return {'return': 0} - if env.get('CM_DOCKER_IMAGE_NAME', '') == 'scc24': + if env.get('CM_DOCKER_IMAGE_NAME', '') == 'scc24': if env.get("CM_MLPERF_IMPLEMENTATION", "reference") == "reference": env['CM_DOCKER_IMAGE_NAME'] = "scc24-reference" elif "nvidia" in env.get("CM_MLPERF_IMPLEMENTATION", "reference"): @@ -38,15 +40,15 @@ def preprocess(i): env['CM_MODEL'] = env['CM_MLPERF_MODEL'] # Clean MLPerf inference output tar file if non-standard - x=env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE','') - if x!='' and os.path.isfile(x): + x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') + if x != '' and os.path.isfile(x): os.remove(x) # Clean MLPerf inference submission summary files - x=env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') - if x!='': + x = env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') + if x != '': for y in summary_ext: - z = x+y + z = x + y if os.path.isfile(z): os.remove(z) @@ -59,7 +61,8 @@ def preprocess(i): system_meta['division'] = division if system_meta.get('division', '') != "closed": - env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" #no compliance runs needed for open division + # no compliance runs needed for open division + env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" clean = False @@ -70,17 +73,19 @@ def preprocess(i): if 'CM_RERUN' not in env: env['CM_RERUN'] = "yes" - if str(env.get('CM_SYSTEM_POWER','no')).lower() != "no" or env.get('CM_MLPERF_POWER', '') == "yes": + if str(env.get('CM_SYSTEM_POWER', 'no')).lower( + ) != "no" or env.get('CM_MLPERF_POWER', '') == "yes": power_variation = ",_power" env['CM_MLPERF_POWER'] = "yes" else: power_variation = "" - if env.get('CM_RUN_STYLE', '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: + if env.get('CM_RUN_STYLE', + '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: env['CM_RUN_MLPERF_ACCURACY'] = "on" - print("Using MLCommons Inference source from " + env['CM_MLPERF_INFERENCE_SOURCE']) - + print("Using MLCommons Inference source from " + + env['CM_MLPERF_INFERENCE_SOURCE']) if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" @@ -94,44 +99,61 @@ def preprocess(i): env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": - env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios(env['CM_MODEL'], system_meta.get('system_type', 'edge'), env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE']) + env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( + env['CM_MODEL'], + system_meta.get( + 'system_type', + 'edge'), + env['CM_MLPERF_LAST_RELEASE'], + env['CM_MLPERF_INFERENCE_SOURCE']) else: system_meta = {} - env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ env['CM_MLPERF_LOADGEN_SCENARIO'] ] + env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ + env['CM_MLPERF_LOADGEN_SCENARIO']] if env.get('CM_MLPERF_LOADGEN_ALL_MODES', '') == "yes": - env['CM_MLPERF_LOADGEN_MODES'] = [ "performance", "accuracy" ] + env['CM_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] else: - env['CM_MLPERF_LOADGEN_MODES'] = [ 
env['CM_MLPERF_LOADGEN_MODE'] ] + env['CM_MLPERF_LOADGEN_MODES'] = [env['CM_MLPERF_LOADGEN_MODE']] if env.get('OUTPUT_BASE_DIR', '') == '': - env['OUTPUT_BASE_DIR'] = env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) - + env['OUTPUT_BASE_DIR'] = env.get( + 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) test_list = ["TEST01"] - if env['CM_MODEL'] in ["resnet50", "sdxl"]: + if env['CM_MODEL'] in ["resnet50", "sdxl"]: test_list.append("TEST04") if "gpt" in env['CM_MODEL'] or "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env['CM_MODEL']: test_list.remove("TEST01") - #test_list.remove("TEST05") + # test_list.remove("TEST05") - if "llama2" in env['CM_MODEL'].lower() or "mixtral-8x7b" in env['CM_MODEL']: + if "llama2" in env['CM_MODEL'].lower( + ) or "mixtral-8x7b" in env['CM_MODEL']: test_list.append("TEST06") - variation_implementation= "_" + env.get("CM_MLPERF_IMPLEMENTATION", "reference") - variation_model= ",_" + env["CM_MLPERF_MODEL"] - variation_backend= ",_" + env["CM_MLPERF_BACKEND"] if env.get("CM_MLPERF_BACKEND","") != "" else "" - variation_device= ",_" + env["CM_MLPERF_DEVICE"] if env.get("CM_MLPERF_DEVICE","") != "" else "" - variation_run_style= ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") - variation_reproducibility= ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get("CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS","") != "" else "" - variation_all_models= ",_all-models" if env.get("CM_MLPERF_ALL_MODELS","") == "yes" else "" + variation_implementation = "_" + \ + env.get("CM_MLPERF_IMPLEMENTATION", "reference") + variation_model = ",_" + env["CM_MLPERF_MODEL"] + variation_backend = ",_" + \ + env["CM_MLPERF_BACKEND"] if env.get( + "CM_MLPERF_BACKEND", "") != "" else "" + variation_device = ",_" + \ + env["CM_MLPERF_DEVICE"] if env.get( + "CM_MLPERF_DEVICE", "") != "" else "" + variation_run_style = ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") + variation_reproducibility = ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( + "CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" + variation_all_models = ",_all-models" if env.get( + "CM_MLPERF_ALL_MODELS", "") == "yes" else "" if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': - variation_quantization_string= ",_" + env["CM_MLPERF_MODEL_PRECISION"] + variation_quantization_string = ",_" + env["CM_MLPERF_MODEL_PRECISION"] else: variation_quantization_string = "" - tags = "app,mlperf,inference,generic,"+variation_implementation+variation_model+variation_backend+variation_device+variation_run_style+variation_reproducibility+variation_quantization_string+power_variation+variation_all_models + tags = "app,mlperf,inference,generic," + variation_implementation + variation_model + variation_backend + variation_device + \ + variation_run_style + variation_reproducibility + \ + variation_quantization_string + power_variation + variation_all_models verbose = inp.get('v', False) print_env = inp.get('print_env', False) print_deps = inp.get('print_deps', False) @@ -149,52 +171,60 @@ def preprocess(i): if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} - if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': add_deps_recursive['mlperf-inference-implementation']['tags'] = '' else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - 
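# The scenario tag string above is built from many conditional ",_..."
# fragments. One compact way to express the same construction (a sketch,
# not the patch's code; sample values are illustrative):

def variation(value):
    return ",_" + value if value else ""


tags = ("app,mlperf,inference,generic,_reference"
        + variation("resnet50")       # model
        + variation("onnxruntime")    # backend
        + variation("cpu"))           # device
# -> "app,mlperf,inference,generic,_reference,_resnet50,_onnxruntime,_cpu"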
add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size."+env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ + env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] if env.get('CM_MLPERF_INFERENCE_SUT_VARIATION', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} - if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': add_deps_recursive['mlperf-inference-implementation']['tags'] = '' else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' - add_deps_recursive['mlperf-inference-implementation']['tags'] += "_"+env['CM_MLPERF_INFERENCE_SUT_VARIATION'] + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ + env['CM_MLPERF_INFERENCE_SUT_VARIATION'] if env.get('CM_NETWORK_LOADGEN', '') != '': if not add_deps_recursive.get('mlperf-inference-implementation', {}): add_deps_recursive['mlperf-inference-implementation'] = {} network_variation_tag = f"_network-{env['CM_NETWORK_LOADGEN']}" - if add_deps_recursive['mlperf-inference-implementation'].get('tags', '') == '': + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': add_deps_recursive['mlperf-inference-implementation']['tags'] = '' else: add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag if env.get('CM_OUTPUT_FOLDER_NAME', '') == '': - env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" + env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" - output_dir = os.path.join(env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME']) + output_dir = os.path.join( + env['OUTPUT_BASE_DIR'], + env['CM_OUTPUT_FOLDER_NAME']) if clean: path_to_clean = output_dir - print ('=========================================================') - print ('Cleaning results in {}'.format(path_to_clean)) + print('=========================================================') + print('Cleaning results in {}'.format(path_to_clean)) if os.path.exists(path_to_clean): shutil.rmtree(path_to_clean) - print ('=========================================================') + print('=========================================================') - if str(env.get('CM_MLPERF_USE_DOCKER', '')).lower() in [ "1", "true", "yes"]: + if str(env.get('CM_MLPERF_USE_DOCKER', '') + ).lower() in ["1", "true", "yes"]: action = "docker" - #del(env['OUTPUT_BASE_DIR']) + # del(env['OUTPUT_BASE_DIR']) state = {} docker_extra_input = {} - #if env.get('CM_HW_NAME'): + # if env.get('CM_HW_NAME'): # del(env['CM_HW_NAME']) for k in inp: @@ -202,7 +232,8 @@ def preprocess(i): docker_extra_input[k] = inp[k] inp = {} if str(docker_dt).lower() in ["yes", "true", "1"]: - env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' # turning it off for the first run and after that we turn it on + # turning it off for the first run and after that we turn it on + env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' env['CM_DOCKER_DETACHED_MODE'] = 'yes' if env.get('CM_DOCKER_IMAGE_NAME', '') != '': @@ -210,10 +241,10 @@ def preprocess(i): else: action = "run" - #local_keys = [ 'CM_MLPERF_SKIP_RUN', 'CM_MLPERF_LOADGEN_QUERY_COUNT', 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] + # local_keys = [ 'CM_MLPERF_SKIP_RUN', 
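# The comma-handling block above is repeated verbatim for the batch-size,
# SUT-variation and network tags. A sketch of that pattern folded into one
# helper (name illustrative, not part of the patch):

def append_impl_tag(add_deps_recursive, tag):
    impl = add_deps_recursive.setdefault('mlperf-inference-implementation', {})
    existing = impl.get('tags', '')
    impl['tags'] = tag if existing == '' else existing + ',' + tag


# append_impl_tag(add_deps_recursive, "_batch_size.32")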
'CM_MLPERF_LOADGEN_QUERY_COUNT', 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] for scenario in env['CM_MLPERF_LOADGEN_SCENARIOS']: - scenario_tags = tags + ",_"+scenario.lower() + scenario_tags = tags + ",_" + scenario.lower() env['CM_MLPERF_LOADGEN_SCENARIO'] = scenario if scenario == "Offline": @@ -235,9 +266,9 @@ def preprocess(i): env_copy = copy.deepcopy(env) const_copy = copy.deepcopy(const) print(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") - ii = {'action':action, 'automation':'script', 'tags': scenario_tags, 'quiet': 'true', + ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', 'env': env_copy, 'const': const_copy, 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': - copy.deepcopy(add_deps_recursive), 'ad': ad, 'adr': copy.deepcopy(adr), 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + copy.deepcopy(add_deps_recursive), 'ad': ad, 'adr': copy.deepcopy(adr), 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} if action == "docker": for k in docker_extra_input: @@ -250,26 +281,29 @@ def preprocess(i): if env_copy.get('CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR', '') != '': env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = env_copy['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] else: - env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = os.path.join(env['OUTPUT_BASE_DIR'], f"{env['CM_MLPERF_RUN_STYLE']}_results") + env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = os.path.join( + env['OUTPUT_BASE_DIR'], f"{env['CM_MLPERF_RUN_STYLE']}_results") if action == "docker": if str(docker_dt).lower() not in ["yes", "true", "1"]: - print(f"\nStop Running loadgen scenario: {scenario} and mode: {mode}") - return {'return': 0} # We run commands interactively inside the docker container + print( + f"\nStop Running loadgen scenario: {scenario} and mode: {mode}") + # We run commands interactively inside the docker container + return {'return': 0} else: env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'yes' container_id = env_copy['CM_DOCKER_CONTAINER_ID'] env['CM_DOCKER_CONTAINER_ID'] = container_id if state.get('docker', {}): - del(state['docker']) + del (state['docker']) if env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": for test in test_list: env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test env['CM_MLPERF_LOADGEN_MODE'] = "compliance" - ii = {'action':action, 'automation':'script', 'tags': scenario_tags, 'quiet': 'true', + ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', 'env': copy.deepcopy(env), 'const': copy.deepcopy(const), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': - copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} if action == "docker": for k in docker_extra_input: ii[k] = docker_extra_input[k] @@ -277,26 +311,31 @@ def preprocess(i): if r['return'] > 0: return r if state.get('docker', {}): - del(state['docker']) + del (state['docker']) - if env.get('CM_DOCKER_CONTAINER_ID', '') != '' and str(env.get('CM_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: + if env.get('CM_DOCKER_CONTAINER_ID', '') != '' and 
str(env.get( + 'CM_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: container_id = env['CM_DOCKER_CONTAINER_ID'] CMD = f"docker kill {container_id}" docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") if state.get("cm-mlperf-inference-results"): - #print(state["cm-mlperf-inference-results"]) - for sut in state["cm-mlperf-inference-results"]:#only one sut will be there - # Better to do this in a stand alone CM script with proper deps but currently we manage this by modifying the sys path of the python executing CM + # print(state["cm-mlperf-inference-results"]) + for sut in state["cm-mlperf-inference-results"]: # only one sut will be there + # Better to do this in a stand alone CM script with proper deps but + # currently we manage this by modifying the sys path of the python + # executing CM import mlperf_utils print(sut) - result_table, headers = mlperf_utils.get_result_table(state["cm-mlperf-inference-results"][sut]) - print(tabulate(result_table, headers = headers, tablefmt="pretty")) + result_table, headers = mlperf_utils.get_result_table( + state["cm-mlperf-inference-results"][sut]) + print(tabulate(result_table, headers=headers, tablefmt="pretty")) - print(f"\nThe MLPerf inference results are stored at {output_dir}\n") + print( + f"\nThe MLPerf inference results are stored at {output_dir}\n") - return {'return':0} + return {'return': 0} def get_valid_scenarios(model, category, mlperf_version, mlperf_path): @@ -306,9 +345,10 @@ def get_valid_scenarios(model, category, mlperf_version, mlperf_path): submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") sys.path.append(submission_checker_dir) - if not os.path.exists(os.path.join(submission_checker_dir, "submission_checker.py")): - shutil.copy(os.path.join(submission_checker_dir,"submission-checker.py"), os.path.join(submission_checker_dir, - "submission_checker.py")) + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) import submission_checker as checker @@ -319,64 +359,84 @@ def get_valid_scenarios(model, category, mlperf_version, mlperf_path): config = checker.MODEL_CONFIG - internal_model_name = config[mlperf_version]["model_mapping"].get(model, model) + internal_model_name = config[mlperf_version]["model_mapping"].get( + model, model) - valid_scenarios = config[mlperf_version]["required-scenarios-"+category.replace(",", "-")][internal_model_name] + valid_scenarios = config[mlperf_version]["required-scenarios-" + + category.replace(",", "-")][internal_model_name] - print("Valid Scenarios for " + model + " in " + category + " category are :" + str(valid_scenarios)) + print( + "Valid Scenarios for " + + model + + " in " + + category + + " category are :" + + str(valid_scenarios)) return valid_scenarios -################################################################################## +########################################################################## + + def postprocess(i): env = i['env'] state = i['state'] if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'reference': - x1 = env.get('CM_MLPERF_INFERENCE_SOURCE','') - x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH','') + x1 = env.get('CM_MLPERF_INFERENCE_SOURCE', '') + x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH', '') if x1 != '' and x2 != '': - print ('') - print ('Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) - print 
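# The summary above is rendered with tabulate's "pretty" format; the real
# rows and headers come from mlperf_utils.get_result_table. A
# self-contained sketch with made-up data:

from tabulate import tabulate

headers = ["Model", "Scenario", "Result"]          # illustrative
result_table = [["resnet50", "Offline", "1234.5"]]
print(tabulate(result_table, headers=headers, tablefmt="pretty"))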
('Path to the MLPerf inference reference configuration file: {}'.format(x2)) - print ('') + print('') + print( + 'Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) + print( + 'Path to the MLPerf inference reference configuration file: {}'.format(x2)) + print('') + + return {'return': 0} + +########################################################################## - return {'return':0} -################################################################################## def load_md(path, path2, name): - fn = os.path.join(path, path2, name+'.md') + fn = os.path.join(path, path2, name + '.md') s = '' if os.path.isfile(fn): r = utils.load_txt(fn) - if r['return']>0: return r + if r['return'] > 0: + return r s = r['string'] - return {'return':0, 'string':s} + return {'return': 0, 'string': s} + +########################################################################## + -################################################################################## def get_url(url, path, path2, name, text): - name_md = name+'.md' + name_md = name + '.md' fn = os.path.join(path, path2, name_md) urlx = '' url_online = '' if os.path.isfile(fn): - if not url.endswith('/'): url+='/' + if not url.endswith('/'): + url += '/' urlx = url + path2 + '/' + name_md url_online = '[{}]({})'.format(text, urlx) - return {'return':0, 'url_online':url_online} + return {'return': 0, 'url_online': url_online} + +########################################################################## + -################################################################################## def gui(i): params = i['params'] @@ -387,15 +447,15 @@ def gui(i): misc = i['misc_module'] script_path = i['script_path'] - script_url = i.get('script_url','') + script_url = i.get('script_url', '') script_tags = i.get('script_tags', '') - compute_meta = i.get('compute_meta',{}) + compute_meta = i.get('compute_meta', {}) compute_tags = compute_meta.get('tags', []) - bench_meta = i.get('bench_meta',{}) + bench_meta = i.get('bench_meta', {}) - compute_uid = compute_meta.get('uid','') - bench_uid = bench_meta.get('uid','') + compute_uid = compute_meta.get('uid', '') + bench_uid = bench_meta.get('uid', '') st_inputs_custom = {} @@ -410,120 +470,145 @@ def gui(i): # Here we can update params v = compute_meta.get('mlperf_inference_device') - if v!=None and v!='': + if v is not None and v != '': inp['device']['force'] = v if v in ['tpu', 'gaudi']: st.markdown('----') - st.markdown('**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**') - return {'return':0, 'skip': True, 'end_html':end_html} + st.markdown( + '**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**') + return {'return': 0, 'skip': True, 'end_html': end_html} elif 'orin' in compute_tags: st.markdown('----') - st.markdown('**WARNING: we need to encode CM knowledge from [this Orin setp](https://github.com/mlcommons/ck/blob/master/docs/mlperf/setup/setup-nvidia-jetson-orin.md) to this GUI!**') - return {'return':0, 'skip': True, 'end_html':end_html} + st.markdown( + '**WARNING: we need to encode CM knowledge from [this Orin setp](https://github.com/mlcommons/ck/blob/master/docs/mlperf/setup/setup-nvidia-jetson-orin.md) to this GUI!**') + return {'return': 0, 'skip': True, 'end_html': end_html} st.markdown('---') st.markdown('**How would you like to run the MLPerf inference benchmark?**') - r = misc.make_selector({'st':st, 
'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_device', 'desc':inp['device']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_device', + 'desc': inp['device']}) device = r.get('value2') inp['device']['force'] = device - - if device == 'cpu': - inp['implementation']['choices']=['mlcommons-python', 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite'] + inp['implementation']['choices'] = ['mlcommons-python', + 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite'] if 'intel' in compute_tags: - inp['implementation']['default']='intel' + inp['implementation']['default'] = 'intel' else: - inp['implementation']['default']='mlcommons-python' - inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx'] - inp['backend']['default']='onnxruntime' + inp['implementation']['default'] = 'mlcommons-python' + inp['backend']['choices'] = [ + 'onnxruntime', 'deepsparse', 'pytorch', 'tf', 'tvm-onnx'] + inp['backend']['default'] = 'onnxruntime' elif device == 'rocm': - inp['implementation']['force']='mlcommons-python' - inp['precision']['force']='' - inp['backend']['force']='onnxruntime' - st.markdown('*WARNING: CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*') + inp['implementation']['force'] = 'mlcommons-python' + inp['precision']['force'] = '' + inp['backend']['force'] = 'onnxruntime' + st.markdown( + '*WARNING: CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*') elif device == 'qaic': - inp['implementation']['force']='qualcomm' - inp['precision']['force']='' - inp['backend']['force']='glow' - - - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_division', 'desc':inp['division']}) + inp['implementation']['force'] = 'qualcomm' + inp['precision']['force'] = '' + inp['backend']['force'] = 'glow' + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_division', + 'desc': inp['division']}) division = r.get('value2') inp['division']['force'] = division - y = 'compliance' - if division=='closed': + if division == 'closed': inp[y]['default'] = 'yes' - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_compliance', 'desc':inp[y]}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_compliance', + 'desc': inp[y]}) compliance = r.get('value2') inp[y]['force'] = compliance if compliance == 'yes': - st.markdown('*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*') + st.markdown( + '*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*') else: inp[y]['force'] = 'no' - - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_category', 'desc':inp['category']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_category', + 'desc': inp['category']}) category = r.get('value2') inp['category']['force'] = category - - - - ############################################################################# + ########################################################################## # 
Implementation v = bench_input.get('mlperf_inference_implementation') - if v!=None and v!='': + if v is not None and v != '': inp['implementation']['force'] = v else: if device == 'cuda': - inp['implementation']['choices']=['nvidia','mlcommons-python','mlcommons-cpp'] - inp['implementation']['default']='nvidia' - inp['backend']['choices']=['tensorrt','onnxruntime','pytorch'] - inp['backend']['default']='tensorrt' - - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_implementation', 'desc':inp['implementation']}) + inp['implementation']['choices'] = [ + 'nvidia', 'mlcommons-python', 'mlcommons-cpp'] + inp['implementation']['default'] = 'nvidia' + inp['backend']['choices'] = ['tensorrt', 'onnxruntime', 'pytorch'] + inp['backend']['default'] = 'tensorrt' + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_implementation', + 'desc': inp['implementation']}) implementation = r.get('value2') inp['implementation']['force'] = implementation implementation_setup = '' - r = load_md(script_path, 'setup', 'i-'+implementation) - if r['return'] == 0: implementation_setup = r['string'] + r = load_md(script_path, 'setup', 'i-' + implementation) + if r['return'] == 0: + implementation_setup = r['string'] url_faq_implementation = '' r = get_url(script_url, script_path, 'faq', implementation, 'FAQ online') - if r['return'] == 0: url_faq_implementation = r['url_online'] + if r['return'] == 0: + url_faq_implementation = r['url_online'] can_have_docker_flag = False if implementation == 'mlcommons-cpp': -# inp['backend']['choices'] = ['onnxruntime'] - inp['precision']['force']='float32' + # inp['backend']['choices'] = ['onnxruntime'] + inp['precision']['force'] = 'float32' inp['backend']['force'] = 'onnxruntime' inp['model']['choices'] = ['resnet50', 'retinanet'] - st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp)]*') + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp)]*') elif implementation == 'mlcommons-python': - inp['precision']['force']='float32' + inp['precision']['force'] = 'float32' if device == 'cuda': - inp['backend']['choices']=['onnxruntime','pytorch','tf'] + inp['backend']['choices'] = ['onnxruntime', 'pytorch', 'tf'] inp['backend']['default'] = 'onnxruntime' - st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python)]*') + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python)]*') elif implementation == 'ctuning-cpp-tflite': - inp['precision']['force']='float32' - inp['model']['force']='resnet50' - st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite)]*') + inp['precision']['force'] = 'float32' + inp['model']['force'] = 'resnet50' + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite)]*') elif implementation == 'nvidia': inp['backend']['force'] = 'tensorrt' extra['skip_script_docker_func'] = True can_have_docker_flag = True - 
st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia)]*') + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia)]*') elif implementation == 'intel': inp['model']['choices'] = ['bert-99', 'gptj-99'] inp['model']['default'] = 'bert-99' @@ -535,38 +620,48 @@ def gui(i): can_have_docker_flag = True extra['skip_script_docker_func'] = True # st.markdown('*:red[Note: Intel implementation require extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*') - st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-intel)]*') + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-intel)]*') elif implementation == 'qualcomm': inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99'] inp['model']['default'] = 'bert-99' inp['precision']['default'] = 'float16' extra['skip_script_docker_func'] = True - st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-qualcomm)]*') - + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-qualcomm)]*') - ############################################################################# + ########################################################################## # Backend - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_backend', 'desc':inp['backend']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_backend', + 'desc': inp['backend']}) backend = r.get('value2') inp['backend']['force'] = backend backend_setup = '' - r = load_md(script_path, 'setup', 'b-'+backend) - if r['return'] == 0: backend_setup = r['string'] + r = load_md(script_path, 'setup', 'b-' + backend) + if r['return'] == 0: + backend_setup = r['string'] if backend == 'deepsparse': - inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99', 'bert-99.9'] + inp['model']['choices'] = [ + 'resnet50', 'retinanet', 'bert-99', 'bert-99.9'] inp['model']['default'] = 'bert-99' inp['precision']['choices'] = ['float32', 'int8'] inp['precision']['default'] = 'int8' - if 'force' in inp['precision']: del(inp['precision']['force']) + if 'force' in inp['precision']: + del (inp['precision']['force']) - - - ############################################################################# + ########################################################################## # Model - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_model', 'desc':inp['model']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_model', + 'desc': inp['model']}) model = r.get('value2') inp['model']['force'] = model @@ -575,8 +670,9 @@ def gui(i): if model == 'retinanet': x = '50' if implementation == 'mlcommons-python': - x= '200' - st.markdown(':red[This model requires ~{}GB of free disk space for preprocessed dataset in a full/submission run!]\n'.format(x)) + x = '200' + st.markdown( + 
':red[This model requires ~{}GB of free disk space for preprocessed dataset in a full/submission run!]\n'.format(x)) elif model.startswith('bert-'): github_doc_model = 'bert' @@ -602,14 +698,17 @@ def gui(i): elif model.startswith('mixtral-'): github_doc_model = 'mixtral-8x7b' - if github_doc_model == '': github_doc_model = model + if github_doc_model == '': + github_doc_model = model - model_cm_url='https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(github_doc_model) + model_cm_url = 'https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format( + github_doc_model) extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url) - st.markdown('*[CM-MLPerf GitHub docs for this model]({})*'.format(model_cm_url)) + st.markdown( + '*[CM-MLPerf GitHub docs for this model]({})*'.format(model_cm_url)) - ############################################################################# + ########################################################################## # Precision if implementation == 'intel': if model == 'bert-99': @@ -624,53 +723,83 @@ def gui(i): elif model == 'bert-99': inp['precision']['print'] = 'int8/float16' - if inp['precision'].get('force','')=='': - x = inp['precision'].get('print','') - if x!='': + if inp['precision'].get('force', '') == '': + x = inp['precision'].get('print', '') + if x != '': st.markdown('**{}**: {}'.format(inp['precision']['desc'], x)) else: - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_precision', 'desc':inp['precision']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_precision', + 'desc': inp['precision']}) precision = r.get('value2') inp['precision']['force'] = precision - ############################################################################# + ########################################################################## # Benchmark version script_meta_variations = script_meta['variations'] - choices = [''] + [k for k in script_meta_variations if script_meta_variations[k].get('group','') == 'benchmark-version'] - desc = {'choices': choices, 'default':choices[0], 'desc':'Force specific benchmark version?'} - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_version', 'desc':desc}) + choices = [''] + [ + k for k in script_meta_variations if script_meta_variations[k].get( + 'group', '') == 'benchmark-version'] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Force specific benchmark version?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_version', + 'desc': desc}) benchmark_version = r.get('value2') - if benchmark_version!='': - params['~~benchmark-version']=[benchmark_version] + if benchmark_version != '': + params['~~benchmark-version'] = [benchmark_version] - ############################################################################# + ########################################################################## # Run via Docker container if can_have_docker_flag: default_choice = 'yes - run in container' choices = [default_choice, 'no - run natively'] - desc = {'choices': choices, 'default':choices[0], 'desc':'Should CM script prepare and run Docker container in interactive mode to run MLPerf? 
You can then copy/paste CM commands generated by this GUI to benchmark different models.'} - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_docker', 'desc':desc}) + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Should CM script prepare and run Docker container in interactive mode to run MLPerf? You can then copy/paste CM commands generated by this GUI to benchmark different models.'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_docker', + 'desc': desc}) benchmark_docker = r.get('value2') if benchmark_docker == 'yes - run in container': - add_to_st_inputs['@docker']=True - add_to_st_inputs['@docker_cache']='no' + add_to_st_inputs['@docker'] = True + add_to_st_inputs['@docker_cache'] = 'no' - ############################################################################# + ########################################################################## # Prepare submission st.markdown('---') - submission = st.toggle('Would you like to prepare official submission?', value = False) + submission = st.toggle( + 'Would you like to prepare official submission?', + value=False) if submission: - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_hw_name', 'desc':inp['hw_name']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_hw_name', + 'desc': inp['hw_name']}) inp['hw_name']['force'] = r.get('value2') - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_submitter', 'desc':inp['submitter']}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_submitter', + 'desc': inp['submitter']}) submitter = r.get('value2') inp['submitter']['force'] = submitter @@ -680,7 +809,7 @@ def gui(i): inp['clean']['default'] = False inp['repro']['force'] = True - x = '*:red[Use the following command to find local directory with the submission tree and results:]*\n```bash\ncm find cache --tags=submission,dir\n```\n' + x = '*:red[Use the following command to find local directory with the submission tree and results:]*\n```bash\ncm find cache --tags=submission,dir\n```\n' x += '*:red[You will also find results in `mlperf-inference-submission.tar.gz` file that you can submit to MLPerf!]*\n\n' @@ -691,13 +820,24 @@ def gui(i): st.markdown('---') else: - inp['submitter']['force']='' - inp['clean']['default']=True - params['~submission']=['false'] - - choices = ['Performance', 'Accuracy', 'Find Performance from a short run', 'Performance and Accuracy'] - desc = {'choices': choices, 'default':choices[0], 'desc':'What to measure?'} - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_measure', 'desc':desc}) + inp['submitter']['force'] = '' + inp['clean']['default'] = True + params['~submission'] = ['false'] + + choices = [ + 'Performance', + 'Accuracy', + 'Find Performance from a short run', + 'Performance and Accuracy'] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'What to measure?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_measure', + 'desc': desc}) measure = r.get('value2') x = '' @@ -710,29 +850,31 @@ def gui(i): elif measure == 'Performance and Accuracy': x = 'submission' - 
params['~~submission-generation']=[x] + params['~~submission-generation'] = [x] - - ############################################################################# + ####################################################################### # Prepare scenario xall = 'All applicable' choices = ['Offline', 'Server', 'SingleStream', 'MultiStream', xall] - desc = {'choices':choices, 'default':choices[0], 'desc':'Which scenario(s)?'} - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_scenario', 'desc':desc}) + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Which scenario(s)?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_scenario', + 'desc': desc}) scenario = r.get('value2') - if scenario == xall: - params['~all-scenarios']=['true'] - inp['scenario']['force']='' + params['~all-scenarios'] = ['true'] + inp['scenario']['force'] = '' else: - inp['scenario']['force']=scenario + inp['scenario']['force'] = scenario - - - - ############################################################################# + ########################################################################## # Short or full run x = ['Full run', 'Short run'] @@ -741,113 +883,137 @@ def gui(i): else: choices = [x[1], x[0]] - desc = {'choices':choices, 'default':choices[0], 'desc':'Short (test) or full (valid) run?'} - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_how', 'desc':desc}) + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Short (test) or full (valid) run?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_how', + 'desc': desc}) how = r.get('value2') if how == x[0]: - params['~~submission-generation-style']=['full'] + params['~~submission-generation-style'] = ['full'] inp['execution_mode']['force'] = 'valid' else: - params['~~submission-generation-style']=['short'] + params['~~submission-generation-style'] = ['short'] inp['execution_mode']['force'] = 'test' - - - ############################################################################# + ########################################################################## # Power # desc = {'boolean':True, 'default':False, 'desc':'Measure power?'} # r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power', 'desc':desc}) # power = r.get('value2', False) - power = st.toggle('Measure power consumption?', value = False) + power = st.toggle('Measure power consumption?', value=False) if power: inp['power']['force'] = 'yes' y = 'adr.mlperf-power-client.power_server' - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_server', 'desc':inp[y]}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_server', + 'desc': inp[y]}) inp[y]['force'] = r.get('value2') y = 'adr.mlperf-power-client.port' - r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_port', 'desc':inp[y]}) + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_port', + 'desc': inp[y]}) inp[y]['force'] = r.get('value2') - st.markdown('*:red[See [online 
notes](https://github.com/mlcommons/ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)] to setup power meter and server.*')
+        st.markdown(
+            '*:red[See [online notes](https://github.com/mlcommons/ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)] to set up the power meter and server.*')

     else:
         inp['power']['force'] = 'no'
-        inp['adr.mlperf-power-client.power_server']['force']=''
-        inp['adr.mlperf-power-client.port']['force']=''
+        inp['adr.mlperf-power-client.power_server']['force'] = ''
+        inp['adr.mlperf-power-client.port']['force'] = ''

-
-    #############################################################################
+    ##########################################################################
     # Dashboard

     # desc = {'boolean':True, 'default':False, 'desc':'Output results to W&B dashboard?'}
     # r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_dashboard', 'desc':desc})
     # dashboard = r.get('value2', False)

-    dashboard = st.toggle('Output results to W&B dashboard?', value = False)
+    dashboard = st.toggle('Output results to W&B dashboard?', value=False)

     if dashboard:
-        params['~dashboard']=['true']
+        params['~dashboard'] = ['true']

         y = 'dashboard_wb_project'
-        r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_wb_project', 'desc':inp[y]})
+        r = misc.make_selector({'st': st,
+                                'st_inputs': st_inputs_custom,
+                                'params': params,
+                                'key': 'mlperf_inference_power_wb_project',
+                                'desc': inp[y]})
         inp[y]['force'] = r.get('value2')

         y = 'dashboard_wb_user'
-        r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power_wb_user', 'desc':inp[y]})
+        r = misc.make_selector({'st': st,
+                                'st_inputs': st_inputs_custom,
+                                'params': params,
+                                'key': 'mlperf_inference_power_wb_user',
+                                'desc': inp[y]})
         inp[y]['force'] = r.get('value2')

     else:
-        params['~dashboard']=['false']
-        inp['dashboard_wb_project']['force']=''
-        inp['dashboard_wb_user']['force']=''
-
-
-
+        params['~dashboard'] = ['false']
+        inp['dashboard_wb_project']['force'] = ''
+        inp['dashboard_wb_user']['force'] = ''

     # Hide customization by default
     params['hide_script_customization'] = True

     x = implementation_setup
-    if backend_setup!='':
-        if x != '': x+='\n\n'
-        x+=backend_setup
+    if backend_setup != '':
+        if x != '':
+            x += '\n\n'
+        x += backend_setup

     extra['extra_notes_online'] = extra_notes_online
     extra['extra_faq_online'] = url_faq_implementation
     extra['extra_setup'] = x

-    #############################################################################
-    value_reproduce = inp.get('repro',{}).get('force', False)
-    reproduce = st.toggle('Record extra info for reproducibility?', value = value_reproduce)
+    ##########################################################################
+    value_reproduce = inp.get('repro', {}).get('force', False)
+    reproduce = st.toggle(
+        'Record extra info for reproducibility?',
+        value=value_reproduce)

-    explore = st.toggle('Explore/tune benchmark (batch size, threads, etc)?', value = False)
+    explore = st.toggle(
+        'Explore/tune benchmark (batch size, threads, etc)?',
+        value=False)

     if reproduce or explore:
         add_to_st_inputs.update({
-            "@repro_extra.run-mlperf-inference-app.bench_uid": bench_uid,
-            "@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid,
-            '@results_dir':'{{CM_EXPERIMENT_PATH3}}',
-            '@submission_dir':'{{CM_EXPERIMENT_PATH3}}'
+            "@repro_extra.run-mlperf-inference-app.bench_uid": bench_uid,
+            "@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid,
+            '@results_dir': '{{CM_EXPERIMENT_PATH3}}',
+            '@submission_dir': '{{CM_EXPERIMENT_PATH3}}'
         })

         inp['repro']['force'] = True
         extra['use_experiment'] = True

     if explore:
-        add_to_st_inputs['@batch_size']='{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}'
+        add_to_st_inputs['@batch_size'] = '{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}'

-    #############################################################################
-    debug = st.toggle('Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', value=False)
+    ##########################################################################
+    debug = st.toggle(
+        'Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?',
+        value=False)
     if debug:
         inp['debug']['force'] = True

-
     extra['add_to_st_inputs'] = add_to_st_inputs

-    return {'return':0, 'end_html':end_html, 'extra':extra}
+    return {'return': 0, 'end_html': end_html, 'extra': extra}
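Note on the `misc.make_selector` calls reformatted throughout the file above: the helper itself lives in the GUI's misc module and is not part of this patch, so the following is only a rough sketch of the pattern the hunks rely on (a `desc` dict with `choices`/`default`/`force`, query `params`, and a `value2` result), assuming a Streamlit backend:

```python
# Hypothetical sketch of a make_selector-style helper; the real implementation
# is in the misc module and may differ in details.
import streamlit as st


def make_selector(i):
    desc = i['desc']    # e.g. {'desc': label, 'choices': [...], 'default': ..., 'force': ...}
    key = i['key']
    params = i.get('params', {})

    # A non-empty 'force' value bypasses the widget, mirroring the
    # inp[...]['force'] assignments in the hunks above.
    forced = desc.get('force')
    if forced not in (None, ''):
        return {'return': 0, 'value2': forced}

    choices = desc.get('choices', [])
    # Query parameters (e.g. ?mlperf_inference_device=cuda) can preselect a
    # choice; the hunks store such params as lists, hence the [0].
    preset = params.get(key, [desc.get('default', '')])[0]
    index = choices.index(preset) if preset in choices else 0

    value = st.selectbox(desc.get('desc', key), choices, index=index, key=key)
    return {'return': 0, 'value2': value}
```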
"@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid, + '@results_dir': '{{CM_EXPERIMENT_PATH3}}', + '@submission_dir': '{{CM_EXPERIMENT_PATH3}}' }) inp['repro']['force'] = True extra['use_experiment'] = True if explore: - add_to_st_inputs['@batch_size']='{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}' + add_to_st_inputs['@batch_size'] = '{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}' - ############################################################################# - debug = st.toggle('Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', value=False) + ########################################################################## + debug = st.toggle( + 'Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', + value=False) if debug: inp['debug']['force'] = True - extra['add_to_st_inputs'] = add_to_st_inputs - return {'return':0, 'end_html':end_html, 'extra':extra} + return {'return': 0, 'end_html': end_html, 'extra': extra} diff --git a/script/run-mlperf-inference-app/run_mobilenet.py b/script/run-mlperf-inference-app/run_mobilenet.py index abad10a78b..63ad8986e0 100644 --- a/script/run-mlperf-inference-app/run_mobilenet.py +++ b/script/run-mlperf-inference-app/run_mobilenet.py @@ -3,31 +3,31 @@ import sys models = { - "mobilenet": { - "v1": { - "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25" ], - "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], - "kind": [""] - }, - "v2": { - "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35" ], - "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], - "kind": [""] - }, - "v3": { - "multiplier": [""], - "resolution": [""], - "kind": [ "large", "large-minimalistic", "small", "small-minimalistic" ] - } - }, - "efficientnet": { - "": { - "multiplier": [""], - "resolution": [""], - "kind": [ "lite0", "lite1", "lite2", "lite3", "lite4" ] - } - } + "mobilenet": { + "v1": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v2": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v3": { + "multiplier": [""], + "resolution": [""], + "kind": ["large", "large-minimalistic", "small", "small-minimalistic"] } + }, + "efficientnet": { + "": { + "multiplier": [""], + "resolution": [""], + "kind": ["lite0", "lite1", "lite2", "lite3", "lite4"] + } + } +} variation_strings = {} for t1 in models: variation_strings[t1] = [] @@ -36,68 +36,68 @@ for version in models[t1]: variation_list = [] if version.strip(): - variation_list.append("_"+version) + variation_list.append("_" + version) variation_list_saved = variation_list.copy() for k1 in models[t1][version]["multiplier"]: variation_list = variation_list_saved.copy() if k1.strip(): - variation_list.append("_"+k1) + variation_list.append("_" + k1) variation_list_saved_2 = variation_list.copy() for k2 in models[t1][version]["resolution"]: variation_list = variation_list_saved_2.copy() if k2.strip(): - variation_list.append("_"+k2) + variation_list.append("_" + k2) variation_list_saved_3 = variation_list.copy() for k3 in models[t1][version]["kind"]: variation_list = variation_list_saved_3.copy() if k3.strip(): - 
variation_list.append("_"+k3) + variation_list.append("_" + k3) variation_strings[t1].append(",".join(variation_list)) args = sys.argv -opt=None +opt = None if len(args) > 1: opt = args[1] -if opt=="submission": - var="_submission" - execution_mode="valid" +if opt == "submission": + var = "_submission" + execution_mode = "valid" else: - var="_find-performance" - execution_mode="test" + var = "_find-performance" + execution_mode = "test" -precisions = [ "fp32", "uint8" ] +precisions = ["fp32", "uint8"] for model in variation_strings: for v in variation_strings[model]: for precision in precisions: if "small-minimalistic" in v and precision == "uint8": - continue; + continue if model == "efficientnet" and precision == "uint8": precision = "int8" cm_input = { - 'action': 'run', - 'automation': 'script', - 'tags': f'generate-run-cmds,mlperf,inference,{var}', - 'quiet': True, - 'implementation': 'tflite-cpp', - 'precision': precision, - 'model': model, - 'scenario': 'SingleStream', - 'execution_mode': execution_mode, - 'test_query_count': '50', - 'adr': { + 'action': 'run', + 'automation': 'script', + 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'quiet': True, + 'implementation': 'tflite-cpp', + 'precision': precision, + 'model': model, + 'scenario': 'SingleStream', + 'execution_mode': execution_mode, + 'test_query_count': '50', + 'adr': { 'tflite-model': { 'tags': v - }, - 'compiler': { + }, + 'compiler': { 'tags': 'gcc' - }, - 'mlperf-inference-implementation': { + }, + 'mlperf-inference-implementation': { 'tags': '_armnn,_use-neon' - } } - } + } + } print(cm_input) r = cmind.access(cm_input) if r['return'] > 0: print(r) - #exit(1) + # exit(1) diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py index ace19a6fd8..b6aabca6b6 100644 --- a/script/run-mlperf-inference-mobilenet-models/customize.py +++ b/script/run-mlperf-inference-mobilenet-models/customize.py @@ -24,29 +24,29 @@ def preprocess(i): models_all = { "mobilenet": { "v1": { - "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25" ], - "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], "kind": [""] - }, + }, "v2": { - "multiplier": [ "multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35" ], - "resolution": [ "resolution-224", "resolution-192", "resolution-160", "resolution-128" ], + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], "kind": [""] - }, + }, "v3": { "multiplier": [""], "resolution": [""], - "kind": [ "large", "large-minimalistic", "small", "small-minimalistic" ] - } - }, + "kind": ["large", "large-minimalistic", "small", "small-minimalistic"] + } + }, "efficientnet": { "": { "multiplier": [""], "resolution": [""], - "kind": [ "lite0", "lite1", "lite2", "lite3", "lite4" ] - } + "kind": ["lite0", "lite1", "lite2", "lite3", "lite4"] } } + } models = {} if env.get('CM_MLPERF_RUN_MOBILENET_V1', '') == "yes": @@ -71,41 +71,41 @@ def preprocess(i): for version in models[t1]: variation_list = [] if version.strip(): - variation_list.append("_"+version) + variation_list.append("_" + version) variation_list_saved = variation_list.copy() for k1 in 
models[t1][version]["multiplier"]: variation_list = variation_list_saved.copy() if k1.strip(): - variation_list.append("_"+k1) + variation_list.append("_" + k1) variation_list_saved_2 = variation_list.copy() for k2 in models[t1][version]["resolution"]: variation_list = variation_list_saved_2.copy() if k2.strip(): - variation_list.append("_"+k2) + variation_list.append("_" + k2) variation_list_saved_3 = variation_list.copy() for k3 in models[t1][version]["kind"]: variation_list = variation_list_saved_3.copy() if k3.strip(): - variation_list.append("_"+k3) + variation_list.append("_" + k3) variation_strings[t1].append(",".join(variation_list)) - if env.get('CM_MLPERF_SUBMISSION_MODE','') == "yes": - var="_submission" - execution_mode="valid" - elif env.get('CM_MLPERF_ACCURACY_MODE','') == "yes" and env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": - var="_full,_performance-and-accuracy" - execution_mode="valid" - elif env.get('CM_MLPERF_ACCURACY_MODE','') == "yes": - var="_full,_accuracy-only" - execution_mode="valid" - elif env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": - var="_full,_performance-only" - execution_mode="valid" + if env.get('CM_MLPERF_SUBMISSION_MODE', '') == "yes": + var = "_submission" + execution_mode = "valid" + elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + var = "_full,_performance-and-accuracy" + execution_mode = "valid" + elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": + var = "_full,_accuracy-only" + execution_mode = "valid" + elif env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + var = "_full,_performance-only" + execution_mode = "valid" else: - var="_find-performance" - execution_mode="test" + var = "_find-performance" + execution_mode = "test" - precisions = [ ] + precisions = [] if env.get('CM_MLPERF_RUN_FP32', '') == "yes": precisions.append("fp32") if env.get('CM_MLPERF_RUN_INT8', '') == "yes": @@ -156,10 +156,12 @@ def preprocess(i): } } if add_deps_recursive: - cm_input['add_deps_recursive'] = add_deps_recursive #script automation will merge adr and add_deps_recursive + # script automation will merge adr and add_deps_recursive + cm_input['add_deps_recursive'] = add_deps_recursive if adr: - utils.merge_dicts({'dict1':cm_input['adr'], 'dict2':adr, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts( + {'dict1': cm_input['adr'], 'dict2': adr, 'append_lists': True, 'append_unique': True}) if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') != '': cm_input['results_dir'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] @@ -167,20 +169,21 @@ def preprocess(i): if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': cm_input['submission_dir'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] - if env.get('CM_MLPERF_FIND_PERFORMANCE_MODE','') == "yes" and env.get('CM_MLPERF_NO_RERUN','') != 'yes': + if env.get('CM_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get( + 'CM_MLPERF_NO_RERUN', '') != 'yes': cm_input['rerun'] = True - if env.get('CM_MLPERF_POWER','') == "yes": + if env.get('CM_MLPERF_POWER', '') == "yes": cm_input['power'] = 'yes' - if env.get('CM_MLPERF_ACCURACY_MODE','') == "yes": + if env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": cm_input['mode'] = 'accuracy' print(cm_input) r = cmind.access(cm_input) if r['return'] > 0: return r - if env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": + if env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": cm_input['mode'] = 'performance' print(cm_input) @@ -189,21 +192,22 @@ def preprocess(i): return r if env.get('CM_TEST_ONE_RUN', 
'') == "yes": - return {'return':0} + return {'return': 0} clean_input = { - 'action': 'rm', - 'automation': 'cache', - 'tags': 'get,preprocessed,dataset,_for.mobilenet', + 'action': 'rm', + 'automation': 'cache', + 'tags': 'get,preprocessed,dataset,_for.mobilenet', 'quiet': True, 'v': verbose, 'f': 'True' - } + } r = cmind.access(clean_input) - #if r['return'] > 0: + # if r['return'] > 0: # return r - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/run-mlperf-inference-submission-checker/code.py b/script/run-mlperf-inference-submission-checker/code.py index 892d16be33..85fce01bc5 100644 --- a/script/run-mlperf-inference-submission-checker/code.py +++ b/script/run-mlperf-inference-submission-checker/code.py @@ -3,25 +3,27 @@ import os import pandas + def main(): - print ('=========================================================') + print('=========================================================') - print ('Searching for summary.csv ...') + print('Searching for summary.csv ...') if os.path.isfile('summary.csv'): - print ('Converting to json ...') + print('Converting to json ...') import pandas df = pandas.read_csv('summary.csv').T - print ('') - print (df) - print ('') + print('') + print(df) + print('') df.to_json('summary.json', orient='columns', indent=4) - print ('=========================================================') + print('=========================================================') + if __name__ == '__main__': main() diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py index 72a8928921..dc6cf4bba8 100644 --- a/script/run-mlperf-inference-submission-checker/customize.py +++ b/script/run-mlperf-inference-submission-checker/customize.py @@ -3,6 +3,7 @@ import os import subprocess + def preprocess(i): os_info = i['os_info'] @@ -11,14 +12,16 @@ def preprocess(i): submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") - version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','') + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', '') if submission_dir == "": - return {'return': 1, 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} + return {'return': 1, + 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} - submitter = env.get("CM_MLPERF_SUBMITTER", "") #"default") + submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") if ' ' in submitter: - return {'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + return { + 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. 
Given value: {}'.format(submitter)} if 'CM_MLPERF_SKIP_COMPLIANCE' in env: skip_compliance = " --skip_compliance" @@ -26,21 +29,26 @@ def preprocess(i): skip_compliance = "" submission_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", - "submission_checker.py") + "submission_checker.py") if env['CM_MLPERF_SHORT_RUN'] == "yes": import shutil - new_submission_checker_file = os.path.join(os.path.dirname(submission_checker_file), "submission_checker1.py") + new_submission_checker_file = os.path.join( + os.path.dirname(submission_checker_file), + "submission_checker1.py") with open(submission_checker_file, 'r') as file: data = file.read() data = data.replace("OFFLINE_MIN_SPQ = 24576", "OFFLINE_MIN_SPQ = 100") - data = data.replace("return is_valid, res, inferred", "return True, res, inferred") + data = data.replace( + "return is_valid, res, inferred", + "return True, res, inferred") with open(new_submission_checker_file, 'w') as file: file.write(data) submission_checker_file = new_submission_checker_file if env.get('CM_MLPERF_EXTRA_MODEL_MAPPING', '') != '': - extra_map = ' --extra_model_benchmark_map "'+env['CM_MLPERF_EXTRA_MODEL_MAPPING']+'"' + extra_map = ' --extra_model_benchmark_map "' + \ + env['CM_MLPERF_EXTRA_MODEL_MAPPING'] + '"' else: extra_map = "" @@ -49,18 +57,18 @@ def preprocess(i): else: power_check = "" - extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS','') + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') - x_submitter = ' --submitter ' + q + submitter + q if submitter!='' else '' + x_submitter = ' --submitter ' + q + submitter + q if submitter != '' else '' - x_version = ' --version ' + version +' ' if version!='' else '' + x_version = ' --version ' + version + ' ' if version != '' else '' - CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' '+ q + submission_checker_file + q +' --input ' + q + submission_dir + q + \ - x_submitter + \ - x_version + \ - skip_compliance + extra_map + power_check + extra_args + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + submission_checker_file + q + ' --input ' + q + submission_dir + q + \ + x_submitter + \ + x_version + \ + skip_compliance + extra_map + power_check + extra_args - x_version = ' --version ' + version[1:] +' ' if version!='' else '' + x_version = ' --version ' + version[1:] + ' ' if version != '' else '' x_submission_repo_name = '' x_submission_repo_owner = '' @@ -74,42 +82,43 @@ def preprocess(i): x_submission_repo_branch = f""" --repository-branch {env['CM_MLPERF_RESULTS_GIT_REPO_BRANCH']}""" report_generator_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", - "generate_final_report.py") + "generate_final_report.py") env['CM_RUN_CMD'] = CMD print(CMD) - env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] +' ' + q + report_generator_file + q + ' --input summary.csv ' + \ - x_version + \ - x_submission_repo_name + \ - x_submission_repo_owner + \ - x_submission_repo_branch + env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + report_generator_file + q + ' --input summary.csv ' + \ + x_version + \ + x_submission_repo_name + \ + x_submission_repo_owner + \ + x_submission_repo_branch + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - if env.get('CM_TAR_SUBMISSION_DIR',''): + if env.get('CM_TAR_SUBMISSION_DIR', ''): env['CM_TAR_INPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] - x=env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE','') - if x!='': - env['CM_TAR_OUTFILE']=x + x = 
env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') + if x != '': + env['CM_TAR_OUTFILE'] = x if env.get('CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') != '': env['CM_TAR_OUTPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR'] - x=env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') - if x!='': + x = env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') + if x != '': for y in ['.csv', '.json', '.xlsx']: - z0 = 'summary'+y + z0 = 'summary' + y if os.path.isfile(z0): - z1 = x+y + z1 = x + y if os.path.isfile(z1): os.remove(z1) os.rename(z0, z1) - return {'return':0} + return {'return': 0} diff --git a/script/run-mlperf-power-client/customize.py b/script/run-mlperf-power-client/customize.py index 72ea87648c..49e55c38bd 100644 --- a/script/run-mlperf-power-client/customize.py +++ b/script/run-mlperf-power-client/customize.py @@ -3,13 +3,15 @@ import os import configparser + def preprocess(i): os_info = i['os_info'] env = i['env'] if not env['CM_MLPERF_RUN_CMD']: - env['CM_MLPERF_RUN_CMD'] = os.path.join(i['run_script_input']['path'], "dummy.sh") + env['CM_MLPERF_RUN_CMD'] = os.path.join( + i['run_script_input']['path'], "dummy.sh") if 'CM_MLPERF_POWER_TIMESTAMP' in env: timestamp = "" @@ -17,27 +19,29 @@ def preprocess(i): timestamp = " --no-timestamp-path" if 'CM_MLPERF_LOADGEN_LOGS_DIR' not in env: - env['CM_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join(os.getcwd(), "loadgen_logs") + env['CM_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join( + os.getcwd(), "loadgen_logs") run_cmd = env['CM_MLPERF_RUN_CMD'].replace("'", '"') run_cmd = run_cmd.replace('"', '\\"') cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' +\ - os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \ - " -a " + env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \ - " -p " + env.get('CM_MLPERF_POWER_SERVER_PORT', "4950") + \ - " -w '" + run_cmd + \ - "' -L " + env['CM_MLPERF_LOADGEN_LOGS_DIR'] + \ - " -o " + env['CM_MLPERF_POWER_LOG_DIR'] + \ - " -n " + env['CM_MLPERF_POWER_NTP_SERVER'] + \ - timestamp + os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \ + " -a " + env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \ + " -p " + env.get('CM_MLPERF_POWER_SERVER_PORT', "4950") + \ + " -w '" + run_cmd + \ + "' -L " + env['CM_MLPERF_LOADGEN_LOGS_DIR'] + \ + " -o " + env['CM_MLPERF_POWER_LOG_DIR'] + \ + " -n " + env['CM_MLPERF_POWER_NTP_SERVER'] + \ + timestamp if 'CM_MLPERF_POWER_MAX_AMPS' in env and 'CM_MLPERF_POWER_MAX_VOLTS' in env: cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \ - " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] + " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] env['CM_MLPERF_POWER_RUN_CMD'] = cmd - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/run-mlperf-power-server/customize.py b/script/run-mlperf-power-server/customize.py index 65c7830420..ea989bb401 100644 --- a/script/run-mlperf-power-server/customize.py +++ b/script/run-mlperf-power-server/customize.py @@ -3,15 +3,20 @@ import os import configparser + def preprocess(i): os_info = i['os_info'] env = i['env'] config = configparser.ConfigParser() - server_config_file = os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'server.template.conf') + server_config_file = os.path.join( + env['CM_MLPERF_POWER_SOURCE'], + 'ptd_client_server', + 'server.template.conf') config.read(server_config_file) config['server']['ntpServer'] = env['CM_MLPERF_POWER_NTP_SERVER'] - config['server']['listen'] = 
env['CM_MLPERF_POWER_SERVER_ADDRESS'] + " " + env['CM_MLPERF_POWER_SERVER_PORT'] + config['server']['listen'] = env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \ + " " + env['CM_MLPERF_POWER_SERVER_PORT'] config['ptd']['ptd'] = env['CM_MLPERF_PTD_PATH'] config['ptd']['interfaceFlag'] = env['CM_MLPERF_POWER_INTERFACE_FLAG'] config['ptd']['deviceType'] = env['CM_MLPERF_POWER_DEVICE_TYPE'] @@ -25,7 +30,10 @@ def preprocess(i): else: cmd_prefix = "sudo " - cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'server.py') +' -c power-server.conf' + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join( + env['CM_MLPERF_POWER_SOURCE'], + 'ptd_client_server', + 'server.py') + ' -c power-server.conf' if env.get('CM_MLPERF_POWER_SERVER_USE_SCREEN', 'no') == 'yes': cmd = cmd_prefix + ' screen -d -m ' + cmd + ' ' else: @@ -33,7 +41,8 @@ def preprocess(i): env['RUN_CMD'] = cmd - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/script/run-mlperf-training-submission-checker/customize.py b/script/run-mlperf-training-submission-checker/customize.py index 1b66bb7515..606047b51d 100644 --- a/script/run-mlperf-training-submission-checker/customize.py +++ b/script/run-mlperf-training-submission-checker/customize.py @@ -3,30 +3,36 @@ import os import subprocess + def preprocess(i): os_info = i['os_info'] env = i['env'] submission_dir = env.get("CM_MLPERF_SUBMISSION_DIR", "") - version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','v3.1') + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', 'v3.1') if submission_dir == "": return {'return': 1, 'error': 'Please set CM_MLPERF_SUBMISSION_DIR'} - submitter = env.get("CM_MLPERF_SUBMITTER", "") #"default") + submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") if ' ' in submitter: - return {'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + return { + 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. 
Given value: {}'.format(submitter)} - submission_checker_file = os.path.join(env['CM_MLPERF_LOGGING_REPO_PATH'], "scripts", "verify_for_" + version + "_training.sh") + submission_checker_file = os.path.join( + env['CM_MLPERF_LOGGING_REPO_PATH'], + "scripts", + "verify_for_" + version + "_training.sh") - extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS','') + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') CMD = submission_checker_file + " " + submission_dir env['CM_RUN_CMD'] = CMD - return {'return':0} + return {'return': 0} + def postprocess(i): @@ -34,4 +40,4 @@ def postprocess(i): if env.get('CM_TAR_SUBMISSION_DIR'): env['CM_TAR_INPUT_DIR'] = env.get('CM_MLPERF_SUBMISSION_DIR', '$HOME') - return {'return':0} + return {'return': 0} diff --git a/script/run-terraform/customize.py b/script/run-terraform/customize.py index eeddbff60d..b1939f15c1 100644 --- a/script/run-terraform/customize.py +++ b/script/run-terraform/customize.py @@ -4,12 +4,15 @@ import shutil import json + def preprocess(i): os_info = i['os_info'] env = i['env'] script_dir = i['run_script_input']['path'] - config_dir = os.path.join(script_dir, env.get('CM_TERRAFORM_CONFIG_DIR_NAME', '')) + config_dir = os.path.join( + script_dir, env.get( + 'CM_TERRAFORM_CONFIG_DIR_NAME', '')) env['CM_TERRAFORM_CONFIG_DIR'] = config_dir cache_dir = os.getcwd() @@ -20,6 +23,7 @@ def preprocess(i): return {'return': 0} + def postprocess(i): env = i['env'] if env.get('CM_DESTROY_TERRAFORM'): @@ -47,7 +51,7 @@ def postprocess(i): 'action': 'run', 'tags': 'remote,run,ssh', 'env': { - }, + }, 'host': public_ip, 'user': user, 'skip_host_verify': True, @@ -62,18 +66,18 @@ def postprocess(i): "source ~/.profile", "cm pull repo ctuning@mlcommons-ck", "cm run script --tags=get,sys-utils-cm" - ] - } + ] + } if env.get('CM_TERRAFORM_RUN_COMMANDS'): run_cmds = env.get('CM_TERRAFORM_RUN_COMMANDS') for cmd in run_cmds: - cmd=cmd.replace(":", "=") - cmd=cmd.replace(";;", ",") + cmd = cmd.replace(":", "=") + cmd = cmd.replace(";;", ",") run_input['run_cmds'].append(cmd) r = cm.access(run_input) if r['return'] > 0: return r - #print(r) + # print(r) print_attr(instance_attributes, "id") print_attr(instance_attributes, "instance_type") print_attr(instance_attributes, "public_ip") @@ -82,6 +86,7 @@ def postprocess(i): return {'return': 0} + def print_attr(instance_attributes, key): if key in instance_attributes: print(key.upper() + ": " + str(instance_attributes[key])) diff --git a/script/run-vllm-server/customize.py b/script/run-vllm-server/customize.py index be1a988aa4..d9b7a26b02 100644 --- a/script/run-vllm-server/customize.py +++ b/script/run-vllm-server/customize.py @@ -1,5 +1,7 @@ from cmind import utils -import os, subprocess +import os +import subprocess + def preprocess(i): @@ -31,7 +33,8 @@ def preprocess(i): if pp_size: cmd_args += f" --api-key {api_key}" - distributed_executor_backend = env.get("CM_VLLM_SERVER_DIST_EXEC_BACKEND", False) + distributed_executor_backend = env.get( + "CM_VLLM_SERVER_DIST_EXEC_BACKEND", False) if distributed_executor_backend: cmd_args += f" --distributed-executor-backend {distributed_executor_backend}" @@ -147,7 +150,8 @@ def preprocess(i): if kv_cache_dtype: cmd_args += f" --kv-cache-dtype {kv_cache_dtype}" - quantization_param_path = env.get("CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH", False) + quantization_param_path = env.get( + "CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH", False) if quantization_param_path: cmd_args += f" --quantization-param-path {quantization_param_path}" @@ 
-155,7 +159,8 @@ def preprocess(i): if max_model_len: cmd_args += f" --max-model-len {max_model_len}" - guided_decoding_backend = env.get("CM_VLLM_SERVER_GUIDED_DECODING_BACKEND", False) + guided_decoding_backend = env.get( + "CM_VLLM_SERVER_GUIDED_DECODING_BACKEND", False) if guided_decoding_backend: cmd_args += f" --guided-decoding-backend {guided_decoding_backend}" @@ -163,11 +168,13 @@ def preprocess(i): if worker_use_ray: cmd_args += f" --worker-use-ray" - max_parallel_loading_workers = env.get("CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS", False) + max_parallel_loading_workers = env.get( + "CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS", False) if max_parallel_loading_workers: cmd_args += f" --max-parallel-loading-workers {max_parallel_loading_workers}" - ray_workers_use_nsight = env.get("CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT", False) + ray_workers_use_nsight = env.get( + "CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT", False) if ray_workers_use_nsight: cmd_args += f" --ray-workers-use-nsight" @@ -175,15 +182,18 @@ def preprocess(i): if block_size: cmd_args += f" --block-size {block_size}" - enable_prefix_caching = env.get("CM_VLLM_SERVER_ENABLE_PREFIX_CACHING", False) + enable_prefix_caching = env.get( + "CM_VLLM_SERVER_ENABLE_PREFIX_CACHING", False) if enable_prefix_caching: cmd_args += f" --enable-prefix-caching" - disable_sliding_window = env.get("CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW", False) + disable_sliding_window = env.get( + "CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW", False) if disable_sliding_window: cmd_args += f" --disable-sliding-window" - use_v2_block_manager = env.get("CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER", False) + use_v2_block_manager = env.get( + "CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER", False) if use_v2_block_manager: cmd_args += f" --use-v2-block-manager" @@ -199,15 +209,18 @@ def preprocess(i): if swap_space: cmd_args += f" --swap-space {swap_space}" - gpu_memory_utilization = env.get("CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION", False) + gpu_memory_utilization = env.get( + "CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION", False) if gpu_memory_utilization: cmd_args += f" --gpu-memory-utilization {gpu_memory_utilization}" - num_gpu_blocks_override = env.get("CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE", False) + num_gpu_blocks_override = env.get( + "CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE", False) if num_gpu_blocks_override: cmd_args += f" --num-gpu-blocks-override {num_gpu_blocks_override}" - max_num_batched_tokens = env.get("CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS", False) + max_num_batched_tokens = env.get( + "CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS", False) if max_num_batched_tokens: cmd_args += f" --max-num-batched-tokens {max_num_batched_tokens}" @@ -239,15 +252,18 @@ def preprocess(i): if enforce_eager: cmd_args += f" --enforce-eager" - max_context_len_to_capture = env.get("CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE", False) + max_context_len_to_capture = env.get( + "CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE", False) if max_context_len_to_capture: cmd_args += f" --max-context-len-to-capture {max_context_len_to_capture}" - max_seq_len_to_capture = env.get("CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE", False) + max_seq_len_to_capture = env.get( + "CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE", False) if max_seq_len_to_capture: cmd_args += f" --max-seq-len-to-capture {max_seq_len_to_capture}" - disable_custom_all_reduce = env.get("CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE", False) + disable_custom_all_reduce = env.get( + "CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE", False) if disable_custom_all_reduce: cmd_args += 
f" --disable-custom-all-reduce" @@ -259,7 +275,8 @@ def preprocess(i): if tokenizer_pool_type: cmd_args += f" --tokenizer-pool-type {tokenizer_pool_type}" - tokenizer_pool_extra_config = env.get("CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG", False) + tokenizer_pool_extra_config = env.get( + "CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG", False) if tokenizer_pool_extra_config: cmd_args += f" --tokenizer-pool-extra-config {tokenizer_pool_extra_config}" @@ -275,7 +292,8 @@ def preprocess(i): if max_lora_rank: cmd_args += f" --max-lora-rank {max_lora_rank}" - lora_extra_vocab_size = env.get("CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE", False) + lora_extra_vocab_size = env.get( + "CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE", False) if lora_extra_vocab_size: cmd_args += f" --lora-extra-vocab-size {lora_extra_vocab_size}" @@ -283,7 +301,8 @@ def preprocess(i): if lora_dtype: cmd_args += f" --lora-dtype {lora_dtype}" - long_lora_scaling_factors = env.get("CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS", False) + long_lora_scaling_factors = env.get( + "CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS", False) if long_lora_scaling_factors: cmd_args += f" --long-lora-scaling-factors {long_lora_scaling_factors}" @@ -295,7 +314,8 @@ def preprocess(i): if fully_sharded_loras: cmd_args += f" --fully-sharded-loras" - enable_prompt_adapter = env.get("CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER", False) + enable_prompt_adapter = env.get( + "CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER", False) if enable_prompt_adapter: cmd_args += f" --enable-prompt-adapter" @@ -303,7 +323,8 @@ def preprocess(i): if max_prompt_adapters: cmd_args += f" --max-prompt-adapters {max_prompt_adapters}" - max_prompt_adapter_token = env.get("CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN", False) + max_prompt_adapter_token = env.get( + "CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN", False) if max_prompt_adapter_token: cmd_args += f" --max-prompt-adapter-token {max_prompt_adapter_token}" @@ -311,11 +332,13 @@ def preprocess(i): if device: cmd_args += f" --device {device}" - scheduler_delay_factor = env.get("CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR", False) + scheduler_delay_factor = env.get( + "CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR", False) if scheduler_delay_factor: cmd_args += f" --scheduler-delay-factor {scheduler_delay_factor}" - enable_chunked_prefill = env.get("CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL", False) + enable_chunked_prefill = env.get( + "CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL", False) if enable_chunked_prefill: cmd_args += f" --enable-chunked-prefill" @@ -323,43 +346,53 @@ def preprocess(i): if speculative_model: cmd_args += f" --speculative-model {speculative_model}" - num_speculative_tokens = env.get("CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS", False) + num_speculative_tokens = env.get( + "CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS", False) if num_speculative_tokens: cmd_args += f" --num-speculative-tokens {num_speculative_tokens}" - speculative_draft_tensor_parallel_size = env.get("CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE", False) + speculative_draft_tensor_parallel_size = env.get( + "CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE", False) if speculative_draft_tensor_parallel_size: cmd_args += f" --speculative-draft-tensor-parallel-size {speculative_draft_tensor_parallel_size}" - speculative_max_model_len = env.get("CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN", False) + speculative_max_model_len = env.get( + "CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN", False) if speculative_max_model_len: cmd_args += f" --speculative-max-model-len {speculative_max_model_len}" - 
speculative_disable_by_batch_size = env.get("CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE", False)
+    speculative_disable_by_batch_size = env.get(
+        "CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE", False)
     if speculative_disable_by_batch_size:
         cmd_args += f" --speculative-disable-by-batch-size {speculative_disable_by_batch_size}"

-    ngram_prompt_lookup_max = env.get("CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX", False)
+    ngram_prompt_lookup_max = env.get(
+        "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX", False)
     if ngram_prompt_lookup_max:
         cmd_args += f" --ngram-prompt-lookup-max {ngram_prompt_lookup_max}"

-    ngram_prompt_lookup_min = env.get("CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN", False)
+    ngram_prompt_lookup_min = env.get(
+        "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN", False)
     if ngram_prompt_lookup_min:
         cmd_args += f" --ngram-prompt-lookup-min {ngram_prompt_lookup_min}"

-    spec_decoding_acceptance_method = env.get("CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD", False)
+    spec_decoding_acceptance_method = env.get(
+        "CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD", False)
     if spec_decoding_acceptance_method:
         cmd_args += f" --spec-decoding-acceptance-method {spec_decoding_acceptance_method}"

-    typical_acceptance_sampler_posterior_threshold = env.get("CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD", False)
+    typical_acceptance_sampler_posterior_threshold = env.get(
+        "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD", False)
     if typical_acceptance_sampler_posterior_threshold:
         cmd_args += f" --typical-acceptance-sampler-posterior-threshold {typical_acceptance_sampler_posterior_threshold}"

-    typical_acceptance_sampler_posterior_alpha = env.get("CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA", False)
+    typical_acceptance_sampler_posterior_alpha = env.get(
+        "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA", False)
     if typical_acceptance_sampler_posterior_alpha:
         cmd_args += f" --typical-acceptance-sampler-posterior-alpha {typical_acceptance_sampler_posterior_alpha}"

-    model_loader_extra_config = env.get("CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG", False)
+    model_loader_extra_config = env.get(
+        "CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG", False)
     if model_loader_extra_config:
         cmd_args += f" --model-loader-extra-config {model_loader_extra_config}"

@@ -371,11 +404,13 @@ def preprocess(i):
     if served_model_name:
         cmd_args += f" --served-model-name {served_model_name}"

-    qlora_adapter_name_or_path = env.get("CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH", False)
+    qlora_adapter_name_or_path = env.get(
+        "CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH", False)
     if qlora_adapter_name_or_path:
         cmd_args += f" --qlora-adapter-name-or-path {qlora_adapter_name_or_path}"

-    otlp_traces_endpoint = env.get("CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT", False)
+    otlp_traces_endpoint = env.get(
+        "CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT", False)
     if otlp_traces_endpoint:
         cmd_args += f" --otlp-traces-endpoint {otlp_traces_endpoint}"

@@ -383,7 +418,8 @@ def preprocess(i):
     if engine_use_ray:
         cmd_args += f" --engine-use-ray"

-    disable_log_requests = env.get("CM_VLLM_SERVER_DISABLE_LOG_REQUESTS", False)
+    disable_log_requests = env.get(
+        "CM_VLLM_SERVER_DISABLE_LOG_REQUESTS", False)
     if disable_log_requests:
         cmd_args += f" --disable-log-requests"

@@ -396,10 +432,11 @@ def preprocess(i):

     env['CM_VLLM_RUN_CMD'] = cmd

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
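The run-vllm-server hunks above all repeat one pattern: read an optional `CM_VLLM_SERVER_*` variable from `env` and, when it is set, append the matching vLLM CLI flag to `cmd_args`. A table-driven sketch of the same idea, using only variable names visible in the hunks (an illustration, not the script's actual structure):

```python
import os

# Env vars whose value becomes the flag's argument.
VALUE_FLAGS = {
    "CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION": "--gpu-memory-utilization",
    "CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE": "--num-gpu-blocks-override",
    "CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS": "--max-num-batched-tokens",
}

# Env vars that emit a bare boolean flag when set.
BOOLEAN_FLAGS = {
    "CM_VLLM_SERVER_ENABLE_PREFIX_CACHING": "--enable-prefix-caching",
    "CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW": "--disable-sliding-window",
    "CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER": "--use-v2-block-manager",
}


def build_cmd_args(env=os.environ):
    args = []
    for var, flag in VALUE_FLAGS.items():
        value = env.get(var, "")
        if value:
            args.append(f"{flag} {value}")
    for var, flag in BOOLEAN_FLAGS.items():
        if env.get(var, ""):
            args.append(flag)
    return " ".join(args)
```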
diff --git a/script/runtime-system-infos/customize.py b/script/runtime-system-infos/customize.py
index a21ded5ce8..3428ccf3f4 100644
--- a/script/runtime-system-infos/customize.py
+++ b/script/runtime-system-infos/customize.py
@@ -1,7 +1,8 @@
 from cmind import utils
 import os
 import shutil
-import psutil  # used to measure the system infos(have not tested for obtaining gpu info)
+# used to measure the system info (not tested for obtaining GPU info)
+import psutil
 import csv  # used to write the measurements to csv format as txt file
 from datetime import datetime, timezone
 import time
@@ -9,26 +10,30 @@ import sys

 # format of time measurement in mlperf logs
-#:::MLLOG {"key": "power_begin", "value": "07-20-2024 17:54:38.800", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 564, "pid": 9473, "tid": 9473}}
-#:::MLLOG {"key": "power_end", "value": "07-20-2024 17:54:39.111", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 566, "pid": 9473, "tid": 9473}}
+# :::MLLOG {"key": "power_begin", "value": "07-20-2024 17:54:38.800", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 564, "pid": 9473, "tid": 9473}}
+# :::MLLOG {"key": "power_end", "value": "07-20-2024 17:54:39.111", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 566, "pid": 9473, "tid": 9473}}

 # in order to safely close when receiving an interrupt signal
 # argument sig: signal number
 # argument frame: current stack frame
+
+
 def signal_handler(sig, frame):
     print("Signal received, closing the system information file safely.")
     f.close()
     sys.exit(0)

+
 # Register signal handlers for SIGTERM
 signal.signal(signal.SIGTERM, signal_handler)

+
 def preprocess(i):

     os_info = i['os_info']

     if os_info['platform'] == 'windows':
-        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}

     env = i['env']

@@ -47,7 +52,11 @@ def preprocess(i):

     print("Started measuring system info!")

-    csv_headers = ['timestamp', 'cpu_utilisation', 'total_memory_gb', 'used_memory_gb']
+    csv_headers = [
+        'timestamp',
+        'cpu_utilisation',
+        'total_memory_gb',
+        'used_memory_gb']

     # done to be made available to signal_handler function in case of kill signals
     # as of now handles only SIGTERM
@@ -76,10 +85,11 @@ def preprocess(i):
         time.sleep(interval)

     f.close()
-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
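For reference, the measurement flow patched above (psutil sampling, CSV rows, SIGTERM-safe shutdown) reduces to the sketch below. The file name, sample count and interval are assumptions for illustration; the real script takes its paths and interval from env.

# Minimal sketch of the sampling loop above, under assumed settings.
import csv
import signal
import sys
import time

import psutil

f = open("sys_info.csv", "w", newline="")  # assumed file name
writer = csv.writer(f)
writer.writerow(['timestamp', 'cpu_utilisation', 'total_memory_gb', 'used_memory_gb'])

def signal_handler(sig, frame):
    # Close the file so buffered rows survive an external kill.
    f.close()
    sys.exit(0)

signal.signal(signal.SIGTERM, signal_handler)

for _ in range(5):  # assumed sample count; the real script loops until stopped
    mem = psutil.virtual_memory()
    writer.writerow([time.time(),
                     psutil.cpu_percent(),
                     mem.total / 1e9,
                     mem.used / 1e9])
    time.sleep(2)  # assumed interval

f.close()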
diff --git a/script/save-mlperf-inference-implementation-state/customize.py b/script/save-mlperf-inference-implementation-state/customize.py
index be3be96798..07e7169559 100644
--- a/script/save-mlperf-inference-implementation-state/customize.py
+++ b/script/save-mlperf-inference-implementation-state/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -14,27 +15,30 @@ def preprocess(i):

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    if not state.get('mlperf-inference-implementation'): #No state information. Just returning
+    if not state.get(
+            'mlperf-inference-implementation'):  # No state information. Just returning
         return {'return': 0}

     if env.get('CM_MLPERF_README', "") == "yes":
         import cmind as cm
         inp = i['input']
-        script_tags = state['mlperf-inference-implementation'].get('script_tags', '')
-        script_adr = state['mlperf-inference-implementation'].get('script_adr', {})
+        script_tags = state['mlperf-inference-implementation'].get(
+            'script_tags', '')
+        script_adr = state['mlperf-inference-implementation'].get(
+            'script_adr', {})

         if script_tags != '':
             cm_input = {'action': 'run',
-                    'automation': 'script',
-                    'tags': script_tags,
-                    'adr': script_adr,
-                    'env': env,
-                    'print_deps': True,
-                    'quiet': True,
-                    'silent': True,
-                    'fake_run': True
-                    }
+                        'automation': 'script',
+                        'tags': script_tags,
+                        'adr': script_adr,
+                        'env': env,
+                        'print_deps': True,
+                        'quiet': True,
+                        'silent': True,
+                        'fake_run': True
+                        }

             r = cm.access(cm_input)
             if r['return'] > 0:
@@ -54,10 +58,11 @@ def preprocess(i):

         state['mlperf-inference-implementation']['version_info'] = version_info

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/set-device-settings-qaic/customize.py b/script/set-device-settings-qaic/customize.py
index 48d065c84e..3f3e179f71 100644
--- a/script/set-device-settings-qaic/customize.py
+++ b/script/set-device-settings-qaic/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -21,19 +22,23 @@ def preprocess(i):
             ecc_template['request'].append({})
             ecc_template['request'][0]['qid'] = device
             ecc_template['request'][0]['dev_config'] = {}
-            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request'] = {}
-            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'] = []
-            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'].append("RAS_DDR_ECC")
-            with open("request_"+device+".json", "w") as f:
+            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request'] = {
+            }
+            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'] = [
+            ]
+            ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'].append(
+                "RAS_DDR_ECC")
+            with open("request_" + device + ".json", "w") as f:
                 f.write(json.dumps(ecc_template))

     if env.get('CM_QAIC_VC', '') != '':
         env['CM_QAIC_VC_HEX'] = hex(int(env['CM_QAIC_VC']))

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/set-echo-off-win/customize.py b/script/set-echo-off-win/customize.py
index ef9ba8b8dd..287737629c 100644
--- a/script/set-echo-off-win/customize.py
+++ b/script/set-echo-off-win/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -10,16 +11,16 @@ def preprocess(i):

     # If windows, download here otherwise use run.sh
     if os_info['platform'] == 'windows':
-        script_prefix = state.get('script_prefix',[])
+        script_prefix = state.get('script_prefix', [])

-        s='@echo off'
+        s = '@echo off'
         if s not in script_prefix:
             script_prefix.insert(0, s)

         state['script_prefix'] = script_prefix

     # Test to skip next dependency
-    #env = i['env']
-    #env['CM_SKIP_SYS_UTILS'] = 'YES'
+    # env = i['env']
+    # env['CM_SKIP_SYS_UTILS'] = 'YES'

-    return {'return':0}
+    return {'return': 0}
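For reference, the nested assignments in the QAIC hunk above build one small JSON payload per device. Written out directly, with an assumed device id of "0", the generated request_<qid>.json looks like this sketch:

# Sketch of the RAS ECC request the script writes per device; "0" is an
# assumed device id for illustration.
import json

device = "0"
ecc_template = {
    "request": [{
        "qid": device,
        "dev_config": {
            "update_ras_ecc_config_request": {
                "ras_ecc": ["RAS_DDR_ECC"]
            }
        }
    }]
}
with open("request_" + device + ".json", "w") as f:
    f.write(json.dumps(ecc_template))

diff --git a/script/set-performance-mode/customize.py b/script/set-performance-mode/customize.py
index fca19d718c..288ebc9c05 100644
--- a/script/set-performance-mode/customize.py
+++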
b/script/set-performance-mode/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -14,10 +15,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/set-sqlite-dir/customize.py b/script/set-sqlite-dir/customize.py index 638e68ff0c..21ab396e48 100644 --- a/script/set-sqlite-dir/customize.py +++ b/script/set-sqlite-dir/customize.py @@ -1,9 +1,10 @@ import os + def postprocess(i): env = i['env'] env['CM_SQLITE_PATH'] = os.getcwd() - return {'return':0} + return {'return': 0} diff --git a/script/set-user-limits/customize.py b/script/set-user-limits/customize.py index 3b67e410b3..ff5767250e 100644 --- a/script/set-user-limits/customize.py +++ b/script/set-user-limits/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -20,10 +21,11 @@ def preprocess(i): env['CM_RUN_CMD'] = " && ".join(cmds) - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/set-venv/customize.py b/script/set-venv/customize.py index 1763fb00d3..757287bde0 100644 --- a/script/set-venv/customize.py +++ b/script/set-venv/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -19,7 +20,7 @@ def preprocess(i): name = env.get('CM_NAME', '') if name == '': artifacts = i.get('input', {}).get('artifacts', []) - if len(artifacts)>0: + if len(artifacts) > 0: name = artifacts[0] if name == '': name = 'default' @@ -32,34 +33,34 @@ def preprocess(i): activate_script2 = os.path.join(name, activate_script) if not os.path.isfile(activate_script2): - force_python_path = env.get('CM_SET_VENV_PYTHON','') + force_python_path = env.get('CM_SET_VENV_PYTHON', '') if force_python_path != '' and not os.path.isfile(force_python_path): - return {'return':1, 'error':'python executable not found: {}'.format(force_python_path)} + return {'return': 1, 'error': 'python executable not found: {}'.format( + force_python_path)} if os_info['platform'] == 'windows': python_path = 'python.exe' if force_python_path == '' else force_python_path - create_dir = ' & md {}\work' + create_dir = ' & md {}\\work' else: python_path = 'python3' if force_python_path == '' else force_python_path create_dir = ' ; mkdir {}/work' cmd = python_path + ' -m venv ' + name + create_dir.format(name) - print ('====================================================================') + print('====================================================================') - print ('Creating venv: "{}" ...'.format(cmd)) + print('Creating venv: "{}" ...'.format(cmd)) os.system(cmd) - if os.path.isfile(activate_script2): - script_file = 'venv-'+name + script_file = 'venv-' + name if os_info['platform'] == 'windows': script_file += '.bat' xcmd = script_file else: script_file += '.sh' - xcmd = 'source '+script_file + xcmd = 'source ' + script_file if not os.path.isfile(script_file): @@ -73,24 +74,28 @@ def preprocess(i): shell = env.get('CM_SET_VENV_SHELL', '') if shell != '': shell = shell.replace('CM_SET_VENV_WORK', 'work') - if shell == '': shell = 'cmd' - cmd = 'cd {} & call {} & set CM_REPOS=%CD%\{}\CM & {}\n'.format(name, activate_script, name, shell) + if shell == '': + shell = 'cmd' + cmd = 'cd {} & call {} & set CM_REPOS=%CD%\\{}\\CM & {}\n'.format( + name, activate_script, name, 
shell) else: - cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format(name, activate_script) + cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format( + name, activate_script) with open(script_file, 'w') as f: f.write(cmd) - print ('====================================================================') - print ('Please run the following command:') - print ('') - print (xcmd) - print ('====================================================================') + print('====================================================================') + print('Please run the following command:') + print('') + print(xcmd) + print('====================================================================') + + return {'return': 0} - return {'return':0} def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py index d405e76fdb..c1d8498b89 100644 --- a/script/tar-my-folder/customize.py +++ b/script/tar-my-folder/customize.py @@ -4,6 +4,7 @@ import subprocess from os.path import exists + def preprocess(i): os_info = i['os_info'] @@ -17,13 +18,15 @@ def preprocess(i): output_file = env.get("CM_TAR_OUTFILE", "") input_dirname = os.path.basename(input_dir) if output_file == "": - output_file = input_dirname+".tar.gz" + output_file = input_dirname + ".tar.gz" from pathlib import Path input_path = Path(input_dir) cd_dir = input_path.parent.absolute() - CMD = 'tar --directory '+str(cd_dir)+' -czf ' + os.path.join(output_dir, output_file) + ' ' + input_dirname + CMD = 'tar --directory ' + \ + str(cd_dir) + ' -czf ' + os.path.join(output_dir, + output_file) + ' ' + input_dirname print(CMD) ret = os.system(CMD) - print("Tar file "+os.path.join(output_dir, output_file)+ " created") + print("Tar file " + os.path.join(output_dir, output_file) + " created") - return {'return':ret} + return {'return': ret} diff --git a/script/test-cm-core/customize.py b/script/test-cm-core/customize.py index 831f0bdff0..c3289f3451 100644 --- a/script/test-cm-core/customize.py +++ b/script/test-cm-core/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,4 +14,4 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} diff --git a/script/test-cm-core/src/script/check.py b/script/test-cm-core/src/script/check.py index 7394406d8f..4883116c03 100644 --- a/script/test-cm-core/src/script/check.py +++ b/script/test-cm-core/src/script/check.py @@ -1,9 +1,11 @@ def check_return(r): if 'return' not in r: - raise Exception('CM access function should always return key \'return\'!') + raise Exception( + 'CM access function should always return key \'return\'!') if 'error' in r: raise Exception(r['error']) + def check_list(r, string, found=True): check_return(r) if 'list' not in r: diff --git a/script/test-cm-core/src/script/process_dockerfile.py b/script/test-cm-core/src/script/process_dockerfile.py index 62bf4d5f87..1d23ec6015 100644 --- a/script/test-cm-core/src/script/process_dockerfile.py +++ b/script/test-cm-core/src/script/process_dockerfile.py @@ -5,7 +5,7 @@ import json import yaml -files=sys.argv[1:] +files = sys.argv[1:] for file in files: if not os.path.isfile(file): @@ -26,5 +26,8 @@ uid = data['uid'] - r = cm.access({'action':'dockerfile', 'automation':'script', 'artifact': uid, 'quiet': 'yes'}) + r = cm.access({'action': 'dockerfile', + 'automation': 'script', + 
'artifact': uid,
+                   'quiet': 'yes'})
     checks.check_return(r)
diff --git a/script/test-cm-core/src/script/process_readme.py b/script/test-cm-core/src/script/process_readme.py
index e33f26528b..03daec7933 100644
--- a/script/test-cm-core/src/script/process_readme.py
+++ b/script/test-cm-core/src/script/process_readme.py
@@ -5,7 +5,7 @@
 import json
 import yaml

-files=sys.argv[1:]
+files = sys.argv[1:]

 for file in files:
     if not os.path.isfile(file):
@@ -22,5 +22,6 @@
             data = yaml.safe_load(f)

     uid = data['uid']
-    r = cm.access({'action':'doc', 'automation':'script', 'artifact': uid, 'quiet': 'yes'})
+    r = cm.access({'action': 'doc', 'automation': 'script',
+                   'artifact': uid, 'quiet': 'yes'})
     checks.check_return(r)
diff --git a/script/test-cm-core/src/script/test_deps.py b/script/test-cm-core/src/script/test_deps.py
index aaf19bc81b..37d75fd4c4 100644
--- a/script/test-cm-core/src/script/test_deps.py
+++ b/script/test-cm-core/src/script/test_deps.py
@@ -1,23 +1,25 @@
-# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps
+# This test covers version, variation, compilation from src, add_deps,
+# add_deps_recursive, deps, post_deps

 import cmind as cm
 import check as checks

-# MLPerf v3.0 inference is now very outdated and we are testing inference in separate tests
+# MLPerf v3.0 inference is now very outdated and we are testing inference
+# in separate tests

-#r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr':
+# r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr':
 #               {'loadgen': {'version': 'r3.0'}, 'compiler': {'tags': "gcc"}}, 'env': {'CM_MODEL': 'resnet50',
 #               'CM_DEVICE': 'cpu', 'CM_BACKEND': 'onnxruntime'}, 'quiet': 'yes'})
-#checks.check_return(r)
+# checks.check_return(r)
 #
-#r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'})
-#checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual")
+# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'})
+# checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual")
 #
-#r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'})
-#checks.check_list(r, "inference,src,version-r3.0")
+# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'})
+# checks.check_list(r, "inference,src,version-r3.0")
 #
-#r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'})
-#checks.check_return(r)
+# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'})
+# checks.check_return(r)
 #
-#r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'})
-#checks.check_return(r)
+# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'})
+# checks.check_return(r)
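For reference, every helper reformatted here leans on the same cm.access contract that check_return() enforces: the call returns a dict, 'return' equal to 0 means success, and a positive value carries an 'error' message. A minimal usage sketch with illustrative tags:

# Minimal sketch of the cm.access contract the checks above enforce.
import cmind as cm

r = cm.access({'action': 'search',
               'automation': 'script',
               'tags': 'detect,os'})  # illustrative tags
if 'return' not in r:
    raise Exception('CM access function should always return key \'return\'!')
if r['return'] > 0:
    raise Exception(r.get('error', 'unknown CM error'))

diff --git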
a/script/test-cm-core/src/script/test_docker.py b/script/test-cm-core/src/script/test_docker.py index 0663cd54e4..ad867a2a12 100644 --- a/script/test-cm-core/src/script/test_docker.py +++ b/script/test-cm-core/src/script/test_docker.py @@ -1,38 +1,39 @@ -# This test covers version, variation, compilation from src, add_deps_recursive, post_deps +# This test covers version, variation, compilation from src, +# add_deps_recursive, post_deps import cmind as cm import check as checks -r = cm.access({'action':'run', - 'automation':'script', +r = cm.access({'action': 'run', + 'automation': 'script', 'tags': 'run,docker,container', 'add_deps_recursive': { - 'compiler': {'tags': "gcc"} + 'compiler': {'tags': "gcc"} }, 'docker_cm_repo': 'mlcommons@cm4mlops', - 'image_name':'cm-script-app-image-classification-onnx-py', + 'image_name': 'cm-script-app-image-classification-onnx-py', 'env': { - 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', - 'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04', - 'CM_DOCKER_IMAGE_REPO': 'cknowledge' + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04', + 'CM_DOCKER_IMAGE_REPO': 'cknowledge' }, 'quiet': 'yes' - }) + }) checks.check_return(r) -r = cm.access({'action':'run', - 'automation':'script', +r = cm.access({'action': 'run', + 'automation': 'script', 'tags': 'run,docker,container', 'add_deps_recursive': { - 'compiler': {'tags': "gcc"} + 'compiler': {'tags': "gcc"} }, 'docker_cm_repo': 'mlcommons@cm4mlops', - 'image_name':'cm-script-app-image-classification-onnx-py', + 'image_name': 'cm-script-app-image-classification-onnx-py', 'env': { - 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', - 'CM_DOCKER_IMAGE_BASE': 'ubuntu:24.04', - 'CM_DOCKER_IMAGE_REPO': 'local' + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_DOCKER_IMAGE_BASE': 'ubuntu:24.04', + 'CM_DOCKER_IMAGE_REPO': 'local' }, 'quiet': 'yes' - }) + }) checks.check_return(r) diff --git a/script/test-cm-core/src/script/test_features.py b/script/test-cm-core/src/script/test_features.py index d116cbd5bf..cde5e3ed21 100644 --- a/script/test-cm-core/src/script/test_features.py +++ b/script/test-cm-core/src/script/test_features.py @@ -5,17 +5,27 @@ import cmind as cm import check as checks -r = cm.access({'action':'run', 'automation':'script', 'tags': 'install,python-venv', 'name':'test', 'quiet': 'yes'}) +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'name': 'test', + 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,python,virtual,name-test'}) +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,python,virtual,name-test'}) checks.check_list(r, "get,python-venv") -r = cm.access({'action':'run', 'automation':'script', 'tags': 'get,dataset,preprocessed,imagenet,_NHWC', 'quiet': 'yes'}) +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,dataset,preprocessed,imagenet,_NHWC', + 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'}) +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'}) checks.check_list(r, "_NHWC") -r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,dataset,preprocessed,imagenet,-_NHWC'}) +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 
'get,dataset,preprocessed,imagenet,-_NHWC'}) checks.check_list(r, "_NHWC", False) diff --git a/script/test-cm-core/src/script/test_install.py b/script/test-cm-core/src/script/test_install.py index 66fa164d50..d4fb93ec70 100644 --- a/script/test-cm-core/src/script/test_install.py +++ b/script/test-cm-core/src/script/test_install.py @@ -3,8 +3,13 @@ import cmind as cm import check as checks -r = cm.access({'action':'run', 'automation':'script', 'tags': 'python,src,install,_shared', 'version': '3.9.10', 'quiet': 'true'}) +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'python,src,install,_shared', + 'version': '3.9.10', + 'quiet': 'true'}) checks.check_return(r) -r = cm.access({'action':'search', 'automation':'cache', 'tags': 'python,src,install,_shared,version-3.9.10'}) +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'python,src,install,_shared,version-3.9.10'}) checks.check_list(r, "python,src,install,_shared,version-3.9.10") diff --git a/script/test-cm-core/src/test_cm.py b/script/test-cm-core/src/test_cm.py index 41fb402c22..821e1571d6 100644 --- a/script/test-cm-core/src/test_cm.py +++ b/script/test-cm-core/src/test_cm.py @@ -3,12 +3,15 @@ r = cm.access(['test', 'script']) if 'return' not in r: - raise Exception('CM access function should always return key \'return\'!') + raise Exception( + 'CM access function should always return key \'return\'!') exit(0) except ImportError as e: from sys import stderr from subprocess import call - print('WARNING: CM module for python is not installed & jupyter notebooks will not be supported', file=stderr) + print( + 'WARNING: CM module for python is not installed & jupyter notebooks will not be supported', + file=stderr) retcode = call(['cm', 'test', 'script']) exit(retcode) diff --git a/script/test-cm-core/src/test_search_speed.py b/script/test-cm-core/src/test_search_speed.py index 3086a83408..577c4f0b80 100644 --- a/script/test-cm-core/src/test_search_speed.py +++ b/script/test-cm-core/src/test_search_speed.py @@ -5,21 +5,22 @@ steps = 10 -print ('Running search with tags {} times ...'.format(steps)) +print('Running search with tags {} times ...'.format(steps)) for step in range(steps): start = time.time() - r = cm.access({'action':'search', - 'automation':'script', - 'tags':'detect,os'}) + r = cm.access({'action': 'search', + 'automation': 'script', + 'tags': 'detect,os'}) timer = time.time() - start - if r['return']>0: cm.error(r) + if r['return'] > 0: + cm.error(r) times.append(timer) step = 0 for t in times: step += 1 - print ("{}) {:0.3f} sec.".format(step, t)) + print("{}) {:0.3f} sec.".format(step, t)) diff --git a/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py b/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py index 404b69fe93..136f5e6f2e 100644 --- a/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py +++ b/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py @@ -1,28 +1,37 @@ -# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps import cmind as cm from pathlib import Path import sys import os -sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script")) -import check as checks +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa -r = cm.access({'action':'run', 'automation':'script', 'tags': 
'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': \ - {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', \ - 'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': \ - {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', \ - 'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'run', 'automation':'script', 'tags': 'install,python-venv', 'version': '3.10.8', 'name': 'mlperf' }) +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'version': '3.10.8', + 'name': 'mlperf'}) checks.check_return(r) -r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': \ - {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community', \ - 'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline', \ - 'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community', + 'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'}) checks.check_return(r) diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm.py index 9bf64562a6..38b812d6b1 100644 --- a/script/test-cm-core/src/tutorials/test_tutorial_tvm.py +++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm.py @@ -1,4 +1,5 @@ -# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps import cmind as cm @@ -6,18 +7,22 @@ import sys import os -sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script")) -import check as checks +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa -r = 
cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \ - {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', \ - 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \ - 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_dashboard', 'adr': \ - {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', \ - 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \ - 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_dashboard', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) checks.check_return(r) diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py index 7876adff54..d6db8c798a 100644 --- a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py +++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py @@ -4,17 +4,23 @@ import sys import os -sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script")) -import check as checks +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa -r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \ - {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, \ - 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \ - 'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': { + 'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, + 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', + 'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) checks.check_return(r) -r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': \ - {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}, 'tvm-model': {'tags': 
'_graph_executor'}}, \
-               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \
-               'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {
+                   'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}},
+               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx',
+               'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
index 74ead5c364..5554f3c6a0 100644
--- a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
+++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
@@ -1,4 +1,5 @@
-# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps
+# This test covers version, variation, compilation from src, add_deps,
+# add_deps_recursive, deps, post_deps

 import cmind as cm

@@ -6,17 +7,21 @@
 import sys
 import os

-sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
-import check as checks
+sys.path.insert(
+    1,
+    os.path.join(
+        Path(__file__).parent.parent.resolve(),
+        "script"))
+import check as checks  # noqa

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
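For reference, each tutorial test repeats the import shim reformatted above: put the sibling script/ directory on sys.path, then import check. Condensed into one self-contained form, the shim is:

# Sketch of the path shim the tutorial tests use so that check.py in the
# sibling "script" directory is importable; assumes that directory exists.
import os
import sys
from pathlib import Path

sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
import check as checks  # noqa: E402 (import must follow the path insert)

diff --git a/script/test-cm-script-pipeline/customize.py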
b/script/test-cm-script-pipeline/customize.py index 89311a22fa..d05326d931 100644 --- a/script/test-cm-script-pipeline/customize.py +++ b/script/test-cm-script-pipeline/customize.py @@ -3,13 +3,15 @@ from cmind import utils import os + def preprocess(i): - print ('') - print ('customize.py: preprocess') - print ('') + print('') + print('customize.py: preprocess') + print('') + + return {'return': 0} - return {'return':0} def postprocess(i): @@ -17,22 +19,22 @@ def postprocess(i): run_script_input = i['run_script_input'] env = i['env'] - print ('') - print ('customize.py: postprocess') - print ('') + print('') + print('customize.py: postprocess') + print('') - r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'run2'}) - if r['return']>0: + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'run2'}) + if r['return'] > 0: return r - return {'return':0} + return {'return': 0} -def detect_version(i): +def detect_version(i): - print ('') - print ('customize.py: detect_version') - print ('') - + print('') + print('customize.py: detect_version') + print('') - return {'return':0} + return {'return': 0} diff --git a/script/test-cm-scripts/customize.py b/script/test-cm-scripts/customize.py index d12f9b3e1d..273999d460 100644 --- a/script/test-cm-scripts/customize.py +++ b/script/test-cm-scripts/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os + def preprocess(i): os_info = i['os_info'] @@ -13,10 +14,11 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - return {'return':0} + return {'return': 0} + def postprocess(i): env = i['env'] - return {'return':0} + return {'return': 0} diff --git a/script/test-debug/customize.py b/script/test-debug/customize.py index a3af87f648..4c1388a1a3 100644 --- a/script/test-debug/customize.py +++ b/script/test-debug/customize.py @@ -2,29 +2,34 @@ import os + def preprocess(i): os_info = i['os_info'] env = i['env'] meta = i['meta'] - print ("********************************************************") - print ('- Importing CM library ...') + print("********************************************************") + print('- Importing CM library ...') import cmind - print (' SUCCESS!') - - cmind.utils.debug_here(__file__, port=5678, text='Debugging customize.py!', env=env, env_debug_uid='8d96cd9fa4734204').breakpoint() + print(' SUCCESS!') - print ('') - print ('- List CM repos ...') - print ('') - r = cmind.access({'action':'show', 'automation':'repo', 'out':'con'}) - print ('') - print (' SUCCESS!') - print ("********************************************************") + cmind.utils.debug_here( + __file__, + port=5678, + text='Debugging customize.py!', + env=env, + env_debug_uid='8d96cd9fa4734204').breakpoint() + print('') + print('- List CM repos ...') + print('') + r = cmind.access({'action': 'show', 'automation': 'repo', 'out': 'con'}) + print('') + print(' SUCCESS!') + print("********************************************************") - return {'return':0} + return {'return': 0} def postprocess(i): @@ -32,4 +37,4 @@ def postprocess(i): env = i['env'] state = i['state'] - return {'return':0} + return {'return': 0} diff --git a/script/test-debug/python/main.py b/script/test-debug/python/main.py index f913765647..a00bf6b346 100644 --- a/script/test-debug/python/main.py +++ b/script/test-debug/python/main.py @@ -4,21 +4,23 @@ # Developer(s): Grigori Fursin """ +import cmind.utils import os import json -print ("Hello World 1") +print("Hello World 1") env = 
os.environ

-import json
-print ('')
-print (json.dumps(dict(env), indent=2))
+print('')
+print(json.dumps(dict(env), indent=2))

 # Import cmind to test break points
-import cmind.utils
 if os.environ.get('CM_TMP_DEBUG_UID', '') == '45a7c3a500d24a63':
-    cmind.utils.debug_here(__file__, port=5678, text='Debugging main.py!').breakpoint()
+    cmind.utils.debug_here(
+        __file__,
+        port=5678,
+        text='Debugging main.py!').breakpoint()

-print ('')
-print ("Hello World 2")
+print('')
+print("Hello World 2")
diff --git a/script/test-download-and-extract-artifacts/customize.py b/script/test-download-and-extract-artifacts/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/test-download-and-extract-artifacts/customize.py
+++ b/script/test-download-and-extract-artifacts/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/test-dummy/customize.py b/script/test-dummy/customize.py
index d12f9b3e1d..273999d460 100644
--- a/script/test-dummy/customize.py
+++ b/script/test-dummy/customize.py
@@ -1,6 +1,7 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']
@@ -13,10 +14,11 @@ def preprocess(i):

     quiet = (env.get('CM_QUIET', False) == 'yes')

-    return {'return':0}
+    return {'return': 0}
+

 def postprocess(i):

     env = i['env']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/test-mlperf-inference-retinanet/customize.py b/script/test-mlperf-inference-retinanet/customize.py
index 14e20d1bf2..1baf6756a2 100644
--- a/script/test-mlperf-inference-retinanet/customize.py
+++ b/script/test-mlperf-inference-retinanet/customize.py
@@ -1,13 +1,14 @@
 from cmind import utils
 import os

+
 def preprocess(i):

     os_info = i['os_info']

     env = i['env']

     meta = i['meta']

-    return {'return':0}
+    return {'return': 0}


 def postprocess(i):
@@ -15,4 +16,4 @@
     env = i['env']
     state = i['state']

-    return {'return':0}
+    return {'return': 0}
diff --git a/script/truncate-mlperf-inference-accuracy-log/customize.py b/script/truncate-mlperf-inference-accuracy-log/customize.py
index d13d504ff8..fba057e493 100644
--- a/script/truncate-mlperf-inference-accuracy-log/customize.py
+++ b/script/truncate-mlperf-inference-accuracy-log/customize.py
@@ -4,6 +4,7 @@
 import subprocess
 from os.path import exists

+
 def preprocess(i):

     os_info = i['os_info']
@@ -12,14 +13,14 @@ def preprocess(i):

     if submission_dir == "":
         print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR")
-        return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'}
+        return {'return': 1, 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'}

     submitter = env.get("CM_MLPERF_SUBMITTER", "CTuning")

     os.system("rm -rf " + submission_dir + "_logs")

     CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission",
-                                                     "truncate_accuracy_log.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --backup '" + submission_dir + "_logs'"
+                                                     "truncate_accuracy_log.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --backup '" + submission_dir + "_logs'"
     env['CM_RUN_CMD'] = CMD

-    return {'return':0}
+    return {'return': 0}
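For reference, the CM_RUN_CMD string assembled in the truncate script above quotes every argument by hand before the command reaches a shell. An argument-list invocation, sketched below as an alternative rather than what the script does, sidesteps that quoting; the fallback paths are illustrative assumptions.

# Sketch: the same truncate_accuracy_log.py call as an argument list.
# The default paths are assumptions for illustration.
import os
import subprocess

python_bin = os.environ.get('CM_PYTHON_BIN', 'python3')
inference_src = os.environ.get('CM_MLPERF_INFERENCE_SOURCE', '/path/to/inference')
submission_dir = os.environ.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '/path/to/submission')
submitter = os.environ.get('CM_MLPERF_SUBMITTER', 'CTuning')

subprocess.check_call([
    python_bin,
    os.path.join(inference_src, "tools", "submission", "truncate_accuracy_log.py"),
    "--input", submission_dir,
    "--submitter", submitter,
    "--backup", submission_dir + "_logs",
])

diff --git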
a/script/wrapper-reproduce-octoml-tinyml-submission/customize.py b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py index f859dd1a9d..1e5beb50fb 100644 --- a/script/wrapper-reproduce-octoml-tinyml-submission/customize.py +++ b/script/wrapper-reproduce-octoml-tinyml-submission/customize.py @@ -2,6 +2,7 @@ import os import cmind as cm + def preprocess(i): os_info = i['os_info'] @@ -9,29 +10,34 @@ def preprocess(i): state = i['state'] inp = i['input'] if 'CM_FLASH_BOARD' in env: - script_tags = "flash,tiny" + script_tags = "flash,tiny" else: - script_tags = "reproduce,tiny,mlperf,octoml" - boards = ["NUCLEO", "NRF" ] - microtvm_variants = { "cmsis_nn": [ "ad", "ic", "vww", "kws" ], "native": [ "ic", "ad", "vww", "kws"] } + script_tags = "reproduce,tiny,mlperf,octoml" + boards = ["NUCLEO", "NRF"] + microtvm_variants = { + "cmsis_nn": [ + "ad", "ic", "vww", "kws"], "native": [ + "ic", "ad", "vww", "kws"]} for board in boards: for microtvm_variant in microtvm_variants: if board == "NRF" and microtvm_variant == "native": continue for model in microtvm_variants[microtvm_variant]: - variation_tags_string="_"+board+",_"+microtvm_variant+",_"+model + variation_tags_string = "_" + board + ",_" + microtvm_variant + ",_" + model tags = script_tags + "," + variation_tags_string if 'CM_RECREATE_BINARY' in env: - r = cm.access({'action':'rm', 'automation':'cache', 'tags': tags, 'force': 'true'}) + r = cm.access( + {'action': 'rm', 'automation': 'cache', 'tags': tags, 'force': 'true'}) if r['return'] > 0: return r - r = cm.access({'action':'run', 'automation':'script', 'tags': tags, 'quiet': 'true', 'env': env, - 'input': inp, 'state': state, 'add_deps': inp.get('add_deps', {}), 'add_deps_recursive': - inp.get('add_deps_recursive', {})}) + r = cm.access({'action': 'run', 'automation': 'script', 'tags': tags, 'quiet': 'true', 'env': env, + 'input': inp, 'state': state, 'add_deps': inp.get('add_deps', {}), 'add_deps_recursive': + inp.get('add_deps_recursive', {})}) if r['return'] > 0: return r - return {'return':0} + return {'return': 0} + def postprocess(i): - return {'return':0} + return {'return': 0} diff --git a/setup.py b/setup.py index 26dce3f544..b57c0bf3e7 100644 --- a/setup.py +++ b/setup.py @@ -43,12 +43,12 @@ def is_package_installed(self, package_name): sys.modules[package_name] = module spec.loader.exec_module(module) else: - pkg_resources.get_distribution(package_name) # Fallback for < 3.8 + pkg_resources.get_distribution( + package_name) # Fallback for < 3.8 return True except PackageNotFoundError: return False - def install_system_packages(self): # List of packages to install via system package manager packages = [] @@ -63,10 +63,10 @@ def install_system_packages(self): if not curl_status: packages.append("curl") - name='venv' + name = 'venv' if name in sys.modules: - pass #nothing needed + pass # nothing needed elif self.is_package_installed(name): pass else: @@ -79,19 +79,22 @@ def install_system_packages(self): if manager == "apt-get": # Check if 'sudo' is available if shutil.which('sudo'): - subprocess.check_call(['sudo', 'apt-get', 'update']) - subprocess.check_call(['sudo', 'apt-get', 'install', '-y'] + packages) + subprocess.check_call( + ['sudo', 'apt-get', 'update']) + subprocess.check_call( + ['sudo', 'apt-get', 'install', '-y'] + packages) else: print("sudo not found, trying without sudo.") try: subprocess.check_call(['apt-get', 'update']) - subprocess.check_call(['apt-get', 'install', '-y'] + packages) + subprocess.check_call( + ['apt-get', 'install', '-y'] + 
packages) except subprocess.CalledProcessError: - print(f"Installation of {packages} without sudo failed. Please install these packages manually to continue!") + print( + f"Installation of {packages} without sudo failed. Please install these packages manually to continue!") elif self.system == 'Windows': - print(f"Please install the following packages manually: {packages}") - - + print( + f"Please install the following packages manually: {packages}") def detect_package_manager(self): package_managers = { @@ -113,7 +116,8 @@ def get_package_manager_details(self): manager = self.detect_package_manager() if manager: try: - version_output = subprocess.check_output([manager, '--version'], stderr=subprocess.STDOUT).decode('utf-8') + version_output = subprocess.check_output( + [manager, '--version'], stderr=subprocess.STDOUT).decode('utf-8') return manager, version_output.split('\n')[0] except subprocess.CalledProcessError: return manager, 'Version information not available' @@ -123,33 +127,43 @@ def get_package_manager_details(self): # Checks if command exists(for installing required packages). # If the command exists, which returns 0, making the function return True. # If the command does not exist, which returns a non-zero value, making the function return False. - # NOTE: The standard output and standard error streams are redirected to PIPES so that it could be captured in future if needed. + # NOTE: The standard output and standard error streams are redirected to + # PIPES so that it could be captured in future if needed. def command_exists(self, command): if self.system == "Linux" or self.system == 'Darwin': - return subprocess.call(['which', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 + return subprocess.call( + ['which', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 elif self.system == "Windows": - return subprocess.call([command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 + return subprocess.call( + [command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 def custom_function(self): commit_hash = get_commit_hash() import cmind - #r = cmind.access({'action':'rm', 'automation':'repo', 'data_uoa':'mlcommons@cm4mlops', 'force': True}) - r = cmind.access({'action':'pull', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'branch': 'mlperf-inference', 'checkout': commit_hash}) - #r = cmind.access({'action':'pull', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'checkout': commit_hash}) + r = cmind.access({'action':'rm', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'force': True, 'all': True}) + r = cmind.access({'action': 'pull', + 'automation': 'repo', + 'artifact': 'mlcommons@cm4mlops', + 'branch': 'mlperf-inference', + 'checkout': commit_hash}) + # r = cmind.access({'action':'pull', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'checkout': commit_hash}) print(r) if r['return'] > 0: return r['return'] def get_sys_platform(self): - self.system = platform.system() + self.system = platform.system() # Read long description and version + + def read_file(file_name, default=""): if os.path.isfile(file_name): with open(file_name, "r") as f: return f.read().strip() return default + def get_commit_hash(): try: with open(os.path.join(os.path.dirname(__file__), 'git_commit_hash.txt'), 'r') as f: @@ -157,6 +171,7 @@ def get_commit_hash(): except FileNotFoundError: return "unknown" + long_description = read_file("README.md", "No description available.") version_ = 
read_file("VERSION", "0.3.1") @@ -175,7 +190,7 @@ def get_commit_hash(): "requests", "tabulate", "pyyaml" - ], + ], cmdclass={ 'install': CustomInstallCommand, }, diff --git a/tests/script/check.py b/tests/script/check.py index 26a86a309e..dd030c3bb8 100644 --- a/tests/script/check.py +++ b/tests/script/check.py @@ -1,9 +1,11 @@ def check_return(r): if 'return' not in r: - raise Exception('CM access function should always return key \'return\'!') + raise Exception( + 'CM access function should always return key \'return\'!') if 'error' in r: raise Exception(r['error']) + def check_list(r, string, found=True): check_return(r) if 'list' not in r: @@ -13,6 +15,7 @@ def check_list(r, string, found=True): if len(r['list']) > 0 and not found: raise Exception('CM search returned at lease one entry for ' + string) + def check_key_value(d, key, value, absent_ok=False): if not d.get(key): if absent_ok: @@ -20,4 +23,5 @@ def check_key_value(d, key, value, absent_ok=False): else: raise Exception(f"{key} is missing. Current values are {d}") elif d[key] != value: - raise Exception(f"{key} is not having the expected value of {value}. Current value is {d[key]}") + raise Exception( + f"{key} is not having the expected value of {value}. Current value is {d[key]}") diff --git a/tests/script/process_dockerfile.py b/tests/script/process_dockerfile.py index 9a58fce389..d9abd2abeb 100644 --- a/tests/script/process_dockerfile.py +++ b/tests/script/process_dockerfile.py @@ -5,7 +5,7 @@ import json import yaml -files=sys.argv[1:] +files = sys.argv[1:] for file in files: if not os.path.isfile(file): @@ -26,5 +26,8 @@ uid = data['uid'] - r = cm.access({'action':'dockerfile', 'automation':'script', 'artifact': uid, 'quiet': 'yes'}) + r = cm.access({'action': 'dockerfile', + 'automation': 'script', + 'artifact': uid, + 'quiet': 'yes'}) checks.check_return(r) diff --git a/tests/script/process_readme.py b/tests/script/process_readme.py index 492813a050..de7e04033e 100644 --- a/tests/script/process_readme.py +++ b/tests/script/process_readme.py @@ -5,7 +5,7 @@ import json import yaml -files=sys.argv[1:] +files = sys.argv[1:] for file in files: if not os.path.isfile(file): @@ -22,5 +22,6 @@ data = yaml.safe_load(f) uid = data['uid'] - r = cm.access({'action':'doc', 'automation':'script', 'artifact': uid, 'quiet': 'yes'}) + r = cm.access({'action': 'doc', 'automation': 'script', + 'artifact': uid, 'quiet': 'yes'}) checks.check_return(r) diff --git a/tests/script/process_tests.py b/tests/script/process_tests.py index a9a7d0e555..8012d097b6 100644 --- a/tests/script/process_tests.py +++ b/tests/script/process_tests.py @@ -5,7 +5,7 @@ import json import yaml -files=sys.argv[1:] +files = sys.argv[1:] for file in files: print(file) @@ -20,12 +20,12 @@ elif file.endswith(".yaml"): data = yaml.safe_load(f) if data.get('uid', '') == '': - continue #not a CM script meta + continue # not a CM script meta uid = data['uid'] ii = { - 'action':'test', 'automation':'script', 'artifact': uid, 'quiet': 'yes', 'out': 'con' - } + 'action': 'test', 'automation': 'script', 'artifact': uid, 'quiet': 'yes', 'out': 'con' + } if os.environ.get('DOCKER_CM_REPO', '') != '': ii['docker_cm_repo'] = os.environ['DOCKER_CM_REPO'] if os.environ.get('DOCKER_CM_REPO_BRANCH', '') != '': diff --git a/tests/script/test_deps.py b/tests/script/test_deps.py index aaf19bc81b..37d75fd4c4 100644 --- a/tests/script/test_deps.py +++ b/tests/script/test_deps.py @@ -1,23 +1,25 @@ -# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, 
deps, post_deps +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps import cmind as cm import check as checks -# MLPerf v3.0 inference is now very outdated and we are testing inference in separate tests +# MLPerf v3.0 inference is now very outdated and we are testing inference +# in separate tests -#r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr': +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr': # {'loadgen': {'version': 'r3.0'}, 'compiler': {'tags': "gcc"}}, 'env': {'CM_MODEL': 'resnet50', # 'CM_DEVICE': 'cpu', 'CM_BACKEND': 'onnxruntime'}, 'quiet': 'yes'}) -#checks.check_return(r) +# checks.check_return(r) # -#r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'}) -#checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual") +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'}) +# checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual") # -#r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'}) -#checks.check_list(r, "inference,src,version-r3.0") +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'}) +# checks.check_list(r, "inference,src,version-r3.0") # -#r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) -#checks.check_return(r) +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) # -#r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) -#checks.check_return(r) +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) diff --git a/tests/script/test_docker.py b/tests/script/test_docker.py index 3ea13608a3..991ef04030 100644 --- a/tests/script/test_docker.py +++ b/tests/script/test_docker.py @@ -1,22 +1,23 @@ -# This test covers version, variation, compilation from src, add_deps_recursive, post_deps +# This test covers version, variation, compilation from src, +# add_deps_recursive, post_deps import cmind as cm import check as checks -r = cm.access({'action':'run', - 'automation':'script', +r = cm.access({'action': 'run', + 'automation': 'script', 'tags': 'run,docker,container', 'add_deps_recursive': { - 'compiler': {'tags': "gcc"} + 'compiler': {'tags': "gcc"} }, - 'image_name':'cm-script-app-image-classification-onnx-py', + 'image_name': 'cm-script-app-image-classification-onnx-py', 'env': { - 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', - 'CM_MLOPS_REPO': 'mlcommons@cm4mlops', - 'CM_MLOPS_REPO_BRANCH': 'mlperf-inference', - 'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04' + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_MLOPS_REPO': 'mlcommons@cm4mlops', + 
                    'CM_MLOPS_REPO_BRANCH': 'mlperf-inference',
+                   'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04'
                },
                'quiet': 'yes'
-              })
+               })
 checks.check_return(r)
diff --git a/tests/script/test_features.py b/tests/script/test_features.py
index 0679099321..b29ee6a7a5 100644
--- a/tests/script/test_features.py
+++ b/tests/script/test_features.py
@@ -5,23 +5,34 @@
 import cmind as cm
 import check as checks

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'install,python-venv', 'name':'test', 'quiet': 'yes'})
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'install,python-venv',
+               'name': 'test',
+               'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,python,virtual,name-test'})
+r = cm.access({'action': 'search', 'automation': 'cache',
+               'tags': 'get,python,virtual,name-test'})
 checks.check_list(r, "get,python-venv")

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'get,dataset,preprocessed,imagenet,_NHWC', 'quiet': 'yes'})
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'get,dataset,preprocessed,imagenet,_NHWC',
+               'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'})
+r = cm.access({'action': 'search', 'automation': 'cache',
+               'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'})
 checks.check_list(r, "_NHWC")

-r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'get,dataset,preprocessed,imagenet,-_NHWC'})
-#checks.check_list(r, "-_NHWC", False)
+r = cm.access({'action': 'search', 'automation': 'cache',
+               'tags': 'get,dataset,preprocessed,imagenet,-_NHWC'})
+# checks.check_list(r, "-_NHWC", False)

-r = cm.access({'action':'run', 'automation': 'script', 'tags': 'test-scripts,_v1,_v2'})
+r = cm.access({'action': 'run', 'automation': 'script',
+               'tags': 'test-scripts,_v1,_v2'})
 new_env = r['new_env']
 checks.check_key_value(new_env, "CM_VAR1", "combv1v2")
 checks.check_key_value(new_env, "CM_VAR2", "constv2")
diff --git a/tests/script/test_install.py b/tests/script/test_install.py
index 66fa164d50..d4fb93ec70 100644
--- a/tests/script/test_install.py
+++ b/tests/script/test_install.py
@@ -3,8 +3,13 @@
 import cmind as cm
 import check as checks

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'python,src,install,_shared', 'version': '3.9.10', 'quiet': 'true'})
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'python,src,install,_shared',
+               'version': '3.9.10',
+               'quiet': 'true'})
 checks.check_return(r)

-r = cm.access({'action':'search', 'automation':'cache', 'tags': 'python,src,install,_shared,version-3.9.10'})
+r = cm.access({'action': 'search', 'automation': 'cache',
+               'tags': 'python,src,install,_shared,version-3.9.10'})
 checks.check_list(r, "python,src,install,_shared,version-3.9.10")
diff --git a/tests/test_cm.py b/tests/test_cm.py
index 41fb402c22..821e1571d6 100644
--- a/tests/test_cm.py
+++ b/tests/test_cm.py
@@ -3,12 +3,15 @@
     r = cm.access(['test', 'script'])
     if 'return' not in r:
-        raise Exception('CM access function should always return key \'return\'!')
+        raise Exception(
+            'CM access function should always return key \'return\'!')
     exit(0)

 except ImportError as e:
     from sys import stderr
     from subprocess import call
-    print('WARNING: CM module for python is not installed & jupyter notebooks will not be supported', file=stderr)
+    print(
+        'WARNING: CM module for python is not installed & jupyter notebooks will not be supported',
+        file=stderr)
     retcode = call(['cm', 'test', 'script'])
     exit(retcode)
diff --git a/tests/test_search_speed.py b/tests/test_search_speed.py
index 3086a83408..577c4f0b80 100644
--- a/tests/test_search_speed.py
+++ b/tests/test_search_speed.py
@@ -5,21 +5,22 @@
 steps = 10

-print ('Running search with tags {} times ...'.format(steps))
+print('Running search with tags {} times ...'.format(steps))

 for step in range(steps):

     start = time.time()
-    r = cm.access({'action':'search',
-                   'automation':'script',
-                   'tags':'detect,os'})
+    r = cm.access({'action': 'search',
+                   'automation': 'script',
+                   'tags': 'detect,os'})
     timer = time.time() - start

-    if r['return']>0: cm.error(r)
+    if r['return'] > 0:
+        cm.error(r)

     times.append(timer)

 step = 0
 for t in times:
     step += 1
-    print ("{}) {:0.3f} sec.".format(step, t))
+    print("{}) {:0.3f} sec.".format(step, t))
diff --git a/tests/tutorials/test_tutorial_retinanet.py b/tests/tutorials/test_tutorial_retinanet.py
index 9ecb2a3bef..dcca78f205 100644
--- a/tests/tutorials/test_tutorial_retinanet.py
+++ b/tests/tutorials/test_tutorial_retinanet.py
@@ -1,28 +1,37 @@
-# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps
+# This test covers version, variation, compilation from src, add_deps,
+# add_deps_recursive, deps, post_deps
+import check as checks
 import cmind as cm
 from pathlib import Path
 import sys
 import os

-sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
-import check as checks
+sys.path.insert(
+    1,
+    os.path.join(
+        Path(__file__).parent.parent.resolve(),
+        "script"))

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': \
-               {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', \
-               'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr':
+               {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline',
+               'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': \
-               {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', \
-               'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr':
+               {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline',
+               'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'install,python-venv', 'version': '3.10.8', 'name': 'mlperf' })
+r = cm.access({'action': 'run',
+               'automation': 'script',
+               'tags': 'install,python-venv',
+               'version': '3.10.8',
+               'name': 'mlperf'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community', \
-               'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline', \
-               'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community',
+               'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline',
+               'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
diff --git a/tests/tutorials/test_tutorial_tvm.py b/tests/tutorials/test_tutorial_tvm.py
index f3857b8fad..6901a31693 100644
--- a/tests/tutorials/test_tutorial_tvm.py
+++ b/tests/tutorials/test_tutorial_tvm.py
@@ -1,23 +1,28 @@
-# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps
+# This test covers version, variation, compilation from src, add_deps,
+# add_deps_recursive, deps, post_deps
+import check as checks
 import cmind as cm
 from pathlib import Path
 import sys
 import os

-sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
-import check as checks
+sys.path.insert(
+    1,
+    os.path.join(
+        Path(__file__).parent.parent.resolve(),
+        "script"))

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
diff --git a/tests/tutorials/test_tutorial_tvm_pip_ge.py b/tests/tutorials/test_tutorial_tvm_pip_ge.py
index adb7f960e8..f95521f440 100644
--- a/tests/tutorials/test_tutorial_tvm_pip_ge.py
+++ b/tests/tutorials/test_tutorial_tvm_pip_ge.py
@@ -1,20 +1,26 @@
+import check as checks
 import cmind as cm
 from pathlib import Path
 import sys
 import os

-sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
-import check as checks
+sys.path.insert(
+    1,
+    os.path.join(
+        Path(__file__).parent.parent.resolve(),
+        "script"))

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, \
-               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \
-               'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {
+                   'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}},
+               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx',
+               'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, \
-               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \
-               'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {
+                   'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}},
+               'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx',
+               'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
diff --git a/tests/tutorials/test_tutorial_tvm_pip_vm.py b/tests/tutorials/test_tutorial_tvm_pip_vm.py
index 208185fda7..ab9244bf01 100644
--- a/tests/tutorials/test_tutorial_tvm_pip_vm.py
+++ b/tests/tutorials/test_tutorial_tvm_pip_vm.py
@@ -1,22 +1,27 @@
-# This test covers version, variation, compilation from src, add_deps, add_deps_recursive, deps, post_deps
+# This test covers version, variation, compilation from src, add_deps,
+# add_deps_recursive, deps, post_deps
+import check as checks
 import cmind as cm
 from pathlib import Path
 import sys
 import os

-sys.path.insert(1, os.path.join(Path(__file__).parent.parent.resolve(), "script"))
-import check as checks
+sys.path.insert(
+    1,
+    os.path.join(
+        Path(__file__).parent.parent.resolve(),
+        "script"))

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)

-r = cm.access({'action':'run', 'automation':'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': \
-               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', \
-               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
-               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
+r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr':
+               {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community',
+               'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline',
+               'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)