
revert to use previous lint check ci yml #1

Triggered via push on November 9, 2023 at 06:42
Status: Startup failure
Job: pytest_check

Annotations

58 errors
Invalid workflow file: .github/workflows/bk_asw_batch_ci.yml#L65
The workflow is not valid. .github/workflows/bk_asw_batch_ci.yml (Line: 65, Col: 7): Unexpected value 'schedule'
/home/runner/work/hello_dgl/hello_dgl/checkJobStatus.py#L1
-# script to submit jobs to AWS Batch, queues and definitions are already existing and set up 
+# script to submit jobs to AWS Batch, queues and definitions are already existing and set up
 import argparse
 import random
 import re
 import sys
 import time
/home/runner/work/hello_dgl/hello_dgl/checkJobStatus.py#L10
 from botocore.compat import total_seconds
 from botocore.config import Config

 job_type_info = {
-    'CI-CPU': {
-        'job_definition': 'hello_dgl',
-        'job_queue': 'hello_dgl',
+    "CI-CPU": {
+        "job_definition": "hello_dgl",
+        "job_queue": "hello_dgl",
     },
 }

 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

-parser.add_argument('--profile', help='profile name of aws account.', type=str,
-                    default=None)
-parser.add_argument('--region', help='Default region when creating new connections', type=str,
-                    default='us-west-2')
-parser.add_argument('--name', help='name of the job', type=str, default='dummy')
-parser.add_argument('--job-type', help='type of job to submit.', type=str,
-                    choices=job_type_info.keys(), default='CI-CPU')
-parser.add_argument('--command', help='command to run', type=str,
-                    default='git rev-parse HEAD | tee stdout.log')
-parser.add_argument('--wait', help='block wait until the job completes. '
-                    'Non-zero exit code if job fails.', action='store_true')
-parser.add_argument('--timeout', help='job timeout in seconds', default=10800, type=int)
+parser.add_argument(
+    "--profile", help="profile name of aws account.", type=str, default=None
+)
+parser.add_argument(
+    "--region",
+    help="Default region when creating new connections",
+    type=str,
+    default="us-west-2",
+)
+parser.add_argument("--name", help="name of the job", type=str, default="dummy")
+parser.add_argument(
+    "--job-type",
+    help="type of job to submit.",
+    type=str,
+    choices=job_type_info.keys(),
+    default="CI-CPU",
+)
+parser.add_argument(
+    "--command",
+    help="command to run",
+    type=str,
+    default="git rev-parse HEAD | tee stdout.log",
+)
+parser.add_argument(
+    "--wait",
+    help="block wait until the job completes. " "Non-zero exit code if job fails.",
+    action="store_true",
+)
+parser.add_argument("--timeout", help="job timeout in seconds", default=10800, type=int)

-parser.add_argument('--source-ref',
-                    help='ref in hello_DGL main github. e.g. master, refs/pull/500/head',
-                    type=str, default='main')
-parser.add_argument('--remote',
-                    help='git repo address. https://github.com/dglai/hello_dgl.git',
-                    type=str, default="https://github.com/dglai/hello_dgl.git")
+parser.add_argument(
+    "--source-ref",
+    help="ref in hello_DGL main github. e.g. master, refs/pull/500/head",
+    type=str,
+    default="main",
+)
+parser.add_argument(
+    "--remote",
+    help="git repo address. https://github.com/dglai/hello_dgl.git",
+    type=str,
+    default="https://github.com/dglai/hello_dgl.git",
+)
 parser.add_argument("--job-id", help="job id", type=str, default=None)
 parser.add_argument("--job-name", help="job name", type=str, default=None)

 args = parser.parse_args()

 print(args)

 session = boto3.Session(profile_name=args.profile, region_name=args.region)
-config = Config(
-    retries = dict(
-        max_attempts = 5
-    )
-)
+config = Config(retries=dict(max_attempts=5))

-batch, cloudwatch = [session.client(service_name=sn, config=config) for sn in ['batch', 'logs']]
+batch, cloudwatch = [
+    session.client(service_name=sn, config=config) for sn in ["batch", "logs"]
+]
+

 def printLogs(logGroupName, logStreamName, startTime):
-    kwargs = {'logGroupName': logGroupName,
-              'logStreamName': logStreamName,
-              'startTime': startTime,
-              'startFromHead': True}
+    kwargs = {
+        "logGroupName": logGroupName,
+        "logStreamName": logStreamName,
+        "startTime": startTime,
+        "startFromHead": True,
+    }

     lastTimestamp = startTime - 1
     while True:
         logEvents = cloudwatch.get_log_events(**kwargs)

-        for event in logEvents['events']:
-            lastTimestamp = event['timestamp']
+        for event in logEvents["events"]:
+            lastTimestamp = event["timestamp"]
             timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
-            print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))
+            print("[{}] {}".format((timestamp + ".000")[:23] + "Z", event["message"]))

-        nextToken = logEvents['nextForwardToken']
-        if nextToken and kwargs.get('nextToken') != nextToken:
-            kwargs['nextToken'] = nextToken
+        nextToken = logEvents["nextForwardToken"]
+        if nextToken and kwargs.get("nextToken") != nextToken:
+            kwargs["nextToken"] = nextToken
         else:
             break
     return lastTimestamp
/home/runner/work/hello_dgl/hello_dgl/checkJobStatus.py#L82
     endTime = int(total_seconds(datetime.utcnow() - datetime(1970, 1, 1))) * 1000
     return endTime


 def main():
-    spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
-    logGroupName = '/aws/batch/job' # This is the group where aws batch logs are stored in Cloudwatch
+    spin = ["-", "/", "|", "\\", "-", "/", "|", "\\"]
+    logGroupName = "/aws/batch/job"  # This is the group where aws batch logs are stored in Cloudwatch

     # Printing actions parameters
     print("GitHub SourceRef: ", args.source_ref)
     print("GitHub Remote: ", args.remote)
/home/runner/work/hello_dgl/hello_dgl/checkJobStatus.py#L100
     status_set = set()
     startTime = 0
     logStreamName = None
     describeJobsResponse = batch.describe_jobs(jobs=[jobId])
-    status = describeJobsResponse['jobs'][0]['status']
-    if status == 'SUCCEEDED' or status == 'FAILED':
+    status = describeJobsResponse["jobs"][0]["status"]
+    if status == "SUCCEEDED" or status == "FAILED":
         if logStreamName:
             startTime = printLogs(logGroupName, logStreamName, startTime) + 1
-        print('=' * 80)
-        print('Job [{} - {}] {}'.format(jobName, jobId, status))
-        sys.exit(status == 'FAILED')
+        print("=" * 80)
+        print("Job [{} - {}] {}".format(jobName, jobId, status))
+        sys.exit(status == "FAILED")
-    elif status == 'RUNNING':
-        logStreamName = describeJobsResponse['jobs'][0]['container']['logStreamName']
+    elif status == "RUNNING":
+        logStreamName = describeJobsResponse["jobs"][0]["container"]["logStreamName"]
         if not running:
             running = True
-            print('\rJob [{}, {}] is RUNNING.'.format(jobName, jobId))
+            print("\rJob [{}, {}] is RUNNING.".format(jobName, jobId))
             if logStreamName:
-                print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
+                print("Output [{}]:\n {}".format(logStreamName, "=" * 80))
         if logStreamName:
             startTime = printLogs(logGroupName, logStreamName, startTime) + 1
     elif status not in status_set:
         status_set.add(status)
-        print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
+        print(
+            "\rJob [%s - %s] is %-9s... %s"
+            % (jobName, jobId, status, spin[spinner % len(spin)]),
+        )
         sys.stdout.flush()
         spinner += 1

     print(f"Job status: {status}")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
/home/runner/work/hello_dgl/hello_dgl/submitJob.py#L1
-# script to submit jobs to AWS Batch, queues and definitions are already existing and set up 
+# script to submit jobs to AWS Batch, queues and definitions are already existing and set up
 import argparse
 import random
 import re
 import sys
 import time
/home/runner/work/hello_dgl/hello_dgl/submitJob.py#L10
 from botocore.compat import total_seconds
 from botocore.config import Config

 job_type_info = {
-    'CI-CPU': {
-        'job_definition': 'hello_dgl',
-        'job_queue': 'hello_dgl',
+    "CI-CPU": {
+        "job_definition": "hello_dgl",
+        "job_queue": "hello_dgl",
     },
 }

 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

-parser.add_argument('--profile', help='profile name of aws account.', type=str,
-                    default=None)
-parser.add_argument('--region', help='Default region when creating new connections', type=str,
-                    default='us-west-2')
-parser.add_argument('--name', help='name of the job', type=str, default='dummy')
-parser.add_argument('--job-type', help='type of job to submit.', type=str,
-                    choices=job_type_info.keys(), default='CI-CPU')
-parser.add_argument('--command', help='command to run', type=str,
-                    default='git rev-parse HEAD | tee stdout.log')
-parser.add_argument('--wait', help='block wait until the job completes. '
-                    'Non-zero exit code if job fails.', action='store_true')
-parser.add_argument('--timeout', help='job timeout in seconds', default=10800, type=int)
+parser.add_argument(
+    "--profile", help="profile name of aws account.", type=str, default=None
+)
+parser.add_argument(
+    "--region",
+    help="Default region when creating new connections",
+    type=str,
+    default="us-west-2",
+)
+parser.add_argument("--name", help="name of the job", type=str, default="dummy")
+parser.add_argument(
+    "--job-type",
+    help="type of job to submit.",
+    type=str,
+    choices=job_type_info.keys(),
+    default="CI-CPU",
+)
+parser.add_argument(
+    "--command",
+    help="command to run",
+    type=str,
+    default="git rev-parse HEAD | tee stdout.log",
+)
+parser.add_argument(
+    "--wait",
+    help="block wait until the job completes. " "Non-zero exit code if job fails.",
+    action="store_true",
+)
+parser.add_argument("--timeout", help="job timeout in seconds", default=10800, type=int)

-parser.add_argument('--source-ref',
-                    help='ref in hello_DGL main github. e.g. master, refs/pull/500/head',
-                    type=str, default='main')
-parser.add_argument('--remote',
-                    help='git repo address. https://github.com/dglai/hello_dgl.git',
-                    type=str, default="https://github.com/dglai/hello_dgl.git")
+parser.add_argument(
+    "--source-ref",
+    help="ref in hello_DGL main github. e.g. master, refs/pull/500/head",
+    type=str,
+    default="main",
+)
+parser.add_argument(
+    "--remote",
+    help="git repo address. https://github.com/dglai/hello_dgl.git",
+    type=str,
+    default="https://github.com/dglai/hello_dgl.git",
+)

 args = parser.parse_args()

 session = boto3.Session(profile_name=args.profile, region_name=args.region)
-config = Config(
-    retries = dict(
-        max_attempts = 5
-    )
-)
+config = Config(retries=dict(max_attempts=5))

-batch, cloudwatch = [session.client(service_name=sn, config=config) for sn in ['batch', 'logs']]
+batch, cloudwatch = [
+    session.client(service_name=sn, config=config) for sn in ["batch", "logs"]
+]
+

 def printLogs(logGroupName, logStreamName, startTime):
-    kwargs = {'logGroupName': logGroupName,
-              'logStreamName': logStreamName,
-              'startTime': startTime,
-              'startFromHead': True}
+    kwargs = {
+        "logGroupName": logGroupName,
+        "logStreamName": logStreamName,
+        "startTime": startTime,
+        "startFromHead": True,
+    }

     lastTimestamp = startTime - 1
     while True:
         logEvents = cloudwatch.get_log_events(**kwargs)

-        for event in logEvents['events']:
-            lastTimestamp = event['timestamp']
+        for event in logEvents["events"]:
+            lastTimestamp = event["timestamp"]
             timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
-            print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))
+            print("[{}] {}".format((timestamp + ".000")[:23] + "Z", event["message"]))

-        nextToken = logEvents['nextForwardToken']
-        if nextToken and kwargs.get('nextToken') != nextToken:
-            kwargs['nextToken'] = nextToken
+        nextToken = logEvents["nextForwardToken"]
+        if nextToken and kwargs.get("nextToken") != nextToken:
+            kwargs["nextToken"] = nextToken
         else:
             break
     return lastTimestamp
/home/runner/work/hello_dgl/hello_dgl/submitJob.py#L79
     endTime = int(total_seconds(datetime.utcnow() - datetime(1970, 1, 1))) * 1000
     return endTime


 def main():
-    spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
-    logGroupName = '/aws/batch/job' # This is the group where aws batch logs are stored in Cloudwatch
+    spin = ["-", "/", "|", "\\", "-", "/", "|", "\\"]
+    logGroupName = "/aws/batch/job"  # This is the group where aws batch logs are stored in Cloudwatch

-    jobName = re.sub('[^A-Za-z0-9_\-]', '', args.name)[:128] # Enforce AWS Batch jobName rules
+    jobName = re.sub("[^A-Za-z0-9_\-]", "", args.name)[
+        :128
+    ]  # Enforce AWS Batch jobName rules
     jobType = args.job_type
-    jobQueue = job_type_info[jobType]['job_queue']
-    jobDefinition = job_type_info[jobType]['job_definition']
+    jobQueue = job_type_info[jobType]["job_queue"]
+    jobDefinition = job_type_info[jobType]["job_definition"]
     wait = args.wait

     # Printing actions parameters
     print("GitHub SourceRef: ", args.source_ref)
     print("GitHub Remote: ", args.remote)

     parameters = {
-        'COMMAND': f"\"{args.command}\"", # wrap command with double quotation mark, so that batch can treat it as a single command
-        'SOURCE_REF': args.source_ref,
-        'REMOTE': args.remote,
+        "COMMAND": f'"{args.command}"',  # wrap command with double quotation mark, so that batch can treat it as a single command
+        "SOURCE_REF": args.source_ref,
+        "REMOTE": args.remote,
     }
     kwargs = dict(
         jobName=jobName,
         jobQueue=jobQueue,
         jobDefinition=jobDefinition,
         parameters=parameters,
     )
     if args.timeout is not None:
-        kwargs['timeout'] = {'attemptDurationSeconds': args.timeout}
+        kwargs["timeout"] = {"attemptDurationSeconds": args.timeout}
     submitJobResponse = batch.submit_job(**kwargs)
-    jobId = submitJobResponse['jobId']
-    print('Submitted job [{} - {}] to the job queue [{}]'.format(jobName, jobId, jobQueue))
+    jobId = submitJobResponse["jobId"]
+    print(
+        "Submitted job [{} - {}] to the job queue [{}]".format(jobName, jobId, jobQueue)
+    )

     spinner = 0
     running = False
     status_set = set()
     startTime = 0
     logStreamName = None

     while wait:
         time.sleep(10) # Wait for 10 seconds to fetch job data from batch service
         describeJobsResponse = batch.describe_jobs(jobs=[jobId])
-        status = describeJobsResponse['jobs'][0]['status']
-        if status == 'SUCCEEDED' or status == 'FAILED':
+        status = describeJobsResponse["jobs"][0]["status"]
+        if status == "SUCCEEDED" or status == "FAILED":
             if logStreamName:
                 startTime = printLogs(logGroupName, logStreamName, startTime) + 1
-            print('=' * 80)
-            print('Job [{} - {}] {}'.format(jobName, jobId, status))
-            sys.exit(status == 'FAILED')
+            print("=" * 80)
+            print("Job [{} - {}] {}".format(jobName, jobId, status))
+            sys.exit(status == "FAILED")
-        elif status == 'RUNNING':
-            logStreamName = describeJobsResponse['jobs'][0]['container']['logStreamName']
+        elif status == "RUNNING":
+            logStreamName = describeJobsResponse["jobs"][0]["container"][
+                "logStreamName"
+            ]
             if not running:
                 running = True
-                print('\rJob [{}, {}] is RUNNING.'.format(jobName, jobId))
+                print("\rJob [{}, {}] is RUNNING.".format(jobName, jobId))
                 if logStreamName:
-                    print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
+                    print("Output [{}]:\n {}".format(logStreamName, "=" * 80))
             if logStreamName:
                 startTime = printLogs(logGroupName, logStreamName, startTime) + 1
         elif status not in status_set:
             status_set.add(status)
-            print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
+            print(
+                "\rJob [%s - %s] is %-9s... %s"
+                % (jobName, jobId, status, spin[spinner % len(spin)]),
+            )
             sys.stdout.flush()
             spinner += 1


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
checkJobStatus.py#L1
Line too long (92 > 79 characters) (E501)
checkJobStatus.py#L1
Trailing whitespace (W291)
checkJobStatus.py#L3
'random' imported but unused (F401)
checkJobStatus.py#L4
're' imported but unused (F401)
checkJobStatus.py#L6
'time' imported but unused (F401)
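The three F401 findings above would go away by trimming the import block of checkJobStatus.py to what the script actually uses. A sketch of the trimmed block, assuming the remaining imports (boto3, datetime, botocore) sit near the top of the file as the diff context suggests:

# Trimmed import block for checkJobStatus.py: 'random', 're', and 'time'
# are dropped because flake8 reports them as unused (F401).
import argparse
import sys
from datetime import datetime

import boto3
from botocore.compat import total_seconds
from botocore.config import Config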
checkJobStatus.py#L21
Line too long (88 > 79 characters) (E501)
checkJobStatus.py#L25
Line too long (94 > 79 characters) (E501)
checkJobStatus.py#L27
Line too long (80 > 79 characters) (E501)
checkJobStatus.py#L34
Line too long (88 > 79 characters) (E501)
checkJobStatus.py#L37
Line too long (89 > 79 characters) (E501)
checkJobStatus.py#L40
Line too long (84 > 79 characters) (E501)
checkJobStatus.py#L51
Unexpected spaces around keyword / parameter equals (E251)
checkJobStatus.py#L51
Unexpected spaces around keyword / parameter equals (E251)
checkJobStatus.py#L52
Unexpected spaces around keyword / parameter equals (E251)
checkJobStatus.py#L52
Unexpected spaces around keyword / parameter equals (E251)
checkJobStatus.py#L56
Line too long (96 > 79 characters) (E501)
checkJobStatus.py#L58
Expected 2 blank lines, found 1 (E302)
checkJobStatus.py#L70
Line too long (85 > 79 characters) (E501)
checkJobStatus.py#L71
Line too long (86 > 79 characters) (E501)
checkJobStatus.py#L82
Line too long (81 > 79 characters) (E501)
checkJobStatus.py#L88
At least two spaces before inline comment (E261)
checkJobStatus.py#L88
Line too long (101 > 79 characters) (E501)
checkJobStatus.py#L114
Line too long (85 > 79 characters) (E501)
checkJobStatus.py#L124
Line too long (101 > 79 characters) (E501)
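Several of the checkJobStatus.py findings come from the trailing comment on the logGroupName assignment (line 88), which stays over the limit even after black's reformatting and is also flagged E261. One way to satisfy both checks, sketched here rather than taken from the repository, is to move the comment onto its own line:

# AWS Batch stores container logs in this CloudWatch log group.
logGroupName = "/aws/batch/job"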
submitJob.py#L1
Line too long (92 > 79 characters) (E501)
submitJob.py#L1
Trailing whitespace (W291)
submitJob.py#L3
'random' imported but unused (F401)
submitJob.py#L21
Line too long (88 > 79 characters) (E501)
submitJob.py#L25
Line too long (94 > 79 characters) (E501)
submitJob.py#L27
Line too long (80 > 79 characters) (E501)
submitJob.py#L34
Line too long (88 > 79 characters) (E501)
submitJob.py#L37
Line too long (89 > 79 characters) (E501)
submitJob.py#L40
Line too long (84 > 79 characters) (E501)
submitJob.py#L48
Unexpected spaces around keyword / parameter equals (E251)
submitJob.py#L48
Unexpected spaces around keyword / parameter equals (E251)
submitJob.py#L49
Unexpected spaces around keyword / parameter equals (E251)
submitJob.py#L49
Unexpected spaces around keyword / parameter equals (E251)
submitJob.py#L53
Line too long (96 > 79 characters) (E501)
submitJob.py#L55
Expected 2 blank lines, found 1 (E302)
submitJob.py#L67
Line too long (85 > 79 characters) (E501)
submitJob.py#L68
Line too long (86 > 79 characters) (E501)
submitJob.py#L79
Line too long (81 > 79 characters) (E501)
submitJob.py#L85
At least two spaces before inline comment (E261)
submitJob.py#L85
Line too long (101 > 79 characters) (E501)
submitJob.py#L87
Invalid escape sequence '\-' (W605)
submitJob.py#L87
Line too long (95 > 79 characters) (E501)
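W605 flags the '\-' escape in the jobName regex on submitJob.py line 87, which black's quote changes do not fix. A raw string resolves the string-escape warning, and dropping the now-unnecessary backslash (a '-' at the end of a character class is literal) keeps the regex identical; splitting off the trailing comment also clears the E501 on the same line. A sketch with an illustrative input, since args.name comes from argparse in the real script:

import re

raw_name = "lint check #1 (pytest_check)"  # illustrative stand-in for args.name

# Enforce AWS Batch jobName rules: keep only allowed characters, cap at 128.
# The raw string avoids the invalid '\-' escape flagged by W605.
jobName = re.sub(r"[^A-Za-z0-9_-]", "", raw_name)[:128]
print(jobName)  # -> lintcheck1pytest_check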
submitJob.py#L98
Line too long (132 > 79 characters) (E501)
submitJob.py#L113
Line too long (91 > 79 characters) (E501)
submitJob.py#L121
Line too long (82 > 79 characters) (E501)
submitJob.py#L126
Line too long (81 > 79 characters) (E501)