From 77a7c292376868d4e70b9d8cb496890832fdd623 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 3 Nov 2024 17:44:10 -0800
Subject: [PATCH 1/4] keep up-to-date with recent pytorch

Signed-off-by: youkaichao
---
 depyf/explain/utils.py | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/depyf/explain/utils.py b/depyf/explain/utils.py
index 1399455c..4938fdcb 100644
--- a/depyf/explain/utils.py
+++ b/depyf/explain/utils.py
@@ -90,16 +90,27 @@ def __init__(self, original_code, module, cache):
 
         cpp_guard = False
 
+        # starting from https://github.com/pytorch/pytorch/pull/138896 ,
+        # pytorch uses `guard_manager` instead of `check_fn` to store the
+        # guards
+        attr_name = "guard_manager" if hasattr(cache, "guard_manager") else "check_fn"
+
+        guard_manager = getattr(cache, attr_name)
+
         try:
-            from torch._dynamo.guards import GuardManager
-            cpp_guard = isinstance(cache.check_fn, GuardManager)
+            klass = getattr(torch._dynamo.guards, "GuardManagerWrapper", None) or \
+                getattr(torch._C._dynamo.guards, "GuardManager", None)
+            assert klass is not None
+            cpp_guard = isinstance(guard_manager, klass)
         except Exception:
             pass
 
         if not cpp_guard:
-            guard = cache.check_fn.code_parts
-            freevar_names = cache.check_fn.__code__.co_freevars
-            freevar_values = [x.cell_contents for x in cache.check_fn.__closure__]
+            # for old version of pytorch,
+            # `guard_manager` is a plain python function
+            guard = guard_manager.code_parts
+            freevar_names = guard_manager.__code__.co_freevars
+            freevar_values = [x.cell_contents for x in guard_manager.__closure__]
         else:
             # keep the logic synced with
             # https://github.com/pytorch/pytorch/blob/7b6b10417d8616ebd7a42b06528c5c2b2fded55a/torch/_dynamo/guards.py#L262
@@ -118,14 +129,14 @@ def visit(root, ans):
                 for child in root.get_child_managers():
                     visit(child, ans)
             guard = []
-            root = cache.check_fn.root
+            root = guard_manager.root
             visit(root, guard)
-            if cache.check_fn.closure_vars is None:
+            if guard_manager.closure_vars is None:
                 freevar_names = tuple()
                 freevar_values = []
             else:
-                freevar_names = tuple(cache.check_fn.closure_vars.keys())
-                freevar_values = list(cache.check_fn.closure_vars.values())
+                freevar_names = tuple(guard_manager.closure_vars.keys())
+                freevar_values = list(guard_manager.closure_vars.values())
 
         self.guard = guard
         self.freevars = {name: value for name, value in zip(freevar_names, freevar_values)}

From 8302f89ee7db32cde87a0de80e85b992d0a1cff8 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 3 Nov 2024 18:22:02 -0800
Subject: [PATCH 2/4] update tests

Signed-off-by: youkaichao
---
 .github/workflows/test_pytorch.yml | 2 +-
 tests/test_pytorch/cmp_output.py   | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/test_pytorch.yml b/.github/workflows/test_pytorch.yml
index 1940d318..a00cde0a 100644
--- a/.github/workflows/test_pytorch.yml
+++ b/.github/workflows/test_pytorch.yml
@@ -17,7 +17,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"] # Python 3.11 bug waits for fix https://github.com/thuml/depyf/actions/runs/7004325219/job/19051829613 .
+        python-version: ["3.9", "3.10", "3.11"] # Python 3.11 bug waits for fix https://github.com/thuml/depyf/actions/runs/7004325219/job/19051829613 .
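For readers of PATCH 1/4 above: the net effect of the utils.py change is a small compatibility shim around how a Dynamo cache entry exposes its guards. The condensed sketch below restates that logic outside the diff; it is not part of the patch, `cache` stands for a torch._dynamo cache entry, and the `get_leaf_guards` accessor is an assumption (the diff itself only shows the recursion over `get_child_managers`).

import torch

def extract_guards(cache):
    """Return the guards attached to one Dynamo cache entry (sketch only)."""
    # Recent PyTorch (since pytorch/pytorch#138896) stores guards on
    # `cache.guard_manager`; older releases used `cache.check_fn`.
    attr_name = "guard_manager" if hasattr(cache, "guard_manager") else "check_fn"
    guard_manager = getattr(cache, attr_name)

    # Detect the C++ guard implementation; on any failure fall back to the
    # plain-Python path, as the patch does.
    cpp_guard = False
    try:
        import torch._dynamo.guards as dynamo_guards
        klass = getattr(dynamo_guards, "GuardManagerWrapper", None) or \
            getattr(torch._C._dynamo.guards, "GuardManager", None)
        cpp_guard = klass is not None and isinstance(guard_manager, klass)
    except Exception:
        pass

    if not cpp_guard:
        # Older PyTorch: the attribute is a plain Python function whose
        # guard expressions live in `code_parts`.
        return list(guard_manager.code_parts)

    # Newer PyTorch: walk the guard-manager tree, mirroring the `visit`
    # helper touched by the patch. `get_leaf_guards` is assumed here; only
    # `get_child_managers` appears in the diff above.
    guards = []

    def visit(root):
        guards.extend(getattr(root, "get_leaf_guards", lambda: [])())
        for child in root.get_child_managers():
            visit(child)

    visit(guard_manager.root)
    return guards

The two-step getattr lookup is what lets the same code run against both the newer GuardManagerWrapper exposed from torch._dynamo.guards and the older GuardManager exposed from the C++ extension.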
steps: - uses: actions/checkout@v3 diff --git a/tests/test_pytorch/cmp_output.py b/tests/test_pytorch/cmp_output.py index 6e11415d..94b49448 100644 --- a/tests/test_pytorch/cmp_output.py +++ b/tests/test_pytorch/cmp_output.py @@ -7,8 +7,8 @@ def exclude_files(files, keys): expected_full_code = sorted(glob.glob("tests/depyf_output/*/full_code_*.py")) expected_full_code = [x[len("tests/"):] for x in expected_full_code] -output_full_code = exclude_files(output_full_code, ["insert_deferred_runtime_asserts", "AFTER POST GRAD"]) -expected_full_code = exclude_files(expected_full_code, ["insert_deferred_runtime_asserts", "AFTER POST GRAD"]) +output_full_code = exclude_files(output_full_code, ["insert_deferred_runtime_asserts", "AFTER POST GRAD", "tensorify_python_scalars"]) +expected_full_code = exclude_files(expected_full_code, ["insert_deferred_runtime_asserts", "AFTER POST GRAD", "tensorify_python_scalars"]) msg = "Unexpected files:\n" for x in set(output_full_code) - set(expected_full_code): @@ -29,8 +29,8 @@ def exclude_files(files, keys): expected_files.sort() expected_files = [x[len("tests/"):] for x in expected_files] -output_files = exclude_files(output_files, ["insert_deferred_runtime_asserts", "AFTER POST GRAD"]) -expected_files = exclude_files(expected_files, ["insert_deferred_runtime_asserts", "AFTER POST GRAD"]) +output_files = exclude_files(output_files, ["insert_deferred_runtime_asserts", "AFTER POST GRAD", "tensorify_python_scalars"]) +expected_files = exclude_files(expected_files, ["insert_deferred_runtime_asserts", "AFTER POST GRAD", "tensorify_python_scalars"]) msg = f"len(output_files)={len(output_files)}, len(expected_files)={len(expected_files)}.\n" msg += "Unexpected files:\n" From 3956c5b71921d6b7af3b9dd8f932cf2f7ee059ed Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 3 Nov 2024 18:35:41 -0800 Subject: [PATCH 3/4] update tests Signed-off-by: youkaichao --- .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_11 Backward graph 0.py | 20 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 Joint graph 0.py | 22 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 20 + .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + .../__compiled_fn_7 Backward graph 0.py | 21 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 10 +- .../__compiled_fn_7 Joint graph 0.py | 26 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 30 ++ ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- ...erred_runtime_asserts 
__compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 15 + .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 12 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 9 + .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 15 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 9 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_11 Captured Graph 0.py | 10 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + .../__compiled_fn_7 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_11 Captured Graph 0.py | 10 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + .../__compiled_fn_7 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + 
...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 AFTER POST GRAD 0.py | 15 - .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- .../__compiled_fn_1 kernel 0.py | 87 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_11 AFTER POST GRAD 0.py | 12 +- .../__compiled_fn_11 AFTER POST GRAD 1.py | 18 + .../__compiled_fn_11 Backward graph 0.py | 20 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 Joint graph 0.py | 22 +- .../__compiled_fn_11 kernel 0.py | 53 ++- .../__compiled_fn_11 kernel 1.py | 105 +++++ ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 20 + .../__compiled_fn_5 AFTER POST GRAD 0.py | 9 - .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + .../__compiled_fn_7 AFTER POST GRAD 0.py | 10 +- .../__compiled_fn_7 AFTER POST GRAD 1.py | 21 +- .../__compiled_fn_7 Backward graph 0.py | 21 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 10 +- .../__compiled_fn_7 Joint graph 0.py | 26 +- .../__compiled_fn_7 kernel 0.py | 91 ++-- .../__compiled_fn_7 kernel 1.py | 70 ++- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 30 ++ ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- .../__compiled_fn_1 kernel 0.py | 85 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 15 + .../__compiled_fn_11 AFTER POST GRAD 0.py | 12 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- 
.../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 kernel 0.py | 53 ++- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 12 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 9 + .../__compiled_fn_7 AFTER POST GRAD 0.py | 8 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 8 +- .../__compiled_fn_7 kernel 0.py | 89 ++-- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 15 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_1_for_toy_function.py | 4 +- .../full_code_for_toy_function_0.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_1.py | 438 ++++++++++++++++-- .../full_code_for_toy_function_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 AFTER POST GRAD 0.py | 6 +- .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- .../__compiled_fn_1 kernel 0.py | 87 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_5 AFTER POST GRAD 0.py | 4 +- .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- .../__compiled_fn_1 kernel 0.py | 85 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 9 + ...orch_dynamo_resume_in_toy_function_at_5.py | 5 +- .../full_code_for_toy_function_0.py | 143 +++++- .../full_code_for_toy_function_1.py | 143 +++++- .../full_code_for_toy_function_2.py | 143 +++++- .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_11 Backward graph 0.py | 20 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 Joint graph 0.py | 22 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 20 + .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 
tensorify_python_scalars 0.py | 14 + .../__compiled_fn_7 Backward graph 0.py | 21 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 10 +- .../__compiled_fn_7 Joint graph 0.py | 26 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 30 ++ ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...mpiled_fn_1 tensorify_python_scalars 0.py} | 4 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 12 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...mpiled_fn_5 tensorify_python_scalars 0.py} | 2 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 15 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...mpiled_fn_1 tensorify_python_scalars 0.py} | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...mpiled_fn_5 tensorify_python_scalars 0.py} | 2 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_11 Captured Graph 0.py | 10 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + .../__compiled_fn_7 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + 
...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_11 Captured Graph 0.py | 10 +- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + .../__compiled_fn_7 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 AFTER POST GRAD 0.py | 15 - .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- .../__compiled_fn_1 kernel 0.py | 87 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_11 AFTER POST GRAD 0.py | 12 - .../__compiled_fn_11 Backward graph 0.py | 20 +- .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 Joint graph 0.py | 22 +- .../__compiled_fn_11 kernel 0.py | 53 ++- .../__compiled_fn_11 kernel 1.py | 105 +++++ ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 20 + .../__compiled_fn_5 AFTER POST GRAD 0.py | 9 - .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + .../__compiled_fn_7 AFTER POST GRAD 0.py | 15 - .../__compiled_fn_7 AFTER POST GRAD 1.py | 19 - .../__compiled_fn_7 Backward graph 0.py | 21 +- .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 10 +- .../__compiled_fn_7 Joint graph 0.py | 26 +- .../__compiled_fn_7 kernel 0.py | 91 ++-- .../__compiled_fn_7 kernel 1.py | 70 ++- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 30 ++ ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- 
.../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- .../__compiled_fn_1 kernel 0.py | 85 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...mpiled_fn_1 tensorify_python_scalars 0.py} | 4 +- .../__compiled_fn_11 AFTER POST GRAD 0.py | 12 - .../__compiled_fn_11 Captured Graph 0.py | 10 +- .../__compiled_fn_11 Forward graph 0.py | 12 +- .../__compiled_fn_11 kernel 0.py | 53 ++- ...rred_runtime_asserts __compiled_fn_11 0.py | 15 + ...mpiled_fn_11 tensorify_python_scalars 0.py | 12 + .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...mpiled_fn_5 tensorify_python_scalars 0.py} | 2 +- .../__compiled_fn_7 AFTER POST GRAD 0.py | 15 - .../__compiled_fn_7 Captured Graph 0.py | 4 +- .../__compiled_fn_7 Forward graph 0.py | 8 +- .../__compiled_fn_7 kernel 0.py | 89 ++-- ...erred_runtime_asserts __compiled_fn_7 0.py | 18 + ...ompiled_fn_7 tensorify_python_scalars 0.py | 15 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../__transformed_code_1_for_forward.py | 4 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 438 ++++++++++++++++-- .../full_code_for_forward_1.py | 438 ++++++++++++++++-- .../full_code_for_forward_2.py | 438 ++++++++++++++++-- .../__compiled_fn_1 AFTER POST GRAD 0.py | 15 - .../__compiled_fn_1 Backward graph 0.py | 11 +- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 6 +- .../__compiled_fn_1 Joint graph 0.py | 8 +- .../__compiled_fn_1 kernel 0.py | 87 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...ompiled_fn_1 tensorify_python_scalars 0.py | 30 ++ .../__compiled_fn_5 AFTER POST GRAD 0.py | 9 - .../__compiled_fn_5 Backward graph 0.py | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 4 +- .../__compiled_fn_5 Joint graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...ompiled_fn_5 tensorify_python_scalars 0.py | 14 + ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 4 +- .../__compiled_fn_1 Forward graph 0.py | 4 +- .../__compiled_fn_1 kernel 0.py | 85 ++-- ...erred_runtime_asserts __compiled_fn_1 0.py | 18 + ...mpiled_fn_1 tensorify_python_scalars 0.py} | 4 +- .../__compiled_fn_5 Captured Graph 0.py | 2 +- .../__compiled_fn_5 Forward graph 0.py | 2 +- .../__compiled_fn_5 kernel 0.py | 31 +- ...erred_runtime_asserts __compiled_fn_5 0.py | 12 + ...mpiled_fn_5 tensorify_python_scalars 0.py} | 2 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 5 +- .../full_code_for_forward_0.py | 143 +++++- .../full_code_for_forward_1.py | 143 +++++- .../full_code_for_forward_2.py | 143 +++++- .../__compiled_fn_1 Captured Graph 0.py | 2 +- .../__compiled_fn_1 Captured Graph 1.py | 11 + .../__transformed_code_0_for_f.py | 3 +- .../multiprocessing/full_code_for_f_0.py | 58 ++- .../multiprocessing/full_code_for_f_1.py | 58 ++- .../multiprocessing/full_code_for_f_10.py | 82 ++++ 
.../multiprocessing/full_code_for_f_11.py | 82 ++++ .../multiprocessing/full_code_for_f_2.py | 58 ++- .../multiprocessing/full_code_for_f_3.py | 82 ++++ .../multiprocessing/full_code_for_f_4.py | 82 ++++ .../multiprocessing/full_code_for_f_5.py | 82 ++++ .../multiprocessing/full_code_for_f_6.py | 82 ++++ .../multiprocessing/full_code_for_f_7.py | 82 ++++ .../multiprocessing/full_code_for_f_8.py | 82 ++++ .../multiprocessing/full_code_for_f_9.py | 82 ++++ 470 files changed, 23426 insertions(+), 3812 deletions(-) create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 
tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py delete mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts 
__compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 
tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename tests/depyf_output/{debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py => debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py} (64%) create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename tests/depyf_output/{debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py => debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py} (57%) create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename tests/depyf_output/{debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py => debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py} (64%) create mode 100644 tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre 
insert_deferred_runtime_asserts __compiled_fn_5 0.py rename tests/depyf_output/{debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py => debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py} (57%) create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 
tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename tests/depyf_output/{debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py => debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py} (63%) delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename tests/depyf_output/{debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py => debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py} (56%) delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py rename tests/depyf_output/{debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py => 
debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py} (63%) create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py rename tests/depyf_output/{debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py => debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py} (56%) create mode 100644 tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_10.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_11.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_3.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_4.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_5.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_6.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_7.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_8.py create mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_9.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index eac62f56..75ed5eb7 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 
Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index df1dd8de..9d97d16d 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 08dfc559..71225da5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..71225da5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + 
primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py index 9ce4dce7..496fa11b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py @@ -2,17 +2,17 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]", tangents_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_3: 
"f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return [mul_4, mul_3] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py index 95acd5c0..9595a545 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, 
code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py index 55d7f736..343df7ad 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py @@ -3,18 +3,18 @@ def forward(self, primals, tangents): - primals_1: "f32[8]"; primals_2: "f32[8]"; tangents_1: "f32[8]"; + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul) - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return pytree.tree_unflatten([mul_1, mul_4, mul_3], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 
00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..343df7ad --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,20 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; + + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 25ad792f..e6db532c 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, 
primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 88211d70..65d9fe55 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index 2bebad1d..7854b589 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts 
__compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..7854b589 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py index 4b28fe2c..803b21dd 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = 
torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py index 914e86ea..ced18d7f 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = 
torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] + return (div, lt, primals_2, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py index 9ccccac8..64287d39 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py @@ -6,25 +6,25 @@ def forward(self, primals, tangents): primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, None, add_1, None, None], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..64287d39 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; + + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = 
None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 5793f71c..e7cd6503 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d37cf70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d4c9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d37cf70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d4c9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d33fbe0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4344327408)) \ - and (___check_obj_id(G['torch'].abs, 4349810448)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4374424736) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4708278544) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13d33de10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13d1c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4344327408)) \ - and (___check_obj_id(G['torch'].abs, 4349810448)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4374424736) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4708278544) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 0b816064..985eebc3 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138274f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1383c9e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138274f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1383c9e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138237d00>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314115312)) \ - and (___check_obj_id(G['torch'].abs, 4319598352)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358089488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408384928) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138235e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1381bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314115312)) \ - and (___check_obj_id(G['torch'].abs, 4319598352)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358089488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408384928) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 42a0461c..1c05e2a7 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131374f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1314c9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131374f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1314c9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
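In the updated __transformed_code_1 variants above, the decompiled bytecode fetches dynamic dimensions through __import_torch_dot__dynamo_dot_utils.call_size(b, 0) (depyf's mangled alias for torch._dynamo.utils) rather than b.size(0). A quick sanity check of the assumed behaviour -- that call_size(x, i) returns the same plain int as x.size(i) -- can be run on a recent PyTorch:

import torch
from torch._dynamo.utils import call_size  # import path implied by the mangled name in the call sites above

b = torch.randn(8)
# assumption: call_size simply returns the Python int size of dimension i
print(call_size(b, 0), b.size(0))  # expected: 8 8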
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131337d00>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4347702512)) \ - and (___check_obj_id(G['torch'].abs, 4353185552)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420968720) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131335e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1312bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4347702512)) \ - and (___check_obj_id(G['torch'].abs, 4353185552)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420968720) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
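The paired guards above (size=[10] next to size=[None]) reflect Dynamo's automatic dynamic shapes: the first call specializes on the observed length, and a later call with a different length recompiles with a symbolic size. A hypothetical repro, outside the test suite, using the toy_function quoted in the graph comments and the aot_eager backend these fixtures were generated with:

import torch

def toy_function(a, b):
    # body reproduced from the File: comments in the captured graphs above
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

compiled = torch.compile(toy_function, backend="aot_eager")
a, b = torch.randn(10, requires_grad=True), torch.randn(10, requires_grad=True)
compiled(a, b)    # first compile: guards pin size=[10]
a8, b8 = torch.randn(8, requires_grad=True), torch.randn(8, requires_grad=True)
compiled(a8, b8)  # new length: recompiles with dynamic size=[None] guards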
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index d7e9b956..21d4d18b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..21d4d18b --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py index 52a64f77..09af1230 100644 --- 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..09af1230 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + 
return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 42d33ece..c7d3fcde 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..c7d3fcde --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,9 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py index 59029dea..a4ee2c65 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py 
b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..a4ee2c65 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index da63eaeb..119085af 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b65f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132720c10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b65f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132720c10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b7b50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4342262944)) \ - and (___check_obj_id(G['torch'].abs, 4362884960)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4336593600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4409433424) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1324b5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1324648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4342262944)) \ - and (___check_obj_id(G['torch'].abs, 4362884960)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4336593600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4409433424) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 39931038..9201db6c 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dba5f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117020670>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dba5f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117020670>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116dbbb50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4381322480)) \ - and (___check_obj_id(G['torch'].abs, 4386805520)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329270432) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396851472) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116db9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x116d64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4381322480)) \ - and (___check_obj_id(G['torch'].abs, 4386805520)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329270432) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396851472) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 3464c1d1..a9ad0793 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118aba5f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118d21870>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118aba5f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118d21870>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
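
# Editor's note: in the dynamic-shape recompilation above, check_tensor guards on size=[None]
# (any length along dim 0), while the transformed code forwards the concrete length through
# __import_torch_dot__dynamo_dot_utils.call_size(b, 0) as an extra graph input. The sketch
# below is a minimal, hypothetical illustration of that division of labor; guard_dynamic,
# compiled_fn_sketch and call_size are illustrative stand-ins, not the generated identifiers.
import torch

def call_size(t, dim):
    # stand-in mirroring the call_size(b, 0) argument in the transformed code
    return t.size(dim)

def guard_dynamic(b, x):
    # size=[None] in check_tensor: only rank/dtype are pinned, the length stays free
    return (b.dim() == 1 and x.dim() == 1
            and b.dtype == torch.float32 and x.dtype == torch.float32)

def compiled_fn_sketch(s0, b, x):
    # the compiled graph receives s0 = b.size(0) explicitly and returns a flat tuple
    return (x * b,)

b, x = torch.ones(7), torch.ones(7)
if guard_dynamic(b, x):
    out, = compiled_fn_sketch(call_size(b, 0), b, x)
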
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118abbb50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4355878128)) \ - and (___check_obj_id(G['torch'].abs, 4360787888)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353043216) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439842128) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
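
# Editor's note: both guard styles appear side by side in these hunks, the old single chained
# boolean expression and the new __guard_hit accumulator that comes with the guard_manager
# rework. A small illustrative comparison with placeholder conditions (not the generated
# checks), showing the two forms are equivalent and the accumulator still short-circuits:
def guard_old(L):
    # pre-rework style: one chained boolean expression
    return (L['a'] is not None) \
        and (L['b'] is not None)

def guard_new(L):
    # new style: accumulate into __guard_hit; `and` short-circuits, so later checks are
    # skipped once one fails, and each line can carry its own provenance comment
    __guard_hit = True
    __guard_hit = __guard_hit and L['a'] is not None
    __guard_hit = __guard_hit and L['b'] is not None
    return __guard_hit

assert guard_old({'a': 1, 'b': 2}) == guard_new({'a': 1, 'b': 2})
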
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118ab9510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x118a64ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4355878128)) \ - and (___check_obj_id(G['torch'].abs, 4360787888)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353043216) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439842128) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
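
# Editor's note: for orientation, the function these fixtures compile can be read off the
# "File:" comments in the graph dumps (test_pytorch.py lines 4, 5 and 7). The body of the
# if-branch is not visible in these hunks, so the `b = b * -1` line below is an assumption
# based on the usual Dynamo toy example, not something taken from the diff.
import torch

def toy_function(a, b):
    x = a / (torch.abs(a) + 1)   # test_pytorch.py:4
    if b.sum() < 0:              # test_pytorch.py:5
        b = b * -1               # assumed branch body, not shown in this diff
    return x * b                 # test_pytorch.py:7

compiled = torch.compile(toy_function, backend="aot_eager")
out = compiled(torch.randn(10), torch.randn(10))
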
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index eac62f56..75ed5eb7 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py 
b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index df1dd8de..9d97d16d 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 08dfc559..71225da5 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sgn: "f32[10]" = 
torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..71225da5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = 
torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 25ad792f..e6db532c 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 88211d70..65d9fe55 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in 
torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index 2bebad1d..7854b589 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..7854b589 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + 
return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index a54ab553..0088dbcb 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125274f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1251bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125235e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1251bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4309019888)) \ - and (___check_obj_id(G['torch'].abs, 4314502928)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359597216) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index e6b22a2e..906a7d7a 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e77cf70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11e6c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e739e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11e6c52d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4316278000)) \ - and (___check_obj_id(G['torch'].abs, 4321761040)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363938976) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4430405904) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index e2098bad..7d5c494b 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x165368f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x15a8bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x165325e10>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x15a8bd2d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4340919536)) \ - and (___check_obj_id(G['torch'].abs, 4346402576)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4414171840) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4878147056) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
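
# Editor's note: the regenerated __compiled_fn_1 backward graph above no longer receives the
# saved `div` output; it recomputes primals_1 / add, and the remaining aten ops are the
# quotient and abs chain rule for x = a / (|a| + 1). A standalone sketch, independent of the
# generated files, checking that op sequence against autograd:
import torch

a = torch.randn(10, requires_grad=True)
x = a / (torch.abs(a) + 1)
g = torch.randn(10)                      # plays the role of tangents_1
x.backward(g)

# mirror the ops in the regenerated backward graph
ad = a.detach()
add = torch.abs(ad) + 1
manual = g / add + (-g) * (ad / add / add) * torch.sgn(ad)
assert torch.allclose(a.grad, manual)
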
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index d7e9b956..21d4d18b 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..21d4d18b --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 42d33ece..c7d3fcde 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..c7d3fcde --- /dev/null +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,9 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 42bb90c0..7ca19c60 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ 
b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244b65f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1244648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244b5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1244648b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4337167600)) \ - and (___check_obj_id(G['torch'].abs, 4342650640)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392955040) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4583498000) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
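The regenerated expected files above switch the guard functions from one chained boolean expression to an accumulator style: every check is and-ed into __guard_hit, so the first failing check short-circuits the rest and the function simply returns the accumulated flag. A minimal, self-contained sketch of that pattern (the checks below are illustrative stand-ins, not depyf's or PyTorch's real check_tensor / check_no_aliasing helpers):

    def sketch_guard(L):
        __guard_hit = True
        # each real guard line has this shape; once False, later checks are skipped
        __guard_hit = __guard_hit and isinstance(L['b'], list)                      # stand-in for check_tensor(L['b'], ...)
        __guard_hit = __guard_hit and not hasattr(L['b'], '_dynamo_dynamic_indices')
        __guard_hit = __guard_hit and L['b'] is not L['x']                          # stand-in for check_no_aliasing(L['b'], L['x'])
        return __guard_hit

    print(sketch_guard({'b': [1.0], 'x': [2.0]}))  # True
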
diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 190b7b80..f18bee8e 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c2b7490>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c264ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c2b5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c264ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4373916912)) \ - and (___check_obj_id(G['torch'].abs, 4389098880)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393970448) + __guard_hit = __guard_hit 
and ___check_obj_id(G['torch'].abs, 4720451216) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 23985f89..9d242092 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c6b65f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c664ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c6b5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c664ca0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4312804592)) \ - and (___check_obj_id(G['torch'].abs, 4318287632)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405144496) + __guard_hit = __guard_hit 
and ___check_obj_id(G['torch'].abs, 4475493872) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; 
l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of 
local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index c53f796b..3f8c75d8 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12984dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a913f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12984dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a913f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a53be0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4376161520)) \ - and (___check_obj_id(G['torch'].abs, 4381644560)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361071376) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4407336432) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12976c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1297cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4376161520)) \ - and (___check_obj_id(G['torch'].abs, 4381644560)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361071376) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4407336432) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
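For the dynamic-shape variants, the regenerated transformed code now forwards the symbolic size explicitly (rendered as __import_torch_dot__dynamo_dot_utils.call_size(b, 0) in the output above, i.e. torch._dynamo.utils.call_size) and unpacks the compiled graph's one-element result tuple instead of returning it directly. A hedged, self-contained sketch of that calling convention, with fake_compiled_fn as a hypothetical stand-in for __compiled_fn_11:

    def fake_compiled_fn(size0, b, x):
        # stand-in for __compiled_fn_11: computes x * (b * -1) and returns a tuple
        return ([xi * (bi * -1.0) for xi, bi in zip(x, b)],)

    def transformed_resume(b, x):
        size0 = len(b)                             # real output uses torch._dynamo.utils.call_size(b, 0)
        __temp, = fake_compiled_fn(size0, b, x)    # unpack the single-element result tuple
        return __temp

    print(transformed_resume([1.0, 2.0], [3.0, 4.0]))  # [-3.0, -8.0]
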
diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index f092cd4c..61a075dd 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12174dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
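
For orientation, the program behind all of these outputs can be recovered from the `# File: .../test_pytorch.py` comments embedded in the captured graphs: `x = a / (torch.abs(a) + 1)`, a data-dependent branch on `b.sum() < 0`, then `return x * b`. The sketch below reproduces that toy function and the call pattern that explains why both static (`size=[10]`) and dynamic (`size=[None]`, `Sym(s0)`) guards appear; the exact `torch.compile` options vary across the test-matrix directories, so treat the decorator arguments as an assumption.

import torch

@torch.compile(backend="eager")
def toy_function(a, b):
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:   # graph break: the branch depends on tensor data
        b = b * -1
    return x * b

# First call compiles static-shape artifacts (guards with size=[10]).
toy_function(torch.randn(10), torch.randn(10))
# A call with a different length misses those guards and triggers a
# dynamic-shape recompilation (guards with size=[None], graphs taking Sym(s0)).
toy_function(torch.randn(8), torch.randn(8))
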
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12228d3f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
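
One recurring change above is `return __compiled_fn_5(x, b)` becoming `__temp_7, = __compiled_fn_5(x, b)` followed by `return __temp_7`: the compiled graph returns a one-element tuple (`return (mul,)` in the graph dumps), and the updated transformed code unpacks it explicitly instead of returning the tuple itself. A minimal illustration of the difference, using a hypothetical stand-in for the compiled callable:

def compiled_fn_stub(x, b):
    # Compiled graphs return their outputs as a tuple, e.g. `return (mul,)`.
    return (x * b,)

def old_style(x, b):
    return compiled_fn_stub(x, b)      # returns a 1-tuple, not the value itself

def new_style(x, b):
    __temp, = compiled_fn_stub(x, b)   # unpack the single graph output
    return __temp

assert old_style(2, 3) == (6,)
assert new_style(2, 3) == 6
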
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12174dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
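
Each `full_code_for_*.py` pairs a guard (`__guard_N_for_...`) with a transformed body (`__transformed_code_N_for_...`). The actual dispatch happens inside Dynamo's C frame-evaluation hook, so the following is only an illustrative sketch of the control flow these files document: try each cached entry's guard against the frame's locals and globals, run the first transformed body that matches, and otherwise fall back to the original function.

def dispatch(cache_entries, original_fn, frame_locals, frame_globals):
    # cache_entries: list of (guard_fn, transformed_fn) pairs, newest first,
    # e.g. [(__guard_1_for_toy_function, __transformed_code_1_for_toy_function),
    #       (__guard_0_for_toy_function, __transformed_code_0_for_toy_function)]
    for guard_fn, transformed_fn in cache_entries:
        if guard_fn(frame_locals, frame_globals):
            return transformed_fn(**frame_locals)
    return original_fn(**frame_locals)
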
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12228d3f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
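
The `__resume_at_30_*` / `__resume_at_38_*` functions referenced above are the continuations Dynamo creates at the graph break on `if b.sum() < 0:`. Their behavior can be read off the captured graphs (`__compiled_fn_11` multiplies `b` by -1 before computing `x * b`; `__compiled_fn_5` only computes `x * b`); the plain-Python reconstruction below is for readability and is not the generated bytecode itself.

def resume_branch_taken(b, x):      # corresponds to __resume_at_30_* / __compiled_fn_11
    b = b * -1
    return x * b

def resume_branch_not_taken(b, x):  # corresponds to __resume_at_38_* / __compiled_fn_5
    return x * b
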
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122257c70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4315540720)) \ - and (___check_obj_id(G['torch'].abs, 4321023360)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360875168) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4577206464) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12166c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1216cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4315540720)) \ - and (___check_obj_id(G['torch'].abs, 4321023360)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360875168) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4577206464) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index e144d1e5..ff93e73f 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12124dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12148ed40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12124dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12148ed40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
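
The long `# Note: the following variables are used inside the guard function.` preambles list the closure and global values each guard needs, serialized as their `repr` inside triple-quoted strings so the generated file stays valid Python even when the value (a builtin, a sympy handle, a module) cannot be reconstructed from source. A hedged sketch of how such a preamble could be produced from a name-to-value mapping; `dump_guard_vars` is an illustrative helper, not depyf's actual code:

def dump_guard_vars(freevars: dict) -> str:
    # Render each guard variable as `name = '''repr(value)'''` so the emitted
    # full_code file remains importable regardless of the value's type.
    return "\n".join(f"{name} = '''{value!r}'''" for name, value in freevars.items())

print(dump_guard_vars({"inf": float("inf"), "__numpy_isnan": None}))
# inf = '''inf'''
# __numpy_isnan = '''None'''
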
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121453c70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4370181360)) \ - and (___check_obj_id(G['torch'].abs, 4375664400)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4327599264) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4410482960) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
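
The `___check_obj_id(G['torch'], ...)` guards pin the identity of globals such as the `torch` module and `torch.abs`; the large integers are `id()` values recorded in the run that produced each file, which is why they differ between the three `full_code_for_toy_function_*.py` outputs. A minimal stand-in with the same semantics (illustrative only):

import torch

def check_obj_id(obj, expected_id: int) -> bool:
    # The guard holds only if the global still refers to the exact same object.
    return id(obj) == expected_id

recorded = id(torch.abs)                # captured at compile time
assert check_obj_id(torch.abs, recorded)
assert not check_obj_id(abs, recorded)  # a different object fails the guard
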
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12106c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1210cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4370181360)) \ - and (___check_obj_id(G['torch'].abs, 4375664400)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4327599264) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4410482960) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = 
None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - 
return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index cdcf3d88..07a67260 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4e0e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110b4e0e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110d536d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4311543024)) \ - and (___check_obj_id(G['torch'].abs, 4317026064)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387024032) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4445085968) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110a681f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x110ac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4311543024)) \ - and (___check_obj_id(G['torch'].abs, 4317026064)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387024032) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4445085968) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
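The fixture updates above all follow the same two patterns: the transformed bytecode now unpacks the graph outputs from a tuple (`__temp_7, = __compiled_fn_5(x, b)` instead of `return __compiled_fn_5(x, b)`), and dynamic dimension sizes are passed in through `torch._dynamo.utils.call_size` (the mangled `__import_torch_dot__dynamo_dot_utils` import) rather than `Tensor.size(0)`. The sketch below only illustrates that calling convention; `compiled_fn` and `transformed_resume` are hypothetical stand-ins for the generated `__compiled_fn_*` / `__transformed_code_*` objects, and the fallback covers older PyTorch builds where `call_size` may not be importable.

# Illustrative sketch, not part of the generated fixtures.
import torch

try:
    from torch._dynamo.utils import call_size  # internal helper referenced by the new fixtures
except ImportError:  # assumed fallback for older PyTorch versions
    def call_size(t, dim):
        return t.size(dim)

def compiled_fn(s0, b, x):
    # hypothetical stand-in for __compiled_fn_11: graph outputs come back as a tuple
    return (x * b,)

def transformed_resume(b, x):
    # mirrors the new convention: pass the dynamic size explicitly, then unpack the 1-tuple
    temp, = compiled_fn(call_size(b, 0), b, x)
    return temp

print(transformed_resume(torch.ones(10), torch.ones(10)).shape)  # torch.Size([10])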
diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 97c7e9c1..22d335f1 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12214d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12238e950>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12214d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12238e950>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223579a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4375833840)) \ - and (___check_obj_id(G['torch'].abs, 4381316480)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389956768) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4591886608) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12206c1f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1220cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4375833840)) \ - and (___check_obj_id(G['torch'].abs, 4381316480)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389956768) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4591886608) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index e5f5d2da..dd6ecc97 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096f5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1099369e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096f5510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1099369e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1098ff910>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314983664)) \ - and (___check_obj_id(G['torch'].abs, 4320466704)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332317856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4336640272) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1096141f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10966fa30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314983664)) \ - and (___check_obj_id(G['torch'].abs, 4320466704)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332317856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4336640272) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
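Every guard in these dumps was rewritten the same way: the single chained boolean expression over `___check_tensors` became a sequence of `__guard_hit = __guard_hit and ...` steps built from `check_tensor`, `check_no_aliasing`, and `___check_torch_function_mode_stack`, with `size=[None]` marking a dynamic dimension. The snippet below is only a schematic of that accumulator style; its `check_tensor` and `check_no_aliasing` are simplified Python stand-ins, not Dynamo's real C++-backed guard helpers.

# Schematic of the accumulator-style guard; the check_* helpers here are simplified stand-ins.
import torch

def check_tensor(t, dtype, size):
    # None entries mark dynamic dimensions, matching size=[None] in the dumped guards
    return t.dtype == dtype and all(s is None or t.size(i) == s for i, s in enumerate(size))

def check_no_aliasing(*tensors):
    # approximate the real no-aliasing check by comparing storage pointers
    return len({t.data_ptr() for t in tensors}) == len(tensors)

def guard(L, G, **___kwargs_ignored):
    __guard_hit = True
    __guard_hit = __guard_hit and check_tensor(L['a'], torch.float32, [None])
    __guard_hit = __guard_hit and check_tensor(L['b'], torch.float32, [None])
    __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b'])
    return __guard_hit

print(guard({'a': torch.ones(3), 'b': torch.ones(10)}, {}))  # True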
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index e978220c..124683c0 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c4dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x122acba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122a6c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x122acba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4350635248)) \ - and (___check_obj_id(G['torch'].abs, 4356118288)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549216) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4597129248) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 37072b9e..c26b8317 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b24dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12b1c3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b16c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12b1c3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4343966960)) \ - and (___check_obj_id(G['torch'].abs, 4349450000)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389137168) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4475493792) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 54589389..25d7ae8e 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ef4dab0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10edcba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ed6c040>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10edcba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4316441840)) \ - and (___check_obj_id(G['torch'].abs, 4321924880)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325551264) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395802896) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
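The guards emitted for the with_grad outputs above pin down tensor metadata (dtype, requires_grad, shape, stride) and aliasing via the C++ check_tensor / check_no_aliasing helpers. The following is only a rough eager-mode approximation of what those checks assert for this example; the real guards also verify dispatch keys and run inside the guard manager.

import torch

def _approx_tensor_guard(t: torch.Tensor, other: torch.Tensor) -> bool:
    # Mirrors the properties listed in the emitted check_tensor(...) call for
    # the requires_grad=True case: float32 dtype, rank-1 shape of 10,
    # contiguous stride, plus a crude no-aliasing check (`is not` stands in
    # for the real storage-overlap test).
    return (
        isinstance(t, torch.Tensor)
        and t.dtype == torch.float32
        and t.requires_grad
        and tuple(t.shape) == (10,)
        and t.stride() == (1,)
        and t is not other
    )

# e.g. _approx_tensor_guard(torch.randn(10, requires_grad=True),
#                           torch.randn(10, requires_grad=True)) -> True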
diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index da73490e..d032b848 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11924d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1191c3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11916c1f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1191c3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4380880112)) \ - and (___check_obj_id(G['torch'].abs, 4386363152)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4367707296) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434600128) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 71c4fca5..285858ee 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10fb4d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10fac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10fa6c1f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10fac3a30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4349586672)) \ - and (___check_obj_id(G['torch'].abs, 4353447856)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4351290528) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4401045776) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index d97408b2..343da990 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10d94d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10d8cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10d8681f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10d8cba30>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4316065008)) \ - and (___check_obj_id(G['torch'].abs, 4323039232)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384992416) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4389314832) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
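Across all of these outputs, the resume-function body changes from returning the compiled call directly to unpacking a one-element tuple (__temp_7, = __compiled_fn_5(x, b)), because the compiled graph now returns (mul,) rather than a bare tensor. A small before/after sketch of that calling convention; compiled_fn here is a hypothetical stand-in for the compiled graph callable.

def _old_convention(compiled_fn, x, b):
    return compiled_fn(x, b)        # compiled graph used to return the tensor itself

def _new_convention(compiled_fn, x, b):
    result, = compiled_fn(x, b)     # compiled graph now returns a 1-tuple such as (mul,)
    return result

# e.g. _new_convention(lambda x, b: (x * b,), 2.0, 3.0) == 6.0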
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py deleted file mode 100644 index df1dd8de..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index e4230abb..2d7f4057 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index df1dd8de..9d97d16d 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index b7e83393..defa2f56 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / 
(torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py index 2797b423..f84427fe 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void 
kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,13 +93,13 @@ def call(args): args.clear() assert_size_stride(primals_1, (10, ), (1, )) assert_size_stride(primals_2, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - 
cpp_fused_abs_add_div_lt_sum_0(primals_1, primals_2, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, buf0, ) + return (buf0, buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..defa2f56 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py index 95acd5c0..9595a545 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py @@ -2,11 +2,11 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py new file mode 100644 index 00000000..496fa11b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 1.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) + \ No newline at end of file 
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py index 9ce4dce7..496fa11b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py @@ -2,17 +2,17 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]", tangents_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return [mul_4, mul_3] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py index 95acd5c0..9595a545 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py index 55d7f736..343df7ad 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py @@ -3,18 +3,18 @@ def forward(self, primals, tangents): - primals_1: "f32[8]"; primals_2: "f32[8]"; tangents_1: "f32[8]"; + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b 
= b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul) - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return pytree.tree_unflatten([mul_1, mul_4, mul_3], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py index 3452a06f..615dbd6f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['3_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,27 +20,40 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include 
"/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - float* out_ptr0) + float* out_ptr0, + const int64_t ks0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); auto tmp2 = static_cast(-1.0); auto tmp3 = at::vec::Vectorized(tmp2); auto tmp4 = tmp1 * tmp3; auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -53,21 +64,23 @@ del async_compile def call(args): - primals_1, primals_2 = args + primals_1, primals_2, primals_3 = args args.clear() - assert_size_stride(primals_1, (8, ), (1, )) - assert_size_stride(primals_2, (8, ), (1, )) - buf0 = empty_strided_cpu((8, ), (1, ), torch.float32) - cpp_fused_mul_0(primals_2, primals_1, buf0) - return (buf0, primals_1, primals_2, ) + s0 = primals_1 + assert_size_stride(primals_2, (s0, ), (1, )) + assert_size_stride(primals_3, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(primals_3, primals_2, buf0, s0) + return (buf0, primals_2, primals_3, s0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance - primals_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + primals_1 = 8 primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([primals_1, primals_2]) + primals_3 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py new file mode 100644 index 00000000..b0beb052 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py @@ -0,0 +1,105 @@ +# AOT ID: ['3_backward'] +from ctypes import c_void_p, c_long, c_int +import torch +import math +import 
random +import os +import tempfile +from math import inf, nan +from torch._inductor.hooks import run_intermediate_hooks +from torch._inductor.utils import maybe_profile +from torch._inductor.codegen.memory_planning import _align as align +from torch import device, empty_strided +from torch._inductor.async_compile import AsyncCompile +from torch._inductor.select_algorithm import extern_kernels +from torch._inductor.codegen.multi_kernel import MultiKernelCall + +aten = torch.ops.aten +inductor_ops = torch.ops.inductor +_quantized = torch.ops._quantized +assert_size_stride = torch._C._dynamo.guards.assert_size_stride +empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu +empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor +alloc_from_pool = torch.ops.inductor._alloc_from_pool +async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p + + +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, + const float* in_ptr1, + const float* in_ptr2, + float* out_ptr0, + float* out_ptr1, + const int64_t ks0) +{ + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0)); + tmp8.store(out_ptr1 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } +} +''') + + +async_compile.wait(globals()) +del async_compile + +def call(args): + primals_1, primals_2, primals_3, tangents_1 = args + args.clear() + s0 = primals_1 + assert_size_stride(primals_2, (s0, ), (1, )) + assert_size_stride(primals_3, (s0, ), (1, )) + assert_size_stride(tangents_1, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + buf1 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s0) + del primals_2 + del primals_3 + del tangents_1 + return (None, buf1, buf0, ) + + +def benchmark_compiled_module(times=10, repeat=10): + from torch._dynamo.testing import rand_strided + from torch._inductor.utils import print_performance + primals_1 = 8 + primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + primals_3 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + tangents_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([primals_1, primals_2, primals_3, tangents_1]) + return print_performance(fn, times=times, repeat=repeat) + + +if __name__ == "__main__": + from torch._inductor.wrapper_benchmark import compiled_module_main + compiled_module_main('None', benchmark_compiled_module) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 
0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..343df7ad --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,20 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; + + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py deleted file mode 100644 index 88211d70..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 25ad792f..e6db532c 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file 
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py
index 87b754c9..47764cc5 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py
@@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
     l_x_ = L_x_
     l_b_ = L_b_
 
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
     mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
     return (mul,)
     
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py
index 88211d70..65d9fe55 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py
@@ -3,7 +3,7 @@
 
 
 def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
     mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
-    return [mul, primals_1, primals_2]
+    return (mul, primals_1, primals_2)
     
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py
index 2bebad1d..7854b589 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py
@@ -6,7 +6,7 @@ def forward(self, primals, tangents):
     primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]";
 
     primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
     mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
     mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
     mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py
index 43d00e34..a2081e47 100644
--- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", 
L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..7854b589 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py index 914e86ea..ced18d7f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] + return (div, lt, primals_2, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py index 683fe99c..732a1dfd 100644 --- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py index 683fe99c..732a1dfd 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = 
torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py index 914e86ea..ced18d7f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 
1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] + return (div, lt, primals_2, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py index ba9b8d7a..ff9cdf79 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py @@ -6,25 +6,25 @@ def forward(self, primals, tangents): primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 
= None sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, None, add_1, None, None], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py index d4ba001a..643b9054 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,67 +20,68 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*', 'const long', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2, - const long ks0, - const long ks1) + bool* out_ptr1, + float* out_ptr2, + const int64_t ks0, + const int64_t ks1) { - { - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 
= at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks1, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks1, 8L))); x0(ks1); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } } } ''') @@ -98,13 +97,13 @@ def call(args): s1 = primals_3 assert_size_stride(primals_2, (s0, ), (1, )) assert_size_stride(primals_4, (s1, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_4, buf0, buf1, buf2, s0, s1) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s1, s0) del buf1 del primals_4 - return (buf0, buf2, primals_2, buf0, s0, ) + return (buf0, buf2, primals_2, s0, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py index 9492d6b9..4df30593 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_backward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,31 +20,32 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - const float* in_ptr2, float* out_ptr0, - const long ks0) + const int64_t ks0) { { - for(long 
x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); - auto tmp8 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp1.abs(); auto tmp3 = static_cast(1.0); auto tmp4 = at::vec::Vectorized(tmp3); auto tmp5 = tmp2 + tmp4; auto tmp6 = tmp0 / tmp5; auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; auto tmp9 = tmp8 / tmp5; auto tmp10 = tmp7 * tmp9; auto tmp11 = @@ -60,33 +59,33 @@ ; auto tmp12 = tmp10 * tmp11; auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0)); + tmp13.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp7 = in_ptr2[static_cast(x0)]; - auto tmp2 = std::abs(tmp1); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = tmp1.abs(); auto tmp3 = static_cast(1.0); - auto tmp4 = decltype(tmp2)(tmp2 + tmp3); - auto tmp5 = tmp0 / tmp4; - auto tmp6 = decltype(tmp0)(-tmp0); - auto tmp8 = tmp7 / tmp4; - auto tmp9 = decltype(tmp6)(tmp6 * tmp8); - auto tmp10 = + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = [&]() { - auto left = tmp1 > 0 ? decltype(tmp1)(1) : decltype(tmp1)(0); - auto right = tmp1 < 0 ? 
decltype(tmp1)(1) : decltype(tmp1)(0); + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); return left - right; } () ; - auto tmp11 = decltype(tmp9)(tmp9 * tmp10); - auto tmp12 = decltype(tmp5)(tmp5 + tmp11); - out_ptr0[static_cast(x0)] = tmp12; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -97,15 +96,13 @@ del async_compile def call(args): - primals_1, primals_2, div, tangents_1 = args + primals_1, primals_2, tangents_1 = args args.clear() s0 = primals_1 assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(div, (s0, ), (1, )) assert_size_stride(tangents_1, (s0, ), (1, )) buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, div, buf0, s0) - del div + cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s0) del primals_2 del tangents_1 return (None, buf0, None, None, ) @@ -116,9 +113,8 @@ def benchmark_compiled_module(times=10, repeat=10): from torch._inductor.utils import print_performance primals_1 = 8 primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - div = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) tangents_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([primals_1, primals_2, div, tangents_1]) + fn = lambda: call([primals_1, primals_2, tangents_1]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..ff9cdf79 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: 
"f32[s0]"; + + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line 
helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index f608f180..ab398d48 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559c3e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x155dcd5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559c3e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x155dcd5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559b3eb0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4369575152)) \ - and (___check_obj_id(G['torch'].abs, 4375058192)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317375648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4385317136) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1559b3a30>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x15545a560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4369575152)) \ - and (___check_obj_id(G['torch'].abs, 4375058192)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317375648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4385317136) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 2c686bd1..d8ac57e6 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12be07e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2eedd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12be07e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2eedd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bfe4310>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4375948528)) \ - and (___check_obj_id(G['torch'].abs, 4381431568)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4339723104) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406287856) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bdc7a30>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12b566560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4375948528)) \ - and (___check_obj_id(G['torch'].abs, 4381431568)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4339723104) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406287856) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index c201c88f..52e84fa4 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157103e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576e6710>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157103e20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576e6710>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
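# In the dynamic-shape variant above, the transformed code now passes the (possibly
# symbolic) size of dimension 0 as an explicit leading argument, via
# torch._dynamo.utils.call_size(b, 0). A rough plain-Python approximation, assuming
# call_size(t, d) evaluates to t.size(d); `compiled_fn` is a hypothetical stand-in:
import torch

def transformed_dynamic_sketch(b, x, compiled_fn):
    s0 = b.size(0)  # the generated code uses call_size so the access stays traceable
    out, = compiled_fn(s0, b, x)
    return out

# stand-in mirroring __compiled_fn_11's math (b = b * -1; return x * b):
_fake_fn = lambda s0, b, x: (x * (b * -1),)
print(transformed_dynamic_sketch(torch.ones(8), torch.ones(8), _fake_fn))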
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1576396c0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4381469936)) \ - and (___check_obj_id(G['torch'].abs, 4386952976)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404735136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4587692304) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
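# In the guards above, check_tensor pins dtype, device, requires_grad and the recorded
# size/stride; a None entry in size marks a dimension that was made dynamic and is not
# pinned (compare size=[None] here with size=[10] in the static-shape guard below).
# A simplified pure-Python stand-in for that size check (not the real C++ guard):
import torch

def check_tensor_like(t, dtype, requires_grad, size):
    if t.dtype != dtype or t.requires_grad != requires_grad:
        return False
    if t.dim() != len(size):
        return False
    return all(want is None or got == want for got, want in zip(t.shape, size))

print(check_tensor_like(torch.ones(7, requires_grad=True), torch.float32, True, [None]))  # True
print(check_tensor_like(torch.ones(7), torch.float32, False, [10]))                       # False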
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570c3a30>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x14a666560>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4381469936)) \ - and (___check_obj_id(G['torch'].abs, 4386952976)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404735136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4587692304) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
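# The ___check_obj_id guards above pin globals such as G['torch'] and G['torch'].abs by
# object identity, which is why the integer ids recorded in these fixtures differ between
# runs and machines. A conceptual approximation of that check (the real helper is
# provided by Dynamo's guard machinery, not this function):
import torch

def check_obj_id_like(obj, expected_id):
    return id(obj) == expected_id

print(check_obj_id_like(torch.abs, id(torch.abs)))  # True, but only within this process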
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index d7e9b956..21d4d18b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py index 2e399bae..466582ab 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile 
from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + 
out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,10 +93,10 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg0_1, arg1_1, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) del arg0_1 del arg1_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..21d4d18b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = 
torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py index 52a64f77..09af1230 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py @@ -2,11 +2,11 @@ -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index dfcbbb44..da5ddffd 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py index 
52a64f77..09af1230 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py index 4499f78e..4447a169 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['3_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,27 +20,40 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - 
float* out_ptr0) + float* out_ptr0, + const int64_t ks0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); auto tmp2 = static_cast(-1.0); auto tmp3 = at::vec::Vectorized(tmp2); auto tmp4 = tmp1 * tmp3; auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -53,23 +64,25 @@ del async_compile def call(args): - arg0_1, arg1_1 = args + arg0_1, arg1_1, arg2_1 = args args.clear() - assert_size_stride(arg0_1, (8, ), (1, )) - assert_size_stride(arg1_1, (8, ), (1, )) - buf0 = empty_strided_cpu((8, ), (1, ), torch.float32) - cpp_fused_mul_0(arg1_1, arg0_1, buf0) - del arg0_1 + s0 = arg0_1 + assert_size_stride(arg1_1, (s0, ), (1, )) + assert_size_stride(arg2_1, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(arg2_1, arg1_1, buf0, s0) del arg1_1 + del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance - arg0_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + arg0_1 = 8 arg1_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([arg0_1, arg1_1]) + arg2_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..541c5b2b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in 
torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..09af1230 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 42d33ece..c7d3fcde 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py index ff6102a2..a17b3a1d 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..c7d3fcde --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,9 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py index 59029dea..a4ee2c65 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index 010de53e..6deec089 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 
+6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py index 59029dea..a4ee2c65 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py index 00f8fb4e..326bb816 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,67 +20,68 @@ assert_size_stride = 
torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*', 'const long', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2, - const long ks0, - const long ks1) + bool* out_ptr1, + float* out_ptr2, + const int64_t ks0, + const int64_t ks1) { - { - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks1, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks1, 8L))); x0(ks1); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } } } ''') @@ -98,10 +97,10 @@ def call(args): s1 = arg2_1 assert_size_stride(arg1_1, (s0, ), (1, )) assert_size_stride(arg3_1, (s1, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg3_1, buf0, buf1, buf2, s0, s1) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf1, buf2, buf0, s1, s0) del arg1_1 del arg3_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..6deec089 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..a4ee2c65 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index e6ace39c..bc00f5f5 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,6 @@ def 
__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 2eb6c73d..51445f0b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 644ad776..804c11ca 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c828af0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cf5c160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c828af0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cf5c160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
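Editorial note on the regenerated guard bodies above: they switch from returning one chained boolean expression to accumulating into a `__guard_hit` flag, with per-tensor `check_tensor` and `check_no_aliasing` calls. The following is only a minimal sketch of that accumulation pattern; `check_global_state`, `check_tensor`, and `check_no_aliasing` are stand-in callables here, not the real guard helpers emitted by Dynamo.

    import torch

    def sketch_guard(L, check_global_state, check_tensor, check_no_aliasing):
        # Accumulate each guard into one flag instead of one long chained `and`
        # expression; a failed check short-circuits the remaining checks.
        # (The generated fixtures spell the hasattr test as `== False`.)
        guard_hit = True
        guard_hit = guard_hit and check_global_state()
        guard_hit = guard_hit and check_tensor(L['b'])
        guard_hit = guard_hit and not hasattr(L['b'], '_dynamo_dynamic_indices')
        guard_hit = guard_hit and check_tensor(L['x'])
        guard_hit = guard_hit and not hasattr(L['x'], '_dynamo_dynamic_indices')
        guard_hit = guard_hit and check_no_aliasing(L['b'], L['x'])
        return guard_hit

    # Tiny usage example with toy stand-ins for the real checks:
    L = {'b': torch.randn(10), 'x': torch.randn(10)}
    print(sketch_guard(
        L,
        check_global_state=lambda: True,
        check_tensor=lambda t: t.dtype == torch.float32,
        check_no_aliasing=lambda *ts: len({t.data_ptr() for t in ts}) == len(ts),
    ))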
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cbfc790>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4304448752)) \ - and (___check_obj_id(G['torch'].abs, 4309931792)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364692240) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417822192) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12c7eb520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bcecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4304448752)) \ - and (___check_obj_id(G['torch'].abs, 4309931792)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364692240) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417822192) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
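One recurring change across these fixture files deserves a gloss: the dynamic-shape transformed code now feeds sizes through `torch._dynamo.utils.call_size` instead of calling `.size(0)` on the tensor directly. A minimal sketch of the call-site behavior, assuming a recent PyTorch where `torch._dynamo.utils.call_size` is available and returns the same dimension size that `.size(dim)` does:

    import torch
    from torch._dynamo.utils import call_size

    a = torch.randn(10)
    b = torch.randn(8)

    # call_size(t, d) supplies the size of dimension d, which is what the compiled
    # graph expects as its leading Sym(s0)/Sym(s1) arguments.
    assert call_size(a, 0) == a.size(0) == 10
    assert call_size(b, 0) == b.size(0) == 8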
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index c49ea283..10722cb9 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d43caf0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
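The `__transformed_code_*` hunks in this file repeat the same shape of change: rather than returning the compiled graph's result directly, the generated code unpacks the one-element tuple that the graph returns. A minimal sketch with a hypothetical stand-in for `__compiled_fn_5`:

    import torch

    def fake_compiled_fn_5(x, b):
        # Compiled graphs return a tuple of outputs, even when there is only one.
        return (x * b,)

    def transformed_resume(b, x):
        # Mirrors `__temp_7, = __compiled_fn_5(x, b); return __temp_7` from the fixture.
        temp, = fake_compiled_fn_5(x, b)
        return temp

    print(transformed_resume(torch.ones(3), torch.full((3,), 2.0)))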
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12da50160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
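The `size=[10]` versus `size=[None]` arguments to `check_tensor` in these guards distinguish the static-shape compilation from the dynamic-shape recompilation of the same function. A rough repro sketch, assuming the toy function under test matches the source lines quoted in the graph comments (the `b = b * -1` branch body is inferred, not quoted in this diff):

    import torch

    @torch.compile  # inductor backend by default; shapes start out static
    def toy_function(a, b):
        x = a / (torch.abs(a) + 1)
        if b.sum() < 0:
            b = b * -1  # inferred branch body; not shown verbatim in this diff
        return x * b

    toy_function(torch.randn(10), torch.randn(10))  # first guards: size=[10]
    toy_function(torch.randn(8), torch.randn(8))    # recompile: dynamic guards, size=[None]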
@@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d43caf0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12da50160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d7f9870>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4373589232)) \ - and (___check_obj_id(G['torch'].abs, 4379072272)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393757856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398080272) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d3ef520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12c7f4dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4373589232)) \ - and (___check_obj_id(G['torch'].abs, 4379072272)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393757856) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398080272) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 4121b4ec..ea76f58a 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12423caf0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245fba30>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12423caf0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245fba30>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
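# Note (editor's sketch, not part of the generated output): under dynamic shapes the
# regenerated __transformed_code_* bodies pass symbolic sizes through
# torch._dynamo.utils.call_size(t, i) instead of an inline t.size(i), and unpack the
# compiled callable's tuple result into __temp_* locals rather than returning it
# directly. The code below only illustrates that calling convention; `compiled_resume`
# is a hypothetical stand-in for __compiled_fn_11, and the getattr fallback covers
# builds where call_size is not exported.
import torch
import torch._dynamo.utils as dynamo_utils

call_size = getattr(dynamo_utils, "call_size", lambda t, i: t.size(i))

def compiled_resume(s0, b, x):
    # same (size, tensor, tensor) argument layout as in the diff above
    return (x * b,)            # compiled graphs hand back a tuple of outputs

def transformed_resume(b, x):
    a = None  # keeps the local-variable count in sync with the original function
    __temp_17, = compiled_resume(call_size(b, 0), b, x)
    return __temp_17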
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1245f8790>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4344737008)) \ - and (___check_obj_id(G['torch'].abs, 4350220048)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326845600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831136) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
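# Note (editor's sketch, not part of the generated output): newer PyTorch decompiles
# the C++ GuardManager tree into a flat chain of
# `__guard_hit = __guard_hit and <leaf check>` statements instead of the single
# backslash-continued boolean expression of the old check_fn. The stubs below
# (check_tensor, check_no_aliasing) are hypothetical Python stand-ins for the C++
# guard callables Dynamo binds into the real guard; they only illustrate the
# accumulation pattern shown above.
import torch

def check_tensor(t, dtype, size):
    # stub: a None entry acts as a wildcard for a dynamic dimension
    return (isinstance(t, torch.Tensor) and t.dtype == dtype
            and all(s is None or s == d for s, d in zip(size, t.shape)))

def check_no_aliasing(*tensors):
    # stub: the real leaf rules out aliasing by comparing storages
    return len({t.untyped_storage().data_ptr() for t in tensors}) == len(tensors)

def guard_for_toy_function(L):
    __guard_hit = True
    # each leaf is AND-ed in; after the first failure the rest are skipped
    __guard_hit = __guard_hit and check_tensor(L['a'], torch.float32, [10])
    __guard_hit = __guard_hit and check_tensor(L['b'], torch.float32, [10])
    __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b'])
    return __guard_hit

a, b = torch.randn(10), torch.randn(10)
assert guard_for_toy_function({'a': a, 'b': b})       # cache entry is reusable
assert not guard_for_toy_function({'a': a, 'b': a})   # aliasing fails the guard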
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1241eb520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1237f8dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4344737008)) \ - and (___check_obj_id(G['torch'].abs, 4350220048)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326845600) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831136) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
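# Note (editor's sketch, not part of the generated output): in the forward/backward
# graph diffs that follow, the forward now returns the tuple (div, lt, primals_1)
# instead of a list that also carried `div` for backward, and the backward graph
# recomputes div_1 = primals_1 / (abs(primals_1) + 1) from the saved input. The
# autograd.Function below is only an illustration of that save-the-input-and-recompute
# idea, not the AOTAutograd-generated code itself.
import torch

class AbsDivide(torch.autograd.Function):
    @staticmethod
    def forward(ctx, a):
        ctx.save_for_backward(a)          # save the input only, not the output
        return a / (torch.abs(a) + 1)

    @staticmethod
    def backward(ctx, grad_out):
        (a,) = ctx.saved_tensors
        add = torch.abs(a) + 1            # recomputed, mirroring div_1 in the graph
        return grad_out / add - grad_out * (a / add / add) * torch.sign(a)

a = torch.randn(10, requires_grad=True)
AbsDivide.apply(a).sum().backward()
ref = a.detach().clone().requires_grad_(True)
(ref / (torch.abs(ref) + 1)).sum().backward()
assert torch.allclose(a.grad, ref.grad)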
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py index df1dd8de..9d97d16d 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index e4230abb..2d7f4057 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return 
[add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index df1dd8de..9d97d16d 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index b7e83393..defa2f56 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: 
"f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py index 2797b423..f84427fe 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = 
AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 
/ tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,13 +93,13 @@ def call(args): args.clear() assert_size_stride(primals_1, (10, ), (1, )) assert_size_stride(primals_2, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(primals_1, primals_2, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, buf0, ) + return (buf0, buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..defa2f56 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); 
div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py index 88211d70..65d9fe55 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 25ad792f..e6db532c 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 88211d70..65d9fe55 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index 2bebad1d..7854b589 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py index 43d00e34..a2081e47 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import 
device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 
100644 index 00000000..7854b589 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index b7e6eb30..b0a52a22 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11c382200>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11b966ef0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118cfcb80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11b966ef0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4369427696)) \ - and (___check_obj_id(G['torch'].abs, 4374910736)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328647840) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388462864) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index eb1455e6..084697a5 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f24ae60>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11e17ae60>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1192fcb80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11e17ae60>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4301647088)) \ - and (___check_obj_id(G['torch'].abs, 4307129728)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332842144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393705744) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 9fd48354..7e554919 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150b76ef0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x150366e60>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124decb80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x150366e60>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4349897968)) \ - and (___check_obj_id(G['torch'].abs, 4355381008)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350930080) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4422017296) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 47d8bb7e..2b323639 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index d7e9b956..21d4d18b 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py index 2e399bae..466582ab 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from 
torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < 
tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,10 +93,10 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg0_1, arg1_1, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) del arg0_1 del arg1_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..2b323639 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..21d4d18b --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in 
toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index 87b754c9..47764cc5 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 42d33ece..c7d3fcde 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py index ff6102a2..a17b3a1d 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = 
torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..47764cc5 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..c7d3fcde --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,9 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in 
torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index f65a9cf8..4c38db0a 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 368b6ef2..2ace3f25 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ba40af0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x13aeecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13b8ef520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x13aeecdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314901744)) \ - and (___check_obj_id(G['torch'].abs, 4320384784)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878624) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4370637072) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 58171981..05bef819 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124524af0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x123becdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1244eb520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x123becdc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4312214768)) \ - and (___check_obj_id(G['torch'].abs, 4317697808)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350307488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4426211600) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 446ba64d..17dbf688 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d024af0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12c700dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): - a = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cfeb520>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12c700dc0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4301057264)) \ - and (___check_obj_id(G['torch'].abs, 4306540304)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364889248) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4707229728) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index 6d8ff3a7..4f1b2ac2 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py 
b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index def6bdd8..94842116 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 8cd5b1c9..84ed7d99 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = 
torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..84ed7d99 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: 
"f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py index 6b6db425..731e3483 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py @@ -2,17 +2,17 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]", tangents_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return [mul_4, mul_3] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: 
"f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py index 2d78d92d..b19f551f 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py index 8a46a8b7..aec22424 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py @@ -3,18 +3,18 @@ def forward(self, primals, tangents): - primals_1: "f32[8]"; primals_2: "f32[8]"; tangents_1: "f32[8]"; + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = 
torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul) - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return pytree.tree_unflatten([mul_1, mul_4, mul_3], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..aec22424 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,20 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; + + primals_1, 
primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 548a6e02..caa124fb 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 644e87da..f0055de7 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ 
b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index efc8cefc..055b4058 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..055b4058 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py index 541d5db0..23637a4a 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: 
"Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py index 8f022c04..ce83484b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] + return (div, lt, primals_2, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py index 5143b8d0..5136219d 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py @@ -6,25 +6,25 @@ def forward(self, primals, tangents): primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / 
(torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, None, add_1, None, None], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..5136219d --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; + + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local 
variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index 78da8394..bc221cfa 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168692830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1689f8ca0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168692830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1689f8ca0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1686927a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308233456)) \ - and (___check_obj_id(G['torch'].abs, 4313716496)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180384) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404190864) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16865f6d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1685eeb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308233456)) \ - and (___check_obj_id(G['torch'].abs, 4313716496)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371180384) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404190864) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index c229f501..66ef0877 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f3f8ca0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f3f8ca0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db96d40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4315557104)) \ - and (___check_obj_id(G['torch'].abs, 4321040144)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405177024) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4706180752) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13db636d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x13daf2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4315557104)) \ - and (___check_obj_id(G['torch'].abs, 4321040144)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4405177024) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4706180752) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
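The ___check_obj_id guards above compare globals such as G['torch'] and G['torch'].abs against integers recorded at compile time, which is why those constants (e.g. 4371180384 vs 4405177024) differ from file to file: they are process-specific object ids. A small sketch of the assumed behaviour:

import torch

# Assumed behaviour of ___check_obj_id: compare the current id() of a global
# against the id() recorded when the frame was compiled.
def check_obj_id(obj, expected_id):
    return id(obj) == expected_id

# At compile time Dynamo records id(G['torch']) and id(G['torch'].abs);
# the guard keeps passing only while those globals are still the same objects.
recorded = {"torch": id(torch), "torch.abs": id(torch.abs)}
assert check_obj_id(torch, recorded["torch"])
assert check_obj_id(torch.abs, recorded["torch.abs"])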
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 1f44d118..ee003628 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1260f8160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1260f8160>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e96d40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314508528)) \ - and (___check_obj_id(G['torch'].abs, 4319991168)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318194848) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368539920) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125e636d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x125df2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4314508528)) \ - and (___check_obj_id(G['torch'].abs, 4319991168)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318194848) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368539920) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
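The graph dumps that follow gain an explicit SymInt input (s0: "Sym(s0)") and f32[s0] annotations once a dimension is treated as dynamic. The snippet below is an illustrative, stand-alone way to trigger that kind of signature with the eager backend; the function resume mirrors the b = b * -1; return x * b resume logic seen in the captured graphs and is not part of the test suite.

import torch
import torch._dynamo as dynamo

def resume(b, x):
    b = b * -1
    return x * b

b, x = torch.randn(8), torch.randn(8)
# mark dim 0 as dynamic so the traced graph takes a Sym(s0) argument and the
# tensor annotations become f32[s0] instead of a specialized f32[8]
dynamo.mark_dynamic(b, 0)
dynamo.mark_dynamic(x, 0)

compiled = torch.compile(resume, backend="eager", dynamic=True)
out = compiled(b, x)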
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 
1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py similarity index 64% rename from tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return 
(mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py index 99254237..2f386a3c 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..2f386a3c --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = 
torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py similarity index 57% rename from 
tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py index 1bfbdad3..722d8681 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = 
torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..722d8681 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def 
__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py index d7166e8b..911fb113 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116128f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163335b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x116128f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163335b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1163309d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308315376)) \ - and (___check_obj_id(G['torch'].abs, 4313798416)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317784848) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373782160) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1160eed40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11606d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308315376)) \ - and (___check_obj_id(G['torch'].abs, 4313798416)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317784848) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373782160) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 9a0f4250..6813ba93 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f728f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9375b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f728f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9375b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f9349d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4318981280)) \ - and (___check_obj_id(G['torch'].abs, 4332476416)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323535632) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358053520) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f6eed40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10f66d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4318981280)) \ - and (___check_obj_id(G['torch'].abs, 4332476416)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323535632) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358053520) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 18214ef6..31c80f47 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122328f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226335b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122328f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226335b0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226309d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4349930736)) \ - and (___check_obj_id(G['torch'].abs, 4355413776)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4385008800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
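The two ___check_obj_id guards at the end of __guard_1_for_forward compare G['torch'] and G['torch'].abs against object identities, which is why the large integer constants (4385008800, 4418871568, and the different values in the other full_code files) are process-specific and change every time the expected outputs are regenerated. A simplified stand-in for that check:

    import torch

    def check_obj_id(obj, expected_id: int) -> bool:
        # in CPython, object identity is what id() returns;
        # the real guard helper is only sketched here
        return id(obj) == expected_id

    torch_id, abs_id = id(torch), id(torch.abs)
    print(check_obj_id(torch, torch_id))    # True within this process
    print(check_obj_id(torch.abs, abs_id))  # True within this process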
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1222eed40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12226d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4349930736)) \ - and (___check_obj_id(G['torch'].abs, 4355413776)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4385008800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4418871568) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
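__guard_0_for_forward (size=[10]) and __guard_1_for_forward (size=[None]) correspond to a static first compilation and a dynamic recompilation. The behaviour can be reproduced with a toy module approximating the source lines quoted in the graph comments (the body of the b.sum() < 0 branch is not shown in this diff, so the one below is hypothetical); with PyTorch's default automatic dynamic shapes, a second call with a new length triggers the dynamic variant:

    import torch

    class Toy(torch.nn.Module):
        def forward(self, a, b):
            x = a / (torch.abs(a) + 1)   # test_pytorch.py:14 in the dump comments
            if b.sum() < 0:              # test_pytorch.py:15 -> graph break
                b = -b                   # hypothetical branch body
            return x * b                 # test_pytorch.py:17 (resume function)

    compiled = torch.compile(Toy(), backend="aot_eager")
    compiled(torch.randn(10), torch.randn(10))  # guards specialise on size [10]
    compiled(torch.randn(8), torch.randn(8))    # new length: recompile with dynamic size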
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index 6d8ff3a7..4f1b2ac2 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py 
b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index def6bdd8..94842116 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 8cd5b1c9..84ed7d99 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None mul_1: "f32[10]" = 
torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..84ed7d99 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = 
None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 548a6e02..caa124fb 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 644e87da..f0055de7 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, 
primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index efc8cefc..055b4058 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..055b4058 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py index 90c00ac9..ff0979d9 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128096830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11f7f2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1280636d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f7f2b90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4316802288)) \ - and (___check_obj_id(G['torch'].abs, 4322285328)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350356640) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 3ecf1aed..78ff5a9e 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11dd9e830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11dcfab90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11dd6b6d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11dcfab90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4311231728)) \ - and (___check_obj_id(G['torch'].abs, 4316714768)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329401504) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406288576) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
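The full_code_for_forward_*.py files being updated here are depyf's dump artifacts, and they can be regenerated by running a compiled module under depyf's debug context; the real harness lives in tests/test_pytorch, so the snippet below is only a sketch of the flow (prepare_debug taking the dump directory is the documented entry point). With recent PyTorch the dump folder also gains the new "pre insert_deferred_runtime_asserts" and "tensorify_python_scalars" graph files that this patch adds to the expected outputs.

    import torch
    import depyf

    def fn(a, b):
        # minimal stand-in for the module whose dumps are diffed here
        return a / (torch.abs(a) + 1) * b

    compiled = torch.compile(fn, backend="aot_eager")

    with depyf.prepare_debug("./depyf_dump"):
        compiled(torch.randn(10), torch.randn(10))
    # ./depyf_dump now contains full_code_* and __compiled_fn_* files
    # analogous to the expected outputs updated above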
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index d7eb06fe..428c3c25 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1198d2830>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11982eb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11989f6d0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11982eb90>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308954352)) \ - and (___check_obj_id(G['torch'].abs, 4314437392)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317146272) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321468688) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ 
-0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py similarity index 64% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py 
b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py similarity index 57% rename from tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- 
a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index 94dfcab6..6bdbcece 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a328f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x14a26d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a2ead40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x14a26d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4350651632)) \ - and (___check_obj_id(G['torch'].abs, 4356134672)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321143968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4408385808) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index e2e1c4b3..3be67a04 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123d28f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x123c6d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123cead40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x123c6d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4373310704)) \ - and (___check_obj_id(G['torch'].abs, 4378793344)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386073760) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4411531536) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
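The updated guard fixtures above replace each single backslash-continued boolean expression with a step-by-step accumulation into __guard_hit, built from helpers such as check_tensor and check_no_aliasing. Below is a minimal, self-contained sketch of that accumulation pattern only; fake_check_tensor and fake_check_no_aliasing are hypothetical stand-ins for the real guard primitives, used just to show that the accumulated form short-circuits exactly like the old and-chain.

# Minimal sketch of the accumulated-guard pattern seen in the updated fixtures.
# The helpers below are hypothetical stand-ins, not the real torch._dynamo guards.
import torch

def fake_check_tensor(t, *, dtype, size):
    # stand-in for check_tensor: dtype and shape must match the compile-time values
    return isinstance(t, torch.Tensor) and t.dtype == dtype and list(t.size()) == size

def fake_check_no_aliasing(*tensors):
    # stand-in for check_no_aliasing: no two inputs may share storage
    ptrs = [t.untyped_storage().data_ptr() for t in tensors]
    return len(ptrs) == len(set(ptrs))

def guard(L):
    # same shape as __guard_0_for_forward above: each step can only flip
    # __guard_hit from True to False, and `and` short-circuits once it is False
    __guard_hit = True
    __guard_hit = __guard_hit and fake_check_tensor(L['a'], dtype=torch.float32, size=[10])
    __guard_hit = __guard_hit and fake_check_tensor(L['b'], dtype=torch.float32, size=[10])
    __guard_hit = __guard_hit and fake_check_no_aliasing(L['a'], L['b'])
    return __guard_hit

a, b = torch.rand(10), torch.rand(10)
print(guard({'a': a, 'b': b}))  # True: dtypes/shapes match and no aliasing
print(guard({'a': a, 'b': a}))  # False: 'a' and 'b' share storage

The real dumped code follows the same structure, just with the C++-backed tensor checks and the ___check_* utilities listed at the top of each file.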
diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index a5842997..c1b34b9f 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118528f70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11846d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1184ead40>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11846d1b0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4384009456)) \ - and (___check_obj_id(G['torch'].abs, 4389492496)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324682512) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394753680) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No 
newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = 
__compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index 14dd42b7..68a8923e 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca8b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9d5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca8b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9d5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11cf9ec20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4335152368)) \ - and (___check_obj_id(G['torch'].abs, 4340635408)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321963168) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ca897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11c99dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4335152368)) \ - and (___check_obj_id(G['torch'].abs, 4340635408)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321963168) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
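One recurring change in the __transformed_code_* fixtures above is that a compiled graph's single output is now unpacked (__temp_7, = __compiled_fn_5(x, b)) instead of the call being returned directly. The toy example below uses a hypothetical compiled_fn, unrelated to the real __compiled_fn_* entries, to show what the trailing-comma unpacking does: the captured graphs dumped above return their outputs as a tuple (return (mul,)), so unpacking asserts there is exactly one output and hands the caller the tensor itself rather than a 1-tuple.

# Toy illustration of the 1-tuple unpacking used in the updated fixtures.
# compiled_fn is a hypothetical stand-in for a compiled graph that, like the
# captured graphs above, returns its outputs as a tuple.
import torch

def compiled_fn(x, b):
    return (x * b,)

def old_style(x, b):
    # returning the call directly would hand the caller a 1-tuple
    return compiled_fn(x, b)

def new_style(x, b):
    # the trailing comma unpacks exactly one element (and raises if there are more)
    __temp, = compiled_fn(x, b)
    return __temp

x, b = torch.rand(10), torch.rand(10)
print(type(old_style(x, b)))  # <class 'tuple'>
print(type(new_style(x, b)))  # <class 'torch.Tensor'>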
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index 3ea6c464..210eb0bb 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12018b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039d5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12018b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039d5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12039ec20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4336856304)) \ - and (___check_obj_id(G['torch'].abs, 4342339344)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355796128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423065872) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1201897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120099bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4336856304)) \ - and (___check_obj_id(G['torch'].abs, 4342339344)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355796128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423065872) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
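Note: the updated __transformed_code_1_for_forward fetches the dynamic leading dimensions through __import_torch_dot__dynamo_dot_utils.call_size, i.e. torch._dynamo.utils.call_size, instead of calling a.size(0) directly. For ordinary eager tensors the helper is expected to return the same integer, so the compiled function still receives the sizes as explicit leading arguments; the snippet below only checks that assumption.

# Sketch under the assumption that torch._dynamo.utils.call_size(t, i)
# matches t.size(i) for ordinary eager tensors.
import torch
import torch._dynamo.utils as dynamo_utils

a = torch.randn(10)
b = torch.randn(8)

assert dynamo_utils.call_size(a, 0) == a.size(0) == 10
assert dynamo_utils.call_size(b, 0) == b.size(0) == 8

# Conceptually, the transformed caller above then does:
#   __compiled_fn_7(call_size(a, 0), a, call_size(b, 0), b)
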
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 1dfb1d1c..04edf9c3 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115683370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115683370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599d510>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11599ec20>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4342738160)) \ - and (___check_obj_id(G['torch'].abs, 4351350784)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330286240) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1156817e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x115599bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4342738160)) \ - and (___check_obj_id(G['torch'].abs, 4351350784)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330286240) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4371685648) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
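Note: the long integers passed to ___check_obj_id (for example 4321963168 vs. 4355796128 across the three expected files) are object ids captured when each fixture was generated, so they necessarily change whenever the fixtures are regenerated. The sketch below shows the assumed semantics of that guard; the helper itself is injected by Dynamo and is not redefined by these files.

# Assumed semantics of ___check_obj_id: an identity check against the id()
# recorded at compile time, which is only stable within a single process.
import torch

def check_obj_id(obj, expected_id):
    return id(obj) == expected_id

recorded_torch_id = id(torch)          # stands in for the baked-in constant
assert check_obj_id(torch, recorded_torch_id)
assert not check_obj_id(torch.abs, recorded_torch_id)
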
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py 
b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git 
a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 4acf9ae0..2b58c232 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f86dd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a2a37f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f86dd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a2a37f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a058820>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4371606768)) \ - and (___check_obj_id(G['torch'].abs, 4377089808)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354895008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439843008) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x119f857e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x119e9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4371606768)) \ - and (___check_obj_id(G['torch'].abs, 4377089808)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354895008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439843008) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
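(Illustrative sketch, not part of the patch.) The regenerated fixtures above fetch sizes through __import_torch_dot__dynamo_dot_utils.call_size(a, 0), i.e. torch._dynamo.utils.call_size, instead of calling a.size(0) directly in the transformed bytecode. Assuming call_size behaves like Tensor.size(dim) for a plain eager tensor, the indirection should be observationally equivalent outside of compilation:

import torch
import torch._dynamo.utils as dynamo_utils

a = torch.randn(10)
# For an ordinary tensor, call_size(a, 0) is expected to return the same
# integer extent as a.size(0); the indirection only matters for symbolic
# shapes inside Dynamo-generated resume functions like the ones above.
print(dynamo_utils.call_size(a, 0), a.size(0))  # both expected to print 10
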
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 661d0618..81be5374 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12038add0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12059f7f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12038add0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12059f7f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120454820>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4303351024)) \ - and (___check_obj_id(G['torch'].abs, 4308834064)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360973472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428308672) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1203897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x120299bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4303351024)) \ - and (___check_obj_id(G['torch'].abs, 4308834064)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4360973472) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428308672) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
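(Hypothetical sketch, not part of the patch.) The updated resume functions now unpack a 1-tuple returned by the compiled graph (__temp_7, = __compiled_fn_5(x, b)) instead of returning the call result directly, which matches captured graphs that end with a tuple such as return (mul,). The names below are made up purely to show the pattern:

import torch

def hypothetical_compiled_fn(x, b):
    # Stand-in for a Dynamo-compiled graph: outputs are returned as a tuple.
    return (x * b,)

def hypothetical_resume(b, x):
    # Unpack the single element rather than returning the tuple itself,
    # mirroring the `__temp_7, = ...` pattern in the fixtures above.
    out, = hypothetical_compiled_fn(x, b)
    return out

print(hypothetical_resume(torch.ones(3), torch.full((3,), 2.0)))  # tensor([2., 2., 2.])
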
diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 452c87ee..08ddc00e 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11708add0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11729feb0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11708add0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11729feb0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117158820>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4379520240)) \ - and (___check_obj_id(G['torch'].abs, 4385003280)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321356480) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396850832) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1170897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x116e99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4379520240)) \ - and (___check_obj_id(G['torch'].abs, 4385003280)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321356480) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396850832) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
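(Hypothetical, simplified illustration; the real check_tensor guard in these fixtures appears to come from PyTorch's C++ guard machinery, whose exact semantics are not reproduced here.) The recompiled dynamic-shape guards above record size=[None] while the static-shape ones record size=[10]; a None entry can be read as a wildcard dimension, roughly as follows:

from typing import Optional, Sequence
import torch

def toy_check_tensor(t: torch.Tensor,
                     dtype: torch.dtype,
                     size: Sequence[Optional[int]],
                     requires_grad: bool) -> bool:
    # Simplified: the real guard also checks dispatch keys, device and strides.
    if t.dtype != dtype or t.requires_grad != requires_grad:
        return False
    if t.dim() != len(size):
        return False
    # None acts as a wildcard for a dynamic dimension; an int must match exactly.
    return all(expected is None or actual == expected
               for actual, expected in zip(t.shape, size))

print(toy_check_tensor(torch.zeros(10), torch.float32, [10], False))   # True
print(toy_check_tensor(torch.zeros(7), torch.float32, [None], False))  # True
print(toy_check_tensor(torch.zeros(7), torch.float32, [10], False))    # False
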
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre 
insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py index fd1ac7ff..0f6e7f59 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10cb8b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x10ca9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10cb897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10ca9dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4373605616)) \ - and (___check_obj_id(G['torch'].abs, 4379088656)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328762528) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387414288) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 45c9f0bb..098cbab8 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10bc8b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10bb99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
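# Note (sketch): the resume code above changed from `return __compiled_fn_5(x, b)`
# to `__temp_7, = __compiled_fn_5(x, b); return __temp_7`, i.e. the compiled
# artifact now hands back a tuple of graph outputs (the captured graph returns
# `(mul,)`) and the transformed bytecode unpacks it. fake_compiled_fn_5 below is
# a hypothetical stand-in illustrating that convention.
import torch

def fake_compiled_fn_5(x, b):
    # Mirrors "__compiled_fn_5 Captured Graph 0.py": return (mul,)
    return (x * b,)

def transformed_resume(b, x):
    __temp, = fake_compiled_fn_5(x, b)   # unpack the single graph output
    return __temp

out = transformed_resume(torch.ones(10), torch.full((10,), 2.0))
assert torch.equal(out, torch.full((10,), 2.0))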
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10bc897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10bb99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4348292336)) \ - and (___check_obj_id(G['torch'].abs, 4353775376)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358646944) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4362969360) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
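# Note (sketch): the ___check_obj_id guards above pin G['torch'] and
# G['torch'].abs by object identity; the large integers are the id() values
# recorded when the frame was compiled, which is why they differ between runs
# and between the expected files. check_obj_id below is a simplified,
# hypothetical stand-in for the real helper.
import torch

def check_obj_id(obj, expected_id):
    return id(obj) == expected_id

torch_id, torch_abs_id = id(torch), id(torch.abs)   # recorded at compile time
assert check_obj_id(torch, torch_id)
assert check_obj_id(torch.abs, torch_abs_id)
assert not check_obj_id(abs, torch_abs_id)          # a different object misses the guard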
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 23c687cb..4c0974e7 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10de8b370>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10dd99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
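# Note (sketch): ___check_global_state() guards the ambient interpreter state the
# frame was compiled under; the real GlobalStateGuard lives in C++ and covers more
# flags than shown here. A simplified stand-in, assuming grad mode and the default
# dtype are among the state it snapshots:
import torch

def snapshot_global_state():
    return (torch.is_grad_enabled(), torch.get_default_dtype())

_compiled_against = snapshot_global_state()

def check_global_state():
    return snapshot_global_state() == _compiled_against

assert check_global_state()
with torch.no_grad():
    assert not check_global_state()   # ambient grad mode changed -> guard miss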
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10de897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10dd99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4340034800)) \ - and (___check_obj_id(G['torch'].abs, 4345517840)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328500384) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388462864) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index 0a3543d1..a15de2b6 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11868add0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11859dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
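# Note (sketch): the only material difference between the with_grad and
# without_grad guard dumps is `requires_grad=True` versus `requires_grad=False`
# inside check_tensor, so identical shapes still need separate cache entries when
# that flag flips. tensor_signature below is a simplified stand-in for the
# properties check_tensor pins down.
import torch

def tensor_signature(t):
    return (t.dtype, tuple(t.size()), tuple(t.stride()), t.requires_grad)

a = torch.randn(10)
sig_inference = tensor_signature(a)
sig_training = tensor_signature(a.clone().requires_grad_(True))
assert sig_inference != sig_training      # only the requires_grad flag differs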
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1186897e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11859dbd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4309921008)) \ - and (___check_obj_id(G['torch'].abs, 4315404048)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390382752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4440891664) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index eb8c24d1..e288a62c 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bd86dd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bc99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
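# Note (sketch): the repeated check_no_aliasing(L['b'], L['x']) guards encode the
# assumption that the compiled code sees distinct tensors that do not share
# storage. shares_storage below is a simplified stand-in (the real check is more
# precise about overlapping memory ranges).
import torch

def shares_storage(t1, t2):
    return t1.untyped_storage().data_ptr() == t2.untyped_storage().data_ptr()

b, x = torch.randn(10), torch.randn(10)
assert not shares_storage(b, x)       # independent tensors: guard holds
assert shares_storage(b, b[2:])       # a view aliases its base and would miss the guard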
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12bd857e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bc99bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4308348144)) \ - and (___check_obj_id(G['torch'].abs, 4313830784)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364758176) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4774338832) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index 6ce4d338..725171a1 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10a586dd0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10a499bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
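# Note (sketch): the "__compiled_fn_1 Forward graph" and "Backward graph" hunks a
# little further below change the forward to save only primals_1 (returning
# `(div, lt, primals_1)` instead of `[div, lt, primals_1, div]`) and make the
# backward recompute `div` from primals_1. A hedged numerical check that the
# recomputed formula still matches autograd for x = a / (torch.abs(a) + 1):
import torch

def manual_backward(primals_1, tangents_1):
    add = torch.abs(primals_1) + 1
    div_2 = (primals_1 / add) / add          # recomputed; previously a saved `div`
    return tangents_1 / add - tangents_1 * div_2 * torch.sign(primals_1)

a = torch.randn(10, requires_grad=True)
x = a / (torch.abs(a) + 1)
x.backward(torch.ones_like(x))
assert torch.allclose(a.grad, manual_backward(a.detach(), torch.ones(10)))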
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10a5857e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x10a499bd0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4334857456)) \ - and (___check_obj_id(G['torch'].abs, 4340340496)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4319145120) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323467536) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py deleted file mode 100644 index def6bdd8..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index a5a384eb..a4ea69d4 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index def6bdd8..94842116 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 770d8a8d..32406778 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py index 2797b423..f84427fe 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const 
float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,13 +93,13 @@ def call(args): args.clear() assert_size_stride(primals_1, (10, ), (1, )) 
assert_size_stride(primals_2, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(primals_1, primals_2, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, buf0, ) + return (buf0, buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..32406778 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = 
torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py deleted file mode 100644 index 2d78d92d..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 AFTER POST GRAD 0.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py index 6b6db425..731e3483 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Backward graph 0.py @@ -2,17 +2,17 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]", tangents_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_5: "f32[s0]" = 
torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return [mul_4, mul_3] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py index 2d78d92d..b19f551f 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, primals_1: "f32[8]", primals_2: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul); mul = None - return [mul_1, primals_1, primals_2] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = 
torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py index 8a46a8b7..aec22424 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 Joint graph 0.py @@ -3,18 +3,18 @@ def forward(self, primals, tangents): - primals_1: "f32[8]"; primals_2: "f32[8]"; tangents_1: "f32[8]"; + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(primals_1, -1); primals_1 = None + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(primals_2, mul) - mul_2: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); primals_2 = None - mul_3: "f32[8]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_4: "f32[8]" = torch.ops.aten.mul.Tensor(mul_2, -1); mul_2 = None - return pytree.tree_unflatten([mul_1, mul_4, mul_3], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py index 3452a06f..615dbd6f 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['3_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import 
run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,27 +20,40 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - float* out_ptr0) + float* out_ptr0, + const int64_t ks0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); auto tmp2 = static_cast(-1.0); auto tmp3 = at::vec::Vectorized(tmp2); auto tmp4 = tmp1 * tmp3; auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -53,21 +64,23 @@ del async_compile def call(args): - primals_1, primals_2 = args + primals_1, primals_2, primals_3 = args args.clear() - assert_size_stride(primals_1, (8, ), (1, )) - assert_size_stride(primals_2, (8, ), (1, )) - buf0 = empty_strided_cpu((8, ), (1, ), torch.float32) - cpp_fused_mul_0(primals_2, primals_1, buf0) - return (buf0, primals_1, primals_2, ) + s0 = primals_1 + assert_size_stride(primals_2, (s0, ), (1, )) + assert_size_stride(primals_3, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(primals_3, primals_2, buf0, s0) + return (buf0, primals_2, primals_3, s0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance - primals_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + primals_1 = 8 primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([primals_1, primals_2]) + primals_3 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py new file mode 100644 index 00000000..b0beb052 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 kernel 1.py @@ -0,0 +1,105 @@ +# AOT ID: ['3_backward'] +from ctypes import c_void_p, c_long, c_int +import torch +import math +import random +import os +import tempfile +from math import inf, nan +from torch._inductor.hooks import run_intermediate_hooks +from torch._inductor.utils import maybe_profile +from torch._inductor.codegen.memory_planning import _align as align +from torch import device, empty_strided +from torch._inductor.async_compile import AsyncCompile +from torch._inductor.select_algorithm import extern_kernels +from torch._inductor.codegen.multi_kernel import MultiKernelCall + +aten = torch.ops.aten +inductor_ops = torch.ops.inductor +_quantized = torch.ops._quantized +assert_size_stride = torch._C._dynamo.guards.assert_size_stride +empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu +empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor +alloc_from_pool = torch.ops.inductor._alloc_from_pool +async_compile = AsyncCompile() +empty_strided_p2p = 
torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p + + +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, + const float* in_ptr1, + const float* in_ptr2, + float* out_ptr0, + float* out_ptr1, + const int64_t ks0) +{ + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0)); + tmp8.store(out_ptr1 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } +} +''') + + +async_compile.wait(globals()) +del async_compile + +def call(args): + primals_1, primals_2, primals_3, tangents_1 = args + args.clear() + s0 = primals_1 + assert_size_stride(primals_2, (s0, ), (1, )) + assert_size_stride(primals_3, (s0, ), (1, )) + assert_size_stride(tangents_1, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + buf1 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s0) + del primals_2 + del primals_3 + del tangents_1 + return (None, buf1, buf0, ) + + +def benchmark_compiled_module(times=10, repeat=10): + from torch._dynamo.testing import rand_strided + from torch._inductor.utils import print_performance + primals_1 = 8 + primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + primals_3 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + tangents_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([primals_1, primals_2, primals_3, tangents_1]) + return print_performance(fn, times=times, repeat=repeat) + + +if __name__ == "__main__": + from 
torch._inductor.wrapper_benchmark import compiled_module_main + compiled_module_main('None', benchmark_compiled_module) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..aec22424 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,20 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; + + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py deleted file mode 100644 index 644e87da..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in 
torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 548a6e02..caa124fb 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 644e87da..f0055de7 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py 
index efc8cefc..055b4058 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py index 43d00e34..a2081e47 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - 
tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..055b4058 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py deleted file mode 100644 index 8f022c04..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: 
"f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py deleted file mode 100644 index 54105145..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 AFTER POST GRAD 1.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py index 54105145..489e3e02 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", div: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div, add); div = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + 
div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [None, add_1, None, None] + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py index 8f022c04..ce83484b 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add); add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, 
add_2); add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_2, div, primals_1] + return (div, lt, primals_2, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py index 698d1ed0..ef6c1324 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 Joint graph 0.py @@ -6,25 +6,25 @@ def forward(self, primals, tangents): primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_1: "f32[s0]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / 
(torch.abs(a) + 1) - add_1: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, None, add_1, None, None], self._out_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py index d4ba001a..643b9054 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,67 +20,68 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*', 'const long', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2, - const long ks0, - const long ks1) + bool* out_ptr1, + float* out_ptr2, + const int64_t ks0, + const int64_t ks1) { - { - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 
= decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks1, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks1, 8L))); x0(ks1); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } } } ''') @@ -98,13 +97,13 @@ def call(args): s1 = primals_3 assert_size_stride(primals_2, (s0, ), (1, )) assert_size_stride(primals_4, (s1, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_4, buf0, buf1, buf2, s0, s1) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s1, s0) del buf1 del primals_4 - return (buf0, buf2, primals_2, buf0, s0, ) + return (buf0, buf2, primals_2, s0, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py index 9492d6b9..4df30593 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 kernel 1.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_backward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,31 +20,32 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - const float* in_ptr2, float* out_ptr0, - const long ks0) + const int64_t ks0) { { - for(long x0=static_cast(0L); 
x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); - auto tmp8 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp1.abs(); auto tmp3 = static_cast(1.0); auto tmp4 = at::vec::Vectorized(tmp3); auto tmp5 = tmp2 + tmp4; auto tmp6 = tmp0 / tmp5; auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; auto tmp9 = tmp8 / tmp5; auto tmp10 = tmp7 * tmp9; auto tmp11 = @@ -60,33 +59,33 @@ ; auto tmp12 = tmp10 * tmp11; auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0)); + tmp13.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp7 = in_ptr2[static_cast(x0)]; - auto tmp2 = std::abs(tmp1); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = tmp1.abs(); auto tmp3 = static_cast(1.0); - auto tmp4 = decltype(tmp2)(tmp2 + tmp3); - auto tmp5 = tmp0 / tmp4; - auto tmp6 = decltype(tmp0)(-tmp0); - auto tmp8 = tmp7 / tmp4; - auto tmp9 = decltype(tmp6)(tmp6 * tmp8); - auto tmp10 = + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = [&]() { - auto left = tmp1 > 0 ? decltype(tmp1)(1) : decltype(tmp1)(0); - auto right = tmp1 < 0 ? 
decltype(tmp1)(1) : decltype(tmp1)(0); + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); return left - right; } () ; - auto tmp11 = decltype(tmp9)(tmp9 * tmp10); - auto tmp12 = decltype(tmp5)(tmp5 + tmp11); - out_ptr0[static_cast(x0)] = tmp12; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -97,15 +96,13 @@ del async_compile def call(args): - primals_1, primals_2, div, tangents_1 = args + primals_1, primals_2, tangents_1 = args args.clear() s0 = primals_1 assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(div, (s0, ), (1, )) assert_size_stride(tangents_1, (s0, ), (1, )) buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, div, buf0, s0) - del div + cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s0) del primals_2 del tangents_1 return (None, buf0, None, None, ) @@ -116,9 +113,8 @@ def benchmark_compiled_module(times=10, repeat=10): from torch._inductor.utils import print_performance primals_1 = 8 primals_2 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - div = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) tangents_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([primals_1, primals_2, div, tangents_1]) + fn = lambda: call([primals_1, primals_2, tangents_1]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..ef6c1324 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; + + 
primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None + mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), 
b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py index 9350f162..b6aecbd9 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ed55630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dd00b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ed55630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dd00b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13daee200>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4343196912)) \ - and (___check_obj_id(G['torch'].abs, 4348679952)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392938656) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12ecdd360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12e7d0b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4343196912)) \ - and (___check_obj_id(G['torch'].abs, 4348679952)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392938656) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4427260176) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py index 227a54a3..16867c2f 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f6d630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1303f8b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f6d630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1303f8b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1302eee60>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4303760624)) \ - and (___check_obj_id(G['torch'].abs, 4309243664)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4398361360) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4570914448) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127f39360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1278ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4303760624)) \ - and (___check_obj_id(G['torch'].abs, 4309243664)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4398361360) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4570914448) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 52a32632..5781b0d8 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152571630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15d508b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152571630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15d508b80>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1528eee60>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4350635248)) \ - and (___check_obj_id(G['torch'].abs, 4364359600)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371787072) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424114208) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x152549360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11f6e8b80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4350635248)) \ - and (___check_obj_id(G['torch'].abs, 4364359600)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4371787072) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424114208) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
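# Note: the decompiled resume functions above no longer return the compiled
# call directly; they unpack a one-element tuple first (e.g.
# "__temp_7, = __compiled_fn_5(x, b)"), presumably because the regenerated
# graphs now always hand back a tuple of outputs. A minimal sketch of the two
# conventions, with compiled_fn as a hypothetical stand-in:
def _old_style(compiled_fn, x, b):
    return compiled_fn(x, b)   # older output returned the call result as-is

def _new_style(compiled_fn, x, b):
    out, = compiled_fn(x, b)   # newer output unpacks the single element
    return out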
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py index 2e399bae..466582ab 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import 
_align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + 
for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,10 +93,10 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg0_1, arg1_1, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) del arg0_1 del arg1_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py similarity index 63% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py index d7e9b956..b9545e37 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, 
code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py deleted file mode 100644 index 99254237..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 AFTER POST GRAD 0.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py index 4adebe50..97dd87fe 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Captured Graph 0.py @@ -2,14 +2,14 @@ -def forward(self, L_b_: "f32[8]", L_x_: "f32[8]"): +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): l_b_ = L_b_ l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[8]" = l_b_ * -1; l_b_ = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = l_x_ * b; l_x_ = b = None + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None return (mul_1,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py index 99254237..2f386a3c 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py +++ 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 Forward graph 0.py @@ -2,11 +2,11 @@ -def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, -1); arg0_1 = None +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(arg1_1, mul); arg1_1 = mul = None - return (mul_1,) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py index 4499f78e..4447a169 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['3_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,27 +20,40 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, - float* out_ptr0) + float* out_ptr0, + const int64_t ks0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); 
x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); auto tmp2 = static_cast(-1.0); auto tmp3 = at::vec::Vectorized(tmp2); auto tmp4 = tmp1 * tmp3; auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -53,23 +64,25 @@ del async_compile def call(args): - arg0_1, arg1_1 = args + arg0_1, arg1_1, arg2_1 = args args.clear() - assert_size_stride(arg0_1, (8, ), (1, )) - assert_size_stride(arg1_1, (8, ), (1, )) - buf0 = empty_strided_cpu((8, ), (1, ), torch.float32) - cpp_fused_mul_0(arg1_1, arg0_1, buf0) - del arg0_1 + s0 = arg0_1 + assert_size_stride(arg1_1, (s0, ), (1, )) + assert_size_stride(arg2_1, (s0, ), (1, )) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_mul_0(arg2_1, arg1_1, buf0, s0) del arg1_1 + del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance - arg0_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + arg0_1 = 8 arg1_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) - fn = lambda: call([arg0_1, arg1_1]) + arg2_1 = rand_strided((8, ), (1, ), device='cpu', dtype=torch.float32) + fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py new file mode 100644 index 00000000..78cad323 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 pre insert_deferred_runtime_asserts __compiled_fn_11 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s0]" = l_b_ * -1; l_b_ = None + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py new file mode 100644 index 00000000..2f386a3c --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11 tensorify_python_scalars 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py index ff6102a2..a17b3a1d 100644 --- 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: 
"f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py similarity index 56% rename from tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py index 42d33ece..b036c79d 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py deleted file mode 100644 index 1bfbdad3..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 AFTER POST GRAD 0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py index a712b834..67c99a8b 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]" l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: 
x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.abs(l_a_) add: "f32[s0]" = abs_1 + 1; abs_1 = None x: "f32[s0]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py index 1bfbdad3..722d8681 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add); arg1_1 = add = None + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py index 00f8fb4e..326bb816 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['2_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,67 +20,68 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = 
torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*', 'const long', 'const long'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2, - const long ks0, - const long ks1) + bool* out_ptr1, + float* out_ptr2, + const int64_t ks0, + const int64_t ks1) { - { - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks0, 8L))); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks0, 8L))); x0(ks0); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L*(c10::div_floor_integer(ks1, 8L))); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L*(c10::div_floor_integer(ks1, 8L))); x0(ks1); x0+=static_cast(1L)) + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } } } ''') @@ -98,10 +97,10 @@ def call(args): s1 = arg2_1 assert_size_stride(arg1_1, (s0, ), (1, )) assert_size_stride(arg3_1, (s1, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg3_1, buf0, buf1, buf2, s0, s1) + buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf1, buf2, buf0, s1, s0) del arg1_1 del arg3_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py new file mode 100644 index 00000000..67c99a8b --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 pre insert_deferred_runtime_asserts __compiled_fn_7 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.abs(l_a_) + add: "f32[s0]" = abs_1 + 1; abs_1 = None + x: "f32[s0]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py new file mode 100644 index 00000000..722d8681 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7 tensorify_python_scalars 0.py @@ -0,0 +1,15 @@ +from __future__ import annotations + + + +def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (div, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py index 50965718..a1e40aaf 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py @@ -1,5 +1,7 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + 
.call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 84d04001..3deb3869 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,6 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 5c9d91f7..03e7e449 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223725f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
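
The rewritten resume functions above now pass the dynamic dimension to the compiled graph explicitly, through the helper the decompiled source imports as __import_torch_dot__dynamo_dot_utils.call_size, and unpack a one-element result tuple instead of returning the callable's output directly. Below is a minimal runnable sketch of that calling convention; both the size helper and the compiled graph are local stand-ins, not the real objects.

import torch

def call_size(t, dim):
    # stand-in for the helper the decompiled code imports as
    # __import_torch_dot__dynamo_dot_utils.call_size; assumed to act like t.size(dim)
    return t.size(dim)

def compiled_fn_11_stub(s0, b, x):
    # stand-in for the compiled graph: like the real callable it returns its
    # outputs as a tuple, here with a single element (s0 is accepted but unused)
    return (x + b,)

def resume_in_forward_at_15(b, x):
    a = None; self = None  # padding locals, mirroring the decompiled code above
    __temp, = compiled_fn_11_stub(call_size(b, 0), b, x)  # unpack the 1-tuple
    return __temp

print(resume_in_forward_at_15(torch.ones(3), torch.zeros(3)))
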
@@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. 
Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1223725f0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
+ call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12230d5a0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4301712624)) \ - and (___check_obj_id(G['torch'].abs, 4307195264)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330629904) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4344422032) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121f4ce50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12163a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4301712624)) \ - and (___check_obj_id(G['torch'].abs, 4307195264)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330629904) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4344422032) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
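
Both the captured graph and the ATen forward graph recorded for __compiled_fn_7 compute the same two outputs: the normalized tensor x and the boolean predicate for the `if b.sum() < 0:` branch. As a reading aid, here is an eager-mode sketch of those outputs; it mirrors the dumped graph only, not the generated Inductor kernel.

import torch

def compiled_fn_7_eager(a: torch.Tensor, b: torch.Tensor):
    # test_pytorch.py:14 -- x = a / (torch.abs(a) + 1)
    x = a / (torch.abs(a) + 1)
    # test_pytorch.py:15 -- if b.sum() < 0:  (only the predicate is computed in-graph)
    lt = b.sum() < 0
    return x, lt

a, b = torch.randn(6), torch.randn(8)
x, take_branch = compiled_fn_7_eager(a, b)
print(x.shape, bool(take_branch))
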
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 26020ff9..1e2e8bea 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
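
The guard bodies in these expected files changed from one chained boolean expression into a sequence of `__guard_hit = __guard_hit and ...` steps built from helpers such as check_tensor and check_no_aliasing. Those helpers come from Dynamo's C++ guard machinery; the sketch below substitutes plain-Python stand-ins purely so the shape of the logic is runnable.

import torch

def check_tensor(t, dtype, size):
    # stand-in for the real tensor guard: dtype and (static) shape must match
    return isinstance(t, torch.Tensor) and t.dtype == dtype and list(t.shape) == size

def check_no_aliasing(*tensors):
    # stand-in: the guarded tensors must not share memory
    return len({t.data_ptr() for t in tensors}) == len(tensors)

def guard_for_resume_at_15(L):
    hit = True
    hit = hit and check_tensor(L['b'], torch.float32, [10])
    hit = hit and not hasattr(L['b'], '_dynamo_dynamic_indices')
    hit = hit and check_tensor(L['x'], torch.float32, [10])
    hit = hit and not hasattr(L['x'], '_dynamo_dynamic_indices')
    hit = hit and check_no_aliasing(L['b'], L['x'])
    return hit

print(guard_for_resume_at_15({'b': torch.ones(10), 'x': torch.zeros(10)}))  # True
print(guard_for_resume_at_15({'b': torch.ones(10), 'x': torch.ones(3)}))    # False
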
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x113080430>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
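
The recurring `a = None; self = None` line exists, per its own comment, so that the regenerated function compiles to bytecode with at least as many local variables as the original resume function. A small illustration of that effect with two hypothetical functions:

def original_resume(b, x):
    # the original resume function still mentions 'a' and 'self' as locals
    a = b + x
    self = None
    return a

def decompiled_resume(b, x):
    a = None; self = None  # padding: declared but never used
    return b + x

# both code objects now reserve slots for b, x, a and self
print(original_resume.__code__.co_nlocals, decompiled_resume.__code__.co_nlocals)  # 4 4
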
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x113080430>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
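
In the check_tensor calls above, size=[10] (the first, static-shape guard) pins dimension 0 to exactly 10, while size=[None] (the recompiled, dynamic-shape guard) leaves that dimension unchecked; the old forward guards' explicit `2 <= L['a'].size()[0]` bounds are likewise dropped from the rewritten versions. A short sketch of the None-means-unchecked convention, using a hypothetical match_size helper:

import torch

def match_size(t, pattern):
    # hypothetical helper: None entries accept any extent in that dimension
    if t.dim() != len(pattern):
        return False
    return all(p is None or d == p for d, p in zip(t.shape, pattern))

b = torch.randn(10)
print(match_size(b, [10]))               # static-shape guard: only length 10 passes
print(match_size(b, [None]))             # dynamic-shape guard: any length passes
print(match_size(torch.randn(7), [10]))  # False -> would force a recompilation
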
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1130169e0>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4302040304)) \ - and (___check_obj_id(G['torch'].abs, 4307523344)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388433056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404191504) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
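
___check_obj_id(G['torch'], ...) pins the global torch module (and torch.abs) to the exact objects seen at trace time; the literal ids are process-specific, which is why they differ between the old and new expected outputs. A sketch of what such an identity guard reduces to, with a stand-in helper:

import torch

def check_obj_id(obj, expected_id):
    # stand-in for ___check_obj_id: object identity, not equality
    return id(obj) == expected_id

recorded_torch_id = id(torch)      # captured when the guard was built
recorded_abs_id = id(torch.abs)

print(check_obj_id(torch, recorded_torch_id))     # True in this process
print(check_obj_id(torch.abs, recorded_abs_id))   # True: same function object
print(check_obj_id(torch.nn, recorded_torch_id))  # False: different object
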
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112b5ce50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x112236680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4302040304)) \ - and (___check_obj_id(G['torch'].abs, 4307523344)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388433056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404191504) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
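
The long triple-quoted preamble now emitted before each guard lists the names the guard body expects to resolve (SymPy-style symbols, the ___check_* helpers, utils_device, torch, and so on), with their reprs kept for documentation. The sketch below shows the general pattern of executing guard source against a prepared namespace; it is an illustration under that assumption, not depyf's actual loader.

import torch

GUARD_SRC = """
def my_guard(L):
    hit = True
    hit = hit and utils_check_device()
    hit = hit and isinstance(L['a'], torch.Tensor)
    return hit
"""

# the namespace plays the role of the documented preamble: every free name the
# guard body uses must be provided before the source is executed
namespace = {
    "torch": torch,
    "utils_check_device": lambda: True,  # stand-in helper
}
exec(compile(GUARD_SRC, "<guard>", "exec"), namespace)
my_guard = namespace["my_guard"]
print(my_guard({"a": torch.ones(2)}))  # True
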
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 7eeacb9c..df3f442d 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
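
Each guard here is paired with a transformed_* entry point that, according to the notes in these files, runs the transformed code only when the guard passes and otherwise falls back to the original bytecode. The dispatch shape, sketched with hypothetical names (real dispatch is performed by Dynamo's cache entries rather than hand-written Python):

import torch

def original_forward(a, b):
    # eager fallback, taken when no guard matches
    return a / (torch.abs(a) + 1)

def transformed_forward(a, b):
    # stands in for __transformed_code_* calling the compiled graph
    return a / (torch.abs(a) + 1)

def guard_0(L):
    return isinstance(L['a'], torch.Tensor) and list(L['a'].shape) == [10]

def dispatch_forward(a, b):
    L = {'a': a, 'b': b}
    if guard_0(L):
        return transformed_forward(a, b)   # guarded, compiled path
    return original_forward(a, b)          # fallback (in reality: recompile)

print(dispatch_forward(torch.randn(10), torch.randn(10)).shape)
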
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -34,12 +92,69 @@ def transformed___resume_at_38_3(b, x): #============ end of __resume_at_38_3 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111178430>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. 
# Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -51,8 +166,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -70,12 +187,69 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -87,8 +261,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,12 +280,69 @@ def transformed___resume_at_38_9(b, x): #============ end of __resume_at_38_9 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111178430>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_11*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -122,8 +354,10 @@ def __compiled_fn_11(*args, **kwargs): pass def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_11(b, x) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. + call_size(b, 0), b, x) + return __temp_17 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -141,16 +375,71 @@ def transformed___resume_at_30_8(b, x): #============ end of __resume_at_30_8 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11111aa70>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4306873584)) \ - and (___check_obj_id(G['torch'].abs, 4312356624)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) \ - and (2 <= L['a'].size()[0]) \ - and (2 <= L['b'].size()[0]) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391431328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395753744) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_7*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -162,21 +451,80 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(a.size(0), a, b.size(0), b) + __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils + .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, + 0), b) x = __temp_11 if __temp_12: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x110e4ce50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x11043e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4306873584)) \ - and (___check_obj_id(G['torch'].abs, 4312356624)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391431328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395753744) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py deleted file mode 100644 index def6bdd8..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py index a5a384eb..a4ea69d4 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Backward graph 0.py @@ -2,18 +2,19 @@ -def forward(self, primals_1: "f32[10]", div: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) +def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div, add); div = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return [add_1, None] + return (add_1, None) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- 
a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py index def6bdd8..94842116 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Forward graph 0.py @@ -3,13 +3,13 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return [div, lt, primals_1, div] + return (div, lt, primals_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py index 770d8a8d..32406778 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 Joint graph 0.py @@ -6,16 +6,16 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: 
"f32[10]" = torch.ops.aten.abs.default(primals_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None @@ -24,7 +24,7 @@ def forward(self, primals, tangents): sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py index 2797b423..f84427fe 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, 
+cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,13 +93,13 @@ def call(args): args.clear() assert_size_stride(primals_1, (10, ), (1, )) assert_size_stride(primals_2, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(primals_1, 
primals_2, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, buf0, ) + return (buf0, buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py new file mode 100644 index 00000000..32406778 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a 
/ (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py deleted file mode 100644 index 644e87da..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py index 548a6e02..caa124fb 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Backward graph 0.py @@ -3,8 +3,8 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return [mul_2, mul_1] + return (mul_2, mul_1) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py index 644e87da..f0055de7 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py +++ 
b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return [mul, primals_1, primals_2] + return (mul, primals_1, primals_2) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py index efc8cefc..055b4058 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 Joint graph 0.py @@ -6,7 +6,7 @@ def forward(self, primals, tangents): primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py index 43d00e34..a2081e47 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_forward'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' 
-#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py new file mode 100644 index 00000000..055b4058 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -0,0 +1,14 @@ +from __future__ import annotations + + + +def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, 
primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py index b0814dbd..512666c8 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x148f6d630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. 
at 0x1487ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x148efd360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x1487ecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4316507376)) \ - and (___check_obj_id(G['torch'].abs, 4321990416)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355566752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4399997200) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 7a002450..5bb30e92 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cd69630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bdacb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12cd3d360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12bdacb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4371410160)) \ - and (___check_obj_id(G['torch'].abs, 4376892800)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325420192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4374831376) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 3005abf0..3daa7a59 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f46d630>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12dcecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f3fd360>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12dcecb80>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4302220528)) \ - and (___check_obj_id(G['torch'].abs, 4307703568)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389924000) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4411531536) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py index 87116cd6..9e2b0f38 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Captured Graph 0.py @@ -6,12 +6,12 @@ def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): l_a_ = L_a_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.abs(l_a_) add: "f32[10]" = abs_1 + 1; abs_1 = None x: "f32[10]" = l_a_ / add; l_a_ = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = l_b_.sum(); l_b_ = None lt: "b8[]" = sum_1 < 0; sum_1 = None return (x, lt) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py index 3fcc2a27..b9545e37 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 Forward graph 0.py @@ -3,12 +3,12 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py index 2e399bae..466582ab 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['0_inference'] -from ctypes import 
c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random @@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,65 +20,66 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p -cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*', 'bool*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, - float* out_ptr1, - bool* out_ptr2) + bool* out_ptr1, + float* out_ptr2) { - { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) - { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = std::abs(tmp0); - auto tmp2 = static_cast(1.0); - auto tmp3 = decltype(tmp1)(tmp1 + tmp2); - auto tmp4 = tmp0 / tmp3; - out_ptr0[static_cast(x0)] = tmp4; - } - } { { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); tmp_acc0_vec = tmp_acc0_vec + tmp0; } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr1[static_cast(x0)]; - tmp_acc0 = tmp_acc0 + tmp0; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); } - tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); - out_ptr1[static_cast(0L)] = static_cast(tmp_acc0); + tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); + 
out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr1[static_cast(0L)]; + auto tmp0 = out_ptr0[static_cast(0LL)]; auto tmp1 = static_cast(0.0); auto tmp2 = tmp0 < tmp1; - out_ptr2[static_cast(0L)] = tmp2; + out_ptr1[static_cast(0LL)] = tmp2; + } + { + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } } } ''') @@ -94,10 +93,10 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - cpp_fused_abs_add_div_lt_sum_0(arg0_1, arg1_1, buf0, buf1, buf2) + buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) del arg0_1 del arg1_1 return (buf0, buf2, ) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py new file mode 100644 index 00000000..9e2b0f38 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + + +def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (x, lt) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py similarity index 63% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py index d7e9b956..b9545e37 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1 tensorify_python_scalars 0.py @@ -3,12 +3,12 @@ def 
forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None return (div, lt) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py index c6372071..092bb929 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Captured Graph 0.py @@ -6,7 +6,7 @@ def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): l_x_ = L_x_ l_b_ = L_b_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py index 4098bbbc..b036c79d 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 Forward graph 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py index ff6102a2..a17b3a1d 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 kernel 0.py @@ -1,6 +1,5 @@ - # AOT ID: ['1_inference'] -from ctypes import c_void_p, c_long +from ctypes import c_void_p, c_long, c_int import torch import math import random 
@@ -10,7 +9,6 @@ from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align - from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels @@ -22,32 +20,33 @@ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda +empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu +reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool -reinterpret_tensor = torch.ops.inductor._reinterpret_tensor async_compile = AsyncCompile() +empty_strided_p2p = torch._C._distributed_c10d._SymmetricMemory.empty_strided_p2p cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/sk/cskh5dx62fglpphcrl6723dnmowdabouerrzy3dmqcngbxwfa7bv.h" -extern "C" void kernel(const float* in_ptr0, +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(long x0=static_cast(0L); x0(8L); x0+=static_cast(8L)) + for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 8); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), 8); + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); + tmp2.store(out_ptr0 + static_cast(x0)); } - #pragma omp simd simdlen(4) - for(long x0=static_cast(8L); x0(10L); x0+=static_cast(1L)) + for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) { - auto tmp0 = in_ptr0[static_cast(x0)]; - auto tmp1 = in_ptr1[static_cast(x0)]; - auto tmp2 = decltype(tmp0)(tmp0 * tmp1); - out_ptr0[static_cast(x0)] = tmp2; + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); } } } diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py new file mode 100644 index 00000000..092bb929 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 pre insert_deferred_runtime_asserts __compiled_fn_5 0.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py similarity index 56% rename from tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py rename to tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py index 42d33ece..b036c79d 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5 AFTER POST GRAD 0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5 tensorify_python_scalars 0.py @@ -3,7 +3,7 @@ def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None return (mul,) \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index 6b2c58d2..c167728a 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,4 +1,5 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py index a480754d..eb029876 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127d3d240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12563a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
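Editor's note: the transformed resume function now unpacks the compiled graph's result (`__temp_7, = __compiled_fn_5(x, b)`) rather than returning it directly, because the captured graph returns its outputs as a one-element tuple `(mul,)`. A small sketch of the difference, with a dummy function standing in for `__compiled_fn_5`:

import torch

def fake_compiled_fn(x, b):
    # Captured graphs return their outputs as a tuple, even for a single output.
    return (x * b,)

def old_style(x, b):
    return fake_compiled_fn(x, b)      # returns a 1-tuple, not a tensor

def new_style(x, b):
    __temp, = fake_compiled_fn(x, b)   # unpack the single output
    return __temp

x, b = torch.randn(10), torch.randn(10)
print(type(old_style(x, b)))  # tuple
print(type(new_style(x, b)))  # torch.Tensor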
@@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127d3ce50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12563a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4306349296)) \ - and (___check_obj_id(G['torch'].abs, 4311832336)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391611152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4578254320) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
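Editor's note: `check_tensor` replaces the old `___check_tensors(..., tensor_check_names=...)` call and pins down dtype, device, requires_grad, size and stride per input. A hedged pure-Python approximation of what such a leaf guard verifies (the real guard is a C++ GuardManager leaf and also checks the dispatch key set):

import torch

def check_tensor_sketch(t, dtype, device, requires_grad, size, stride):
    # Approximates the properties the C++ check_tensor leaf guard pins down.
    return (isinstance(t, torch.Tensor)
            and t.dtype == dtype
            and (device is None or t.device == torch.device(device))
            and t.requires_grad == requires_grad
            and list(t.size()) == size
            and list(t.stride()) == stride)

a = torch.randn(10)
print(check_tensor_sketch(a, torch.float32, None, False, [10], [1]))  # True
print(check_tensor_sketch(a, torch.float32, None, True, [10], [1]))   # False: requires_grad mismatch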
diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py index f11500c8..2bde9abb 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12db65240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12d13a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
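Editor's note: `check_no_aliasing(L['a'], L['b'])` appears once per guarded tensor pair because the cached graph assumes its inputs do not share storage, so aliasing must be re-verified on every call. A rough stand-in using storage base pointers (the real check is stricter and also reasons about overlapping views):

import torch

def check_no_aliasing_sketch(*tensors):
    # Conservative stand-in: fail if any two inputs share the same storage.
    ptrs = [t.untyped_storage().data_ptr() for t in tensors]
    return len(ptrs) == len(set(ptrs))

a = torch.randn(10)
b = torch.randn(10)
print(check_no_aliasing_sketch(a, b))       # True: independent storages
print(check_no_aliasing_sketch(a, a[2:5]))  # False: the slice aliases a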
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12db64e50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12d13a680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4374703344)) \ - and (___check_obj_id(G['torch'].abs, 4380185984)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4369083232) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4701986448) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py index ccb9f215..f363c215 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -1,10 +1,67 @@ +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125165240>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12462e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['b'], L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). 
@@ -16,8 +73,9 @@ def __compiled_fn_5(*args, **kwargs): pass def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): - a = None; self = None # this line helps the compiler to generate bytecode with at least the same number of local variables as the original function - return __compiled_fn_5(x, b) + a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function + __temp_7, = __compiled_fn_5(x, b) + return __temp_7 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -47,14 +105,71 @@ def transformed___resume_at_30_2(b, x): #============ end of __resume_at_30_2 ============# +# Note: the following variables are used inside the guard function. +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x125164e50>''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +FloatPow = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = '''. at 0x12462e680>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' +__math_isnan = '''''' +__numpy_isnan = '''None''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['a'], '_dynamo_dynamic_indices') == False) \ - and (hasattr(L['b'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_obj_id(G['torch'], 4378340592)) \ - and (___check_obj_id(G['torch'].abs, 4383823632)) \ - and (___check_tensors(L['a'], L['b'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() + __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False + __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4348963280) + 
__guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563574496) + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py index fc7c7b66..3b7d87c3 100644 --- a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py @@ -5,7 +5,7 @@ def forward(self, L_x_: "f32[5]"): l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:6 in f, code: return x + 1 add: "f32[5]" = l_x_ + 1; l_x_ = None return (add,) \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py new file mode 100644 index 00000000..fc7c7b66 --- /dev/null +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py @@ -0,0 +1,11 @@ +from __future__ import annotations + + + +def forward(self, L_x_: "f32[5]"): + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 + add: "f32[5]" = l_x_ + 1; l_x_ = None + return (add,) + \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py b/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py index 7eb58947..b1630a3a 100644 --- a/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py +++ b/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py @@ -1,2 +1,3 @@ def __transformed_code_0_for_f(x): - return __compiled_fn_1(x) + __temp_2, = __compiled_fn_1(x) + return __temp_2 diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_0.py b/tests/depyf_output/multiprocessing/full_code_for_f_0.py index e6cd4721..f4b52df1 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_0.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_0.py @@ -1,14 +1,57 @@ # Note: the following variables are used inside the guard function. 
-___check_global_state = '''''' -___check_tensors = '''''' +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x175ce80d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -20,7 +63,8 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_f(x): - return __compiled_fn_1(x) + __temp_2, = __compiled_fn_1(x) + return __temp_2 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_1.py b/tests/depyf_output/multiprocessing/full_code_for_f_1.py index ff30fb9e..97cf3b86 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_1.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_1.py @@ -1,14 +1,57 @@ # Note: the following variables are used inside the guard function. 
-___check_global_state = '''''' -___check_tensors = '''''' +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x175ce80d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -20,7 +63,8 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_f(x): - return __compiled_fn_1(x) + __temp_2, = __compiled_fn_1(x) + return __temp_2 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_10.py b/tests/depyf_output/multiprocessing/full_code_for_f_10.py new file mode 100644 index 00000000..ccf9f683 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_10.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x143de4430>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_11.py b/tests/depyf_output/multiprocessing/full_code_for_f_11.py new file mode 100644 index 00000000..47f1840a --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_11.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x14fbe4430>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_2.py b/tests/depyf_output/multiprocessing/full_code_for_f_2.py index 5998c1e3..3bf694a6 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_2.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_2.py @@ -1,14 +1,57 @@ # Note: the following variables are used inside the guard function. 
-___check_global_state = '''''' -___check_tensors = '''''' +___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x175ce80d0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): - return (___check_global_state()) \ - and (hasattr(L['x'], '_dynamo_dynamic_indices') == False) \ - and (utils_device.CURRENT_DEVICE == None) \ - and (___check_tensors(L['x'], tensor_check_names=tensor_check_names)) + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. # Captured Graph: Dynamo generated graph (debuggable when using eager backend). @@ -20,7 +63,8 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_f(x): - return __compiled_fn_1(x) + __temp_2, = __compiled_fn_1(x) + return __temp_2 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_3.py b/tests/depyf_output/multiprocessing/full_code_for_f_3.py new file mode 100644 index 00000000..e0309310 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_3.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
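The `check_tensor(...)` call in these new guards is PyTorch's built-in tensor guard; roughly, it pins down the metadata of `L['x']`. A rough Python approximation of the properties it checks in this hunk (an illustration only, not the actual guard implementation):

import torch

def looks_like_guarded_tensor(x):
    # approximate the metadata the check_tensor guard pins down above:
    # type, dtype, device, requires_grad, size and stride
    return (
        isinstance(x, torch.Tensor)
        and x.dtype == torch.float32
        and x.device.type == "cpu"
        and x.requires_grad is False
        and tuple(x.shape) == (5,)
        and x.stride() == (1,)
    )

assert looks_like_guarded_tensor(torch.ones(5))
assert not looks_like_guarded_tensor(torch.ones(4))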
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1354dc3a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_4.py b/tests/depyf_output/multiprocessing/full_code_for_f_4.py new file mode 100644 index 00000000..a84f42ca --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_4.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1320e43a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_5.py b/tests/depyf_output/multiprocessing/full_code_for_f_5.py new file mode 100644 index 00000000..ced483a3 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_5.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1376e83a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_6.py b/tests/depyf_output/multiprocessing/full_code_for_f_6.py new file mode 100644 index 00000000..fd435954 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_6.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x16d1e43a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_7.py b/tests/depyf_output/multiprocessing/full_code_for_f_7.py new file mode 100644 index 00000000..912a0b46 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_7.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1281e03a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_8.py b/tests/depyf_output/multiprocessing/full_code_for_f_8.py new file mode 100644 index 00000000..3c13fac9 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_8.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1496e83a0>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. + return f(x) + +#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_9.py b/tests/depyf_output/multiprocessing/full_code_for_f_9.py new file mode 100644 index 00000000..eb9ebb10 --- /dev/null +++ b/tests/depyf_output/multiprocessing/full_code_for_f_9.py @@ -0,0 +1,82 @@ + +# Note: the following variables are used inside the guard function. 
+___check_tensors = '''None''' +___check_tensors_verbose = '''None''' +___check_global_state = '''''' +tensor_check_names = '''["L['x']"]''' +Abs = '''''' +Eq = '''''' +Ne = '''''' +Gt = '''''' +Lt = '''''' +Le = '''''' +Ge = '''''' +Min = '''''' +Max = '''''' +Mod = '''''' +PythonMod = '''''' +FloorDiv = '''''' +TrueDiv = '''''' +IsNonOverlappingAndDenseIndicator = '''''' +floor = '''''' +ceiling = '''''' +FloorToInt = '''''' +CeilToInt = '''''' +cast_symbool_to_symint_guardless = '''''' +RoundToInt = '''''' +RoundDecimal = '''''' +TruncToInt = '''''' +IntTrueDiv = '''''' +___check_type_id = '''''' +___check_obj_id = '''''' +___odict_getitem = '''''' +___key_to_id = '''''' +___dict_version = '''''' +___dict_contains = ''' at 0x1430dc430>''' +___tuple_iterator_len = '''''' +___tuple_iterator_getitem = '''''' +__math_isnan = '''''' +__numpy_isnan = '''''' +inf = '''inf''' +__load_module = '''''' +utils_device = '''''' +device = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +torch = '''''' +inspect = '''''' +def __guard_0_for_f(L, G, **___kwargs_ignored): + __guard_hit = True + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) + __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False + return __guard_hit + +# Note: please refer to the graph code in __compiled_fn_1*.py. +# Captured Graph: Dynamo generated graph (debuggable when using eager backend). +# Joint graph: joint forward+backward graph from aot autograd. +# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). +# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). +# AFTER XXX: graph processed by inductor (not debuggable). +def __compiled_fn_1(*args, **kwargs): + pass + +def __transformed_code_0_for_f(x): + __temp_2, = __compiled_fn_1(x) + return __temp_2 + + +# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. +def f(x): + return x + 1 + +def transformed_f(x): + __local_dict = {"x": x} + __global_dict = globals() + if __guard_0_for_f(__local_dict, __global_dict): + return __transformed_code_0_for_f(x) + # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. 
+ return f(x) + +#============ end of f ============# From 346c5f08e862d134a31ea85b9570d750f541e13a Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 3 Nov 2024 18:49:30 -0800 Subject: [PATCH 4/4] update tests Signed-off-by: youkaichao --- .../__compiled_fn_1 Captured Graph 0.py | 2 +- ...rred_runtime_asserts __compiled_fn_1 0.py} | 2 +- .../multiprocessing/full_code_for_f_0.py | 30 ++++--- .../multiprocessing/full_code_for_f_1.py | 30 ++++--- .../multiprocessing/full_code_for_f_10.py | 82 ------------------- .../multiprocessing/full_code_for_f_11.py | 82 ------------------- .../multiprocessing/full_code_for_f_2.py | 30 ++++--- .../multiprocessing/full_code_for_f_3.py | 82 ------------------- .../multiprocessing/full_code_for_f_4.py | 82 ------------------- .../multiprocessing/full_code_for_f_5.py | 82 ------------------- .../multiprocessing/full_code_for_f_6.py | 82 ------------------- .../multiprocessing/full_code_for_f_7.py | 82 ------------------- .../multiprocessing/full_code_for_f_8.py | 82 ------------------- .../multiprocessing/full_code_for_f_9.py | 82 ------------------- 14 files changed, 56 insertions(+), 776 deletions(-) rename tests/depyf_output/multiprocessing/{__compiled_fn_1 Captured Graph 1.py => __compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py} (58%) delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_10.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_11.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_3.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_4.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_5.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_6.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_7.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_8.py delete mode 100644 tests/depyf_output/multiprocessing/full_code_for_f_9.py diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py index 3b7d87c3..06186698 100644 --- a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 0.py @@ -5,7 +5,7 @@ def forward(self, L_x_: "f32[5]"): l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:6 in f, code: return x + 1 + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 add: "f32[5]" = l_x_ + 1; l_x_ = None return (add,) \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py b/tests/depyf_output/multiprocessing/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py similarity index 58% rename from tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py rename to tests/depyf_output/multiprocessing/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py index fc7c7b66..06186698 100644 --- a/tests/depyf_output/multiprocessing/__compiled_fn_1 Captured Graph 1.py +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1 pre insert_deferred_runtime_asserts __compiled_fn_1 0.py @@ -5,7 +5,7 @@ def forward(self, L_x_: "f32[5]"): l_x_ = L_x_ - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 + # File: 
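For context, files like `full_code_for_f_0.py` are dumped by depyf while a `torch.compile`-d function runs under its debug context. A sketch along these lines (the output directory and the toy function are illustrative) produces comparable dumps:

import torch
import depyf

def f(x):
    return x + 1

compiled_f = torch.compile(f)

# depyf.prepare_debug writes the guards, transformed bytecode and graph
# dumps (full_code_for_f_*.py, __compiled_fn_* ...) into the given directory
with depyf.prepare_debug("./depyf_debug_dir"):
    compiled_f(torch.ones(5))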
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 add: "f32[5]" = l_x_ + 1; l_x_ = None return (add,) \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_0.py b/tests/depyf_output/multiprocessing/full_code_for_f_0.py index f4b52df1..a95fd45d 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_0.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_0.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x123441bd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -17,38 +17,44 @@ PythonMod = '''''' FloorDiv = '''''' TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' +FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = ''' at 0x175ce80d0>''' +___dict_contains = '''. at 0x1232f5240>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''''' +__numpy_isnan = '''None''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False return __guard_hit diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_1.py b/tests/depyf_output/multiprocessing/full_code_for_f_1.py index 97cf3b86..788edff6 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_1.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_1.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x126841bd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -17,38 +17,44 @@ PythonMod = '''''' FloorDiv = '''''' TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' +FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = ''' at 0x175ce80d0>''' +___dict_contains = '''. at 0x1267f1240>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''''' +__numpy_isnan = '''None''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False return __guard_hit diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_10.py b/tests/depyf_output/multiprocessing/full_code_for_f_10.py deleted file mode 100644 index ccf9f683..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_10.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x143de4430>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_11.py b/tests/depyf_output/multiprocessing/full_code_for_f_11.py deleted file mode 100644 index 47f1840a..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_11.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x14fbe4430>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_2.py b/tests/depyf_output/multiprocessing/full_code_for_f_2.py index 3bf694a6..620cd9fa 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_2.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_2.py @@ -2,8 +2,8 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120a41bd0>''' Abs = '''''' Eq = '''''' Ne = '''''' @@ -17,38 +17,44 @@ PythonMod = '''''' FloorDiv = '''''' TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' +PowByNatural = '''''' +IsNonOverlappingAndDenseIndicator = '''''' floor = '''''' ceiling = '''''' FloorToInt = '''''' +FloatPow = '''''' CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' +cast_symbool_to_symint_guardless = '''''' RoundToInt = '''''' RoundDecimal = '''''' TruncToInt = '''''' IntTrueDiv = '''''' +FloatTrueDiv = '''''' +ToFloat = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = ''' at 0x175ce80d0>''' +___dict_contains = '''. at 0x1209f1240>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' -__numpy_isnan = '''''' +__numpy_isnan = '''None''' inf = '''inf''' -__load_module = '''''' +__load_module = '''''' utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' +___from_numpy = '''''' +___as_tensor = '''''' torch = '''''' inspect = '''''' def __guard_0_for_f(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:479 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() + __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False return __guard_hit diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_3.py b/tests/depyf_output/multiprocessing/full_code_for_f_3.py deleted file mode 100644 index e0309310..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_3.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
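The new `___check_torch_function_mode_stack` guard in this hunk presumably re-validates the torch function mode stack, so a compiled entry recorded with an empty stack is not reused while a `TorchFunctionMode` is active. A small sketch of the kind of mode that would change that stack (illustrative; the guard itself lives in PyTorch):

import torch
from torch.overrides import TorchFunctionMode

class NoopMode(TorchFunctionMode):
    # a do-nothing torch function mode; entering it pushes onto the
    # torch function mode stack that the new guard re-checks
    def __torch_function__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        return func(*args, **kwargs)

x = torch.ones(5)
with NoopMode():
    # code compiled under an empty mode stack would fail this guard here,
    # so Dynamo would fall back or recompile for the active mode
    y = x + 1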
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x1354dc3a0>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_4.py b/tests/depyf_output/multiprocessing/full_code_for_f_4.py deleted file mode 100644 index a84f42ca..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_4.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x1320e43a0>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_5.py b/tests/depyf_output/multiprocessing/full_code_for_f_5.py deleted file mode 100644 index ced483a3..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_5.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x1376e83a0>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_6.py b/tests/depyf_output/multiprocessing/full_code_for_f_6.py deleted file mode 100644 index fd435954..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_6.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None''' -___check_tensors_verbose = '''None''' -___check_global_state = '''''' -tensor_check_names = '''["L['x']"]''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -___check_type_id = '''''' -___check_obj_id = '''''' -___odict_getitem = '''''' -___key_to_id = '''''' -___dict_version = '''''' -___dict_contains = ''' at 0x16d1e43a0>''' -___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -__math_isnan = '''''' -__numpy_isnan = '''''' -inf = '''inf''' -__load_module = '''''' -utils_device = '''''' -device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' -def __guard_0_for_f(L, G, **___kwargs_ignored): - __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards - __guard_hit = __guard_hit and ___check_global_state() - __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1]) - __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - return __guard_hit - -# Note: please refer to the graph code in __compiled_fn_1*.py. -# Captured Graph: Dynamo generated graph (debuggable when using eager backend). -# Joint graph: joint forward+backward graph from aot autograd. -# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend). -# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend). -# AFTER XXX: graph processed by inductor (not debuggable). -def __compiled_fn_1(*args, **kwargs): - pass - -def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 - - -# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. -def f(x): - return x + 1 - -def transformed_f(x): - __local_dict = {"x": x} - __global_dict = globals() - if __guard_0_for_f(__local_dict, __global_dict): - return __transformed_code_0_for_f(x) - # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code. - return f(x) - -#============ end of f ============# diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_7.py b/tests/depyf_output/multiprocessing/full_code_for_f_7.py deleted file mode 100644 index 912a0b46..00000000 --- a/tests/depyf_output/multiprocessing/full_code_for_f_7.py +++ /dev/null @@ -1,82 +0,0 @@ - -# Note: the following variables are used inside the guard function. 
-___check_tensors = '''None'''
-___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-tensor_check_names = '''["L['x']"]'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-___check_type_id = ''''''
-___check_obj_id = ''''''
-___odict_getitem = ''''''
-___key_to_id = ''''''
-___dict_version = ''''''
-___dict_contains = ''' at 0x1281e03a0>'''
-___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-__math_isnan = ''''''
-__numpy_isnan = ''''''
-inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
-device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
-def __guard_0_for_f(L, G, **___kwargs_ignored):
-    __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards
-    __guard_hit = __guard_hit and ___check_global_state()
-    __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
-    __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False
-    return __guard_hit
-
-# Note: please refer to the graph code in __compiled_fn_1*.py.
-# Captured Graph: Dynamo generated graph (debuggable when using eager backend).
-# Joint graph: joint forward+backward graph from aot autograd.
-# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend).
-# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend).
-# AFTER XXX: graph processed by inductor (not debuggable).
-def __compiled_fn_1(*args, **kwargs):
-    pass
-
-def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
-
-
-# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
-def f(x):
-    return x + 1
-
-def transformed_f(x):
-    __local_dict = {"x": x}
-    __global_dict = globals()
-    if __guard_0_for_f(__local_dict, __global_dict):
-        return __transformed_code_0_for_f(x)
-    # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code.
-    return f(x)
-
-#============ end of f ============#
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_8.py b/tests/depyf_output/multiprocessing/full_code_for_f_8.py
deleted file mode 100644
index 3c13fac9..00000000
--- a/tests/depyf_output/multiprocessing/full_code_for_f_8.py
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# Note: the following variables are used inside the guard function.
-___check_tensors = '''None'''
-___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-tensor_check_names = '''["L['x']"]'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-___check_type_id = ''''''
-___check_obj_id = ''''''
-___odict_getitem = ''''''
-___key_to_id = ''''''
-___dict_version = ''''''
-___dict_contains = ''' at 0x1496e83a0>'''
-___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-__math_isnan = ''''''
-__numpy_isnan = ''''''
-inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
-device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
-def __guard_0_for_f(L, G, **___kwargs_ignored):
-    __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards
-    __guard_hit = __guard_hit and ___check_global_state()
-    __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
-    __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False
-    return __guard_hit
-
-# Note: please refer to the graph code in __compiled_fn_1*.py.
-# Captured Graph: Dynamo generated graph (debuggable when using eager backend).
-# Joint graph: joint forward+backward graph from aot autograd.
-# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend).
-# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend).
-# AFTER XXX: graph processed by inductor (not debuggable).
-def __compiled_fn_1(*args, **kwargs):
-    pass
-
-def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
-
-
-# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
-def f(x):
-    return x + 1
-
-def transformed_f(x):
-    __local_dict = {"x": x}
-    __global_dict = globals()
-    if __guard_0_for_f(__local_dict, __global_dict):
-        return __transformed_code_0_for_f(x)
-    # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code.
-    return f(x)
-
-#============ end of f ============#
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_9.py b/tests/depyf_output/multiprocessing/full_code_for_f_9.py
deleted file mode 100644
index eb9ebb10..00000000
--- a/tests/depyf_output/multiprocessing/full_code_for_f_9.py
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# Note: the following variables are used inside the guard function.
-___check_tensors = '''None'''
-___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-tensor_check_names = '''["L['x']"]'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-___check_type_id = ''''''
-___check_obj_id = ''''''
-___odict_getitem = ''''''
-___key_to_id = ''''''
-___dict_version = ''''''
-___dict_contains = ''' at 0x1430dc430>'''
-___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-__math_isnan = ''''''
-__numpy_isnan = ''''''
-inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
-device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
-def __guard_0_for_f(L, G, **___kwargs_ignored):
-    __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:460 in init_ambient_guards
-    __guard_hit = __guard_hit and ___check_global_state()
-    __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
-    __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False
-    return __guard_hit
-
-# Note: please refer to the graph code in __compiled_fn_1*.py.
-# Captured Graph: Dynamo generated graph (debuggable when using eager backend).
-# Joint graph: joint forward+backward graph from aot autograd.
-# Forward graph: forward graph from aot autograd (debuggable when using aot_eager backend).
-# Backward graph: backward graph from aot autograd (debuggable when using aot_eager backend).
-# AFTER XXX: graph processed by inductor (not debuggable).
-def __compiled_fn_1(*args, **kwargs):
-    pass
-
-def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
-
-
-# Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
-def f(x):
-    return x + 1
-
-def transformed_f(x):
-    __local_dict = {"x": x}
-    __global_dict = globals()
-    if __guard_0_for_f(__local_dict, __global_dict):
-        return __transformed_code_0_for_f(x)
-    # Note: this function might well not be executed directly. It might well be transformed again, i.e. adding one more guards and transformed code.
-    return f(x)
-
-#============ end of f ============#