@mlazos
Created September 7, 2022 09:18
/scratch/mlazos/py39/lib/python3.9/site-packages/torchvision-0.14.0a0+1d0786b-py3.9-linux-x86_64.egg/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: /scratch/mlazos/py39/lib/python3.9/site-packages/torchvision-0.14.0a0+1d0786b-py3.9-linux-x86_64.egg/torchvision/image.so: undefined symbol: _ZN2at4_ops19empty_memory_format4callEN3c108ArrayRefIlEENS2_8optionalINS2_10ScalarTypeEEENS5_INS2_6LayoutEEENS5_INS2_6DeviceEEENS5_IbEENS5_INS2_12MemoryFormatEEE
warn(f"Failed to load image Python extension: {e}")
WARNING:torchinductor.lowering:make_fallback(aten.native_group_norm_backward): a decomposition exists, we should switch to it
torchinductor.scheduler: [CRITICAL] Error in codegen for ComputedBuffer(name='buf0', layout=FixedLayout('cuda', torch.float32, size=[200, 200], stride=[200, 1]), data=Pointwise(
  'cuda',
  torch.float32,
  relu(constant(1, torch.float32) + constant(1, torch.float32)),
  ranges=[200, 200],
  origins={relu}
))
Traceback (most recent call last):
  File "/scratch/mlazos/test/scratch.py", line 22, in <module>
    test_backend_error()
  File "/scratch/mlazos/torchdynamo/torchdynamo/eval_frame.py", line 156, in _fn
    return fn(*args, **kwargs)
  File "/scratch/mlazos/test/scratch.py", line 15, in test_backend_error
    @torchdynamo.optimize("inductor")
  File "/scratch/mlazos/torchdynamo/torchdynamo/eval_frame.py", line 156, in _fn
    return fn(*args, **kwargs)
  File "/scratch/mlazos/functorch/functorch/_src/aot_autograd.py", line 880, in forward
    return compiled_f(
  File "/scratch/mlazos/functorch/functorch/_src/aot_autograd.py", line 866, in new_func
    compiled_fn = create_aot_dispatcher_function(
  File "/scratch/mlazos/functorch/functorch/_src/aot_autograd.py", line 444, in create_aot_dispatcher_function
    return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
  File "/scratch/mlazos/functorch/functorch/_src/aot_autograd.py", line 270, in aot_dispatch_base
    compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
  File "/scratch/mlazos/torchdynamo/torchdynamo/utils.py", line 50, in time_wrapper
    r = func(*args, **kwargs)
  File "/scratch/mlazos/torchdynamo/torchinductor/compile_fx.py", line 173, in fw_compiler
    return compile_fx_inner(
  File "/scratch/mlazos/torchdynamo/torchdynamo/debug_utils.py", line 257, in debug_wrapper
    raise e
  File "/scratch/mlazos/torchdynamo/torchdynamo/debug_utils.py", line 245, in debug_wrapper
    compiled_fn = compiler(gm, example_inputs, **kwargs)
  File "/scratch/mlazos/torchdynamo/torchinductor/debug.py", line 182, in inner
    return fn(*args, **kwargs)
  File "/scratch/mlazos/torchdynamo/torchinductor/compile_fx.py", line 58, in compile_fx_inner
    compiled_fn = wrap(graph.compile_to_fn())
  File "/scratch/mlazos/torchdynamo/torchinductor/graph.py", line 333, in compile_to_fn
    return self.compile_to_module().call
  File "/scratch/mlazos/torchdynamo/torchdynamo/utils.py", line 50, in time_wrapper
    r = func(*args, **kwargs)
  File "/scratch/mlazos/torchdynamo/torchinductor/graph.py", line 319, in compile_to_module
    code = self.codegen()
  File "/scratch/mlazos/torchdynamo/torchinductor/graph.py", line 312, in codegen
    self.scheduler.codegen()
  File "/scratch/mlazos/torchdynamo/torchdynamo/utils.py", line 50, in time_wrapper
    r = func(*args, **kwargs)
  File "/scratch/mlazos/torchdynamo/torchinductor/scheduler.py", line 1026, in codegen
    self.get_backend(device).codegen_nodes(node.get_nodes())
  File "/scratch/mlazos/torchdynamo/torchinductor/codegen/triton.py", line 1102, in codegen_nodes
    return self.codegen_node_schedule(node_schedule, numel, rnumel)
  File "/scratch/mlazos/torchdynamo/torchinductor/codegen/triton.py", line 1126, in codegen_node_schedule
    node.codegen(kernel.split_and_set_ranges(node.get_ranges()))
  File "/scratch/mlazos/torchdynamo/torchinductor/scheduler.py", line 328, in codegen
    self._body(*index_vars)
  File "/scratch/mlazos/torchdynamo/torchinductor/ir.py", line 3297, in __call__
    result = self.root_block()
  File "/scratch/mlazos/torchdynamo/torchinductor/ir.py", line 3394, in __call__
    return self.make_gm().forward(V.get_ops_handler())
  File "<eval_with_key>.7", line 8, in forward
  File "/scratch/mlazos/torchdynamo/torchinductor/codegen/common.py", line 584, in inner
    self.compute, getattr(parent_handler, name)(*args, **kwargs)
  File "/scratch/mlazos/test/scratch.py", line 8, in new_relu
    assert False
AssertionError

import torch

import torchdynamo
import torchinductor.codegen.triton as tl


# Override that unconditionally fails, used to inject an error into the backend.
def new_relu(cls, x):
    assert False


# mess up inductor to create an error
tl.TritonOverrides.relu = new_relu


@torchdynamo.optimize("inductor")
def test_backend_error():
    y = torch.ones(200, 200, device=torch.device("cuda"))
    x = torch.ones(200, 200, device=torch.device("cuda"))
    return torch.relu(x + y)


test_backend_error()
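
The monkey-patch above leaves the failing override in place for the rest of the process. A minimal sketch of the same fault injection with cleanup, assuming only the names already used in the repro (tl.TritonOverrides.relu, new_relu, test_backend_error), could look like this; the try/finally restores the real lowering so later compilations in the same process are unaffected:

# Hedged sketch: same fault injection as above, but undo the patch afterwards.
original_relu = tl.TritonOverrides.relu      # keep a reference to the real lowering
tl.TritonOverrides.relu = new_relu           # inject the failing override
try:
    test_backend_error()                     # expected to raise AssertionError during codegen
except AssertionError:
    pass                                     # the injected failure surfaced as in the traceback above
finally:
    tl.TritonOverrides.relu = original_relu  # restore the original lowering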