Mirror of https://github.com/nod-ai/SHARK-Studio.git, synced 2026-01-10 22:38:01 -05:00
Fix input tensors with non-floating point dtype in the lockstep tracer (#328)
@@ -65,14 +65,18 @@ class TorchMLIRLockstepTensor(TorchMLIRTensor):
             nt = elem.detach().data.numpy()
             if not nt.flags["C_CONTIGUOUS"]:
                 nt = np.ascontiguousarray(nt, dtype=nt.dtype)
-            r.elem = backend.transfer_from_torch_to_device(torch.Tensor(nt))
+            r.elem = backend.transfer_from_torch_to_device(
+                torch.from_numpy(nt)
+            )
         elif isinstance(elem, torch.Tensor):
             r = make_wrapper_subclass_from_torch_tensor(cls, elem, **kwargs)
             # Ditto TODO: Find a better way to handle this
             nt = elem.numpy()
             if not nt.flags["C_CONTIGUOUS"]:
                 nt = np.ascontiguousarray(nt, dtype=nt.dtype)
-            r.elem = backend.transfer_from_torch_to_device(torch.Tensor(nt))
+            r.elem = backend.transfer_from_torch_to_device(
+                torch.from_numpy(nt)
+            )
         # This branch handles the case when a python scalar is passed to some op
         # or is returned from some aten op, such as _local_scalar_dense.
         elif isinstance(elem, (int, float, bool)):
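Why the change matters: torch.Tensor(...) constructs the default (floating-point) tensor type, so building a tensor from an integer or boolean numpy array silently casts the data to float32 before it ever reaches the device. torch.from_numpy(...) preserves the numpy dtype and shares the array's buffer instead of copying. A minimal standalone sketch of the difference (the sample array is illustrative, and the backend.transfer_from_torch_to_device call from the diff is omitted):

import numpy as np
import torch

# An int64 array, standing in for e.g. elem.detach().data.numpy()
nt = np.array([1, 2, 3], dtype=np.int64)
if not nt.flags["C_CONTIGUOUS"]:
    nt = np.ascontiguousarray(nt, dtype=nt.dtype)

# Old path: torch.Tensor() builds a default float tensor,
# so the integer data is silently cast.
print(torch.Tensor(nt).dtype)       # torch.float32

# New path: torch.from_numpy() keeps the original dtype
# (and shares memory with the numpy array rather than copying).
print(torch.from_numpy(nt).dtype)   # torch.int64

Because from_numpy shares memory rather than copying, the C-contiguity guard above it still matters: it guarantees the buffer handed to the backend is laid out contiguously.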