torch_xla/csrc/tensor_impl.cpp: 7 additions & 0 deletions
@@ -178,6 +178,13 @@ bool XLATensorImpl::is_contiguous_custom(at::MemoryFormat memory_format) const {
   return true;
 }
 
+c10::SymBool XLATensorImpl::sym_is_contiguous_custom(
+    at::MemoryFormat memory_format) const {
+  // Storage is always contiguous, but the tensor metadata is_contiguous_
+  // might be false due to updates in the functionalization layer.
+  return true;
+}
+
 void XLATensorImpl::SetupSizeProperties() {
   size_t generation = tensor_->generation();
   if (generation != generation_) {
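For context, here is a minimal sketch (not part of the PR) of how a caller could collapse the symbolic result into a concrete boolean. It assumes c10::SymBool's guard_bool(file, line) accessor, which records guard provenance during symbolic tracing; the helper name is hypothetical.

#include <c10/core/SymBool.h>

// Hypothetical helper, not in the PR: force a SymBool to a plain bool.
// For XLA tensors the value is the constant true returned above, so no
// real symbolic guard is introduced here.
bool force_concrete(const c10::SymBool& contiguous) {
  return contiguous.guard_bool(__FILE__, __LINE__);
}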
torch_xla/csrc/tensor_impl.h: 5 additions & 2 deletions
@@ -51,7 +51,10 @@ class XLATensorImpl : public c10::TensorImpl {

   int64_t numel_custom() const override;
 
-  bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
+  // TODO add override once https://github.com/pytorch/pytorch/pull/155590 lands
+  // and remove is_contiguous_custom.
+  bool is_contiguous_custom(at::MemoryFormat memory_format) const;
+  c10::SymBool sym_is_contiguous_custom(at::MemoryFormat memory_format) const;
 
   const at::Storage& storage() const override;
 
@@ -72,4 +75,4 @@
 
 } // namespace torch_xla
 
-#endif // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_
\ No newline at end of file
+#endif // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_
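Once the upstream virtual from pytorch/pytorch#155590 is available, the header can mark the symbolic variant as an override and drop the boolean one. A sketch of that intended end state, assuming the upstream base-class signature matches the declaration above (this is an inference from the TODO, not a confirmed API):

// Sketch only: becomes valid once the upstream virtual lands.
c10::SymBool sym_is_contiguous_custom(at::MemoryFormat memory_format) const override;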