Skip to content

Commit

Permalink
[Exception] [3/N] Replace torch::NotImplementedError and torch::LinAlgError with C10 counterparts. (pytorch#116824)
Browse files Browse the repository at this point in the history

Pull Request resolved: pytorch#116824
Approved by: https://github.com/albanD
  • Loading branch information
cyyever authored and pytorchmergebot committed Jan 11, 2024
1 parent 89ef426 commit 2b5a201
Show file tree
Hide file tree
Showing 11 changed files with 98 additions and 126 deletions.
33 changes: 15 additions & 18 deletions torch/csrc/DataLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -175,21 +175,19 @@ static PyObject* THPModule_errorIfAnyWorkerFails(
// of pids we are interested in.
static PyObject* THPModule_setWorkerPIDs(PyObject* module, PyObject* args) {
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 2) {
throw TypeError("_set_worker_pids expects exactly 2 arguments.");
}
TORCH_CHECK_TYPE(
PyTuple_GET_SIZE(args) == 2,
"_set_worker_pids expects exactly 2 arguments.");
int64_t key = THPUtils_unpackLong(PyTuple_GET_ITEM(args, 0));
if (worker_pids.find(key) != worker_pids.end()) {
throw ValueError(
"_set_worker_pids should be called only once for each _BaseDataLoaderIter.");
}
TORCH_CHECK_VALUE(
worker_pids.find(key) == worker_pids.end(),
"_set_worker_pids should be called only once for each _BaseDataLoaderIter.");
PyObject* child_pids = PyTuple_GET_ITEM(args, 1);
if (!PyTuple_Check(child_pids)) {
throw TypeError(
"_set_worker_pids expects a tuple for child_pids, but got %s.",
Py_TYPE(child_pids)->tp_name);
}

TORCH_CHECK_TYPE(
PyTuple_Check(child_pids),
"_set_worker_pids expects a tuple for child_pids, but got ",
Py_TYPE(child_pids)->tp_name,
".");
std::set<pid_t> pids_set = {};
auto size = PyTuple_GET_SIZE(child_pids);
for (const auto idx : c10::irange(size)) {
Expand All @@ -210,11 +208,10 @@ static PyObject* THPModule_removeWorkerPIDs(

int64_t key = THPUtils_unpackLong(loader_id);
auto it = worker_pids.find(key);
if (it == worker_pids.end()) {
throw ValueError(fmt::format(
"Cannot find worker information for _BaseDataLoaderIter with id {}",
key));
}
TORCH_CHECK_VALUE(
it != worker_pids.end(),
"Cannot find worker information for _BaseDataLoaderIter with id ",
key);
worker_pids.erase(it);

Py_RETURN_NONE;
Expand Down
14 changes: 0 additions & 14 deletions torch/csrc/Exceptions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -245,27 +245,13 @@ ValueError::ValueError(const char* format, ...) {
va_end(fmt_args);
}

NotImplementedError::NotImplementedError(const char* format, ...) {
va_list fmt_args{};
va_start(fmt_args, format);
msg = formatMessage(format, fmt_args);
va_end(fmt_args);
}

AttributeError::AttributeError(const char* format, ...) {
va_list fmt_args{};
va_start(fmt_args, format);
msg = formatMessage(format, fmt_args);
va_end(fmt_args);
}

LinAlgError::LinAlgError(const char* format, ...) {
va_list fmt_args{};
va_start(fmt_args, format);
msg = formatMessage(format, fmt_args);
va_end(fmt_args);
}

void PyWarningHandler::InternalHandler::process(const c10::Warning& warning) {
warning_buffer_.push_back(warning);
}
Expand Down
17 changes: 0 additions & 17 deletions torch/csrc/Exceptions.h
Original file line number Diff line number Diff line change
Expand Up @@ -306,15 +306,6 @@ struct ValueError : public PyTorchError {
}
};

// Translates to Python NotImplementedError
struct NotImplementedError : public PyTorchError {
NotImplementedError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
NotImplementedError() = default;
PyObject* python_type() override {
return PyExc_NotImplementedError;
}
};

// Translates to Python AttributeError
struct AttributeError : public PyTorchError {
AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
Expand All @@ -323,14 +314,6 @@ struct AttributeError : public PyTorchError {
}
};

// Translates to Python LinAlgError
struct LinAlgError : public PyTorchError {
LinAlgError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
PyObject* python_type() override {
return THPException_LinAlgError;
}
};

// ATen warning handler for Python
struct PyWarningHandler {
// Move actual handler into a separate class with a noexcept
Expand Down
26 changes: 14 additions & 12 deletions torch/csrc/autograd/python_variable.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -607,10 +607,11 @@ static PyObject* THPVariable_as_subclass(
ParsedArgs<1> parsed_args{};
auto r = parser.parse(_self, args, kwargs, parsed_args);
PyObject* cls = r.pyobject(0);
if (!PyType_Check(cls)) {
throw torch::TypeError(
"cls must be a type (got %s)", Py_TYPE(cls)->tp_name);
}
TORCH_CHECK_TYPE(
PyType_Check(cls),
"cls must be a type (got ",
Py_TYPE(cls)->tp_name,
")");
return THPVariable_NewWithVar(
(PyTypeObject*)cls,
self.alias(),
Expand All @@ -629,10 +630,11 @@ static PyObject* THPVariable_make_subclass(
ParsedArgs<7> parsed_args{};
auto r = parser.parse(args, kwargs, parsed_args);
PyObject* cls = r.pyobject(0);
if (!PyType_Check(cls)) {
throw torch::TypeError(
"cls must be a type (got %s)", Py_TYPE(cls)->tp_name);
}
TORCH_CHECK_TYPE(
PyType_Check(cls),
"cls must be a type (got ",
Py_TYPE(cls)->tp_name,
")");
// guard completely turns off torch dispatch modes, doesn't just pop off the
// stack
torch_dispatch_mode::StashTorchDispatchStackGuard td_g;
Expand Down Expand Up @@ -948,10 +950,10 @@ int THPVariable_set_data(THPVariable* self, PyObject* data, void* unused) {
}
TORCH_CHECK(
data, "Deleting tensor data is not allowed. Delete tensor instead!");
if (!THPVariable_Check(data)) {
throw torch::TypeError(
"Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name);
}
TORCH_CHECK_TYPE(
THPVariable_Check(data),
"Variable data has to be a tensor, but got ",
Py_TYPE(data)->tp_name);

THPVariable_Unpack(self).set_data(THPVariable_Unpack(data));
return 0;
Expand Down
22 changes: 12 additions & 10 deletions torch/csrc/cuda/python_nccl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,6 @@
#include <c10/cuda/CUDAGuard.h>
#include <c10/util/irange.h>

#include <sstream>
#include <unordered_map>

using namespace at;
using namespace torch;
using namespace torch::cuda::nccl;
Expand Down Expand Up @@ -289,9 +286,11 @@ PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args) {
}

static inline at::Tensor extract_tensor(PyObject* obj) {
if (!THPVariable_Check(obj)) {
throw torch::TypeError("expected Tensor (got %s)", Py_TYPE(obj)->tp_name);
}
TORCH_CHECK_TYPE(
THPVariable_Check(obj),
"expected Tensor (got ",
Py_TYPE(obj)->tp_name,
")");
return THPVariable_Unpack(obj);
}

Expand All @@ -307,10 +306,13 @@ static inline std::vector<at::Tensor> extract_tensors(PyObject* obj) {
}
for (Py_ssize_t i = 0; i < length; i++) {
PyObject* item = PySequence_Fast_GET_ITEM(seq.get(), i);
if (!THPVariable_Check(item)) {
throw torch::TypeError(
"expected Tensor at %d (got %s)", (int)i, Py_TYPE(item)->tp_name);
}
TORCH_CHECK_TYPE(
THPVariable_Check(item),
"expected Tensor at ",
i,
" (got ",
Py_TYPE(item)->tp_name,
")");
list.emplace_back(THPVariable_Unpack(item));
}
return list;
Expand Down
3 changes: 2 additions & 1 deletion torch/csrc/jit/python/script_init.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
#include <torch/csrc/jit/serialization/import.h>
#include <torch/csrc/jit/testing/file_check.h>

#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/frontend/parser.h>
Expand Down Expand Up @@ -972,7 +973,7 @@ void initJitScriptBindings(PyObject* module) {
[mm_name](const Object& self, py::args args, py::kwargs kwargs) {
auto method = self.find_method(mm_name);
if (!method) {
throw NotImplementedError(
throw c10::NotImplementedError(
"'%s' is not implemented for %s",
mm_name,
self.type()->str().c_str());
Expand Down
9 changes: 4 additions & 5 deletions torch/csrc/python_dimname.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,11 +83,10 @@ at::Dimname THPDimname_parse(PyObject* obj) {
return at::Dimname::wildcard();
}

if (!THPUtils_checkString(obj)) {
throw torch::TypeError(
"expected None or string for Dimname but got %s",
Py_TYPE(obj)->tp_name);
}
TORCH_CHECK_TYPE(
THPUtils_checkString(obj),
"expected None or string for Dimname but got ",
Py_TYPE(obj)->tp_name);

if (!THPUtils_isInterned(obj)) {
// internStringInPlace decrefs obj and increfs the result. Because we're
Expand Down
27 changes: 12 additions & 15 deletions torch/csrc/tensor/python_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,21 +64,17 @@ static Backend default_backend = Backend::CPU;
static void py_bind_tensor_types(
const std::vector<PyTensorType*>& tensor_types);

static TypeError unavailable_type(const PyTensorType& type) {
return TypeError(
"type %s not available. Torch not compiled with CUDA enabled.",
type.name);
}

static PyObject* Tensor_new(
PyTypeObject* type,
PyObject* args,
PyObject* kwargs) {
HANDLE_TH_ERRORS
auto& tensor_type = *((PyTensorType*)type);
if (tensor_type.is_cuda && !torch::utils::cuda_enabled()) {
throw unavailable_type(tensor_type);
}
TORCH_CHECK_TYPE(
!tensor_type.is_cuda || torch::utils::cuda_enabled(),
"type ",
tensor_type.name,
" not available. Torch not compiled with CUDA enabled.")
if (tensor_type.is_cuda) {
TORCH_WARN_ONCE(
"The torch.cuda.*DtypeTensor constructors are no longer recommended. "
Expand Down Expand Up @@ -249,9 +245,8 @@ static THPObjectPtr get_storage_obj(Backend backend, ScalarType dtype) {
auto storage_name = std::string(toString(dtype)) + "Storage";
THPObjectPtr storage(
PyObject_GetAttrString(module_obj.get(), storage_name.c_str()));
if (!storage.get()) {
throw TypeError("couldn't find storage object %s", storage_name.c_str());
}
TORCH_CHECK_TYPE(
storage.get(), "couldn't find storage object ", storage_name);
return storage;
}

Expand Down Expand Up @@ -455,9 +450,11 @@ void py_set_default_tensor_type(PyObject* obj) {
PyTensorType_Check(obj),
"invalid type object: only floating-point types are supported as the default type");
PyTensorType* type = (PyTensorType*)obj;
if (type->is_cuda && !torch::utils::cuda_enabled()) {
throw unavailable_type(*type);
}
TORCH_CHECK_TYPE(
!type->is_cuda || torch::utils::cuda_enabled(),
"type ",
type->name,
" not available. Torch not compiled with CUDA enabled.")
set_default_tensor_type(type->get_backend(), type->get_scalar_type());
}

Expand Down
9 changes: 6 additions & 3 deletions torch/csrc/utils/python_arg_parser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -742,10 +742,13 @@ bool is_tensor_list_and_append_overloaded(
tuple ? PyTuple_GET_ITEM(obj, idx) : PyList_GET_ITEM(obj, idx);
if (!is_tensor_and_append_overloaded(iobj, overloaded_args)) {
if (throw_error) {
throw TypeError(
"expected Tensor as element %d in argument %d, but got %s",
static_cast<int>(idx),
TORCH_CHECK_TYPE(
false,
"expected Tensor as element ",
idx,
" in argument ",
argnum,
", but got ",
Py_TYPE(iobj)->tp_name);
}
return false;
Expand Down
56 changes: 28 additions & 28 deletions torch/csrc/utils/tensor_apply.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,27 +67,26 @@ const Tensor& apply_(const Tensor& self, PyObject* fn) {
if (self.is_meta()) {
return self; // Just skip
}
if (!self.device().is_cpu()) {
throw TypeError("apply_ is only implemented on CPU tensors");
}
TORCH_CHECK_TYPE(
self.device().is_cpu(), "apply_ is only implemented on CPU tensors");
auto scalarType = self.scalar_type();
recursive_apply<1>(self.sizes(), scalarType, 0, fn, {{self}});
return self;
}

const Tensor& map_(const Tensor& self, const Tensor& other_, PyObject* fn) {
if (!other_.options().type_equal(self.options())) {
throw TypeError(
"map_: expected %s for 'other' (got %s)",
self.toString().c_str(),
other_.toString().c_str());
}
TORCH_CHECK_TYPE(
other_.options().type_equal(self.options()),
"map_: expected ",
self.toString(),
" for 'other' (got ",
other_.toString(),
")");
if (self.is_meta()) {
return self; // Just skip
}
if (!self.device().is_cpu()) {
throw TypeError("map_ is only implemented on CPU tensors");
}
TORCH_CHECK_TYPE(
self.device().is_cpu(), "map_ is only implemented on CPU tensors");
c10::MaybeOwned<Tensor> other = expand_inplace(self, other_, "map_");
auto scalarType = self.scalar_type();
recursive_apply<2>(self.sizes(), scalarType, 0, fn, {{self, *other}});
Expand All @@ -99,25 +98,26 @@ const Tensor& map2_(
const Tensor& x_,
const Tensor& y_,
PyObject* fn) {
if (!x_.options().type_equal(self.options())) {
throw TypeError(
"map2_: expected %s for argument 'x' (got %s)",
self.toString().c_str(),
x_.toString().c_str());
}
if (!y_.options().type_equal(self.options())) {
throw TypeError(
"map2_: expected %s for argument 'y' (got %s)",
self.toString().c_str(),
y_.toString().c_str());
}
TORCH_CHECK_TYPE(
x_.options().type_equal(self.options()),
"map2_: expected ",
self.toString(),
" for argument 'x' (got ",
x_.toString(),
")");
TORCH_CHECK_TYPE(
y_.options().type_equal(self.options()),
"map2_: expected ",
self.toString(),
" for argument 'y' (got ",
y_.toString(),
")");
if (self.is_meta()) {
return self; // Just skip
}
if (!self.device().is_cpu() || !x_.device().is_cpu() ||
!y_.device().is_cpu()) {
throw TypeError("map2_ is only implemented on CPU tensors");
}
TORCH_CHECK_TYPE(
(self.device().is_cpu() && x_.device().is_cpu() && y_.device().is_cpu()),
"map2_ is only implemented on CPU tensors");
auto others = expand_inplace(self, x_, y_, "map2_");
auto scalarType = self.scalar_type();
recursive_apply<3>(
Expand Down
8 changes: 5 additions & 3 deletions torch/csrc/utils/tensor_numpy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,11 @@ at::Tensor tensor_from_numpy(
if (!is_numpy_available()) {
throw std::runtime_error("Numpy is not available");
}
if (!PyArray_Check(obj)) {
throw TypeError("expected np.ndarray (got %s)", Py_TYPE(obj)->tp_name);
}
TORCH_CHECK_TYPE(
PyArray_Check(obj),
"expected np.ndarray (got ",
Py_TYPE(obj)->tp_name,
")");
auto array = (PyArrayObject*)obj;

// warn_if_not_writable is true when a copy of numpy variable is created.
Expand Down

0 comments on commit 2b5a201

Please sign in to comment.