/*
    pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "numpy.h"

#if defined(__INTEL_COMPILER)
#  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
#elif defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wconversion"
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#  ifdef __clang__
//   Eigen generates a bunch of implicit-copy-constructor-is-deprecated warnings with -Wdeprecated
//   under Clang, so disable that warning here:
#    pragma GCC diagnostic ignored "-Wdeprecated"
#  endif
#  if __GNUC__ >= 7
#    pragma GCC diagnostic ignored "-Wint-in-bool-context"
#  endif
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#  pragma warning(disable: 4996) // warning C4996: std::unary_negate is deprecated in C++17
#endif

#include <Eigen/Core>
#include <Eigen/SparseCore>

// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this and explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7");

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
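
// Minimal illustrative sketch (the module name `example` and the function name below are
// hypothetical, not part of pybind11): with the aliases above, a bound function can accept any
// writeable float64 array by reference, whatever its strides, without forcing a copy.
//
//     #include <pybind11/pybind11.h>
//     #include <pybind11/eigen.h>
//     namespace py = pybind11;
//
//     PYBIND11_MODULE(example, m) {
//         // Modifies the caller's numpy array in place; EigenDRef accepts arbitrary strides.
//         m.def("scale_in_place",
//               [](py::EigenDRef<Eigen::MatrixXd> mat, double c) { mat *= c; });
//     }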

PYBIND11_NAMESPACE_BEGIN(detail)

#if EIGEN_VERSION_AT_LEAST(3,3,0)
using EigenIndex = Eigen::Index;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
#endif

// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T> using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>, std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T> using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T> using is_eigen_dense_plain = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T> using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that doesn't have a typical
// matrix data layout that can be copied from its .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T> using is_eigen_other = all_of<
    is_template_base_of<Eigen::EigenBase, T>,
    negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>
>;
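
// A quick illustration of how these traits partition Eigen types (sketch only; each assertion is
// believed to hold for Eigen >= 3.2.7):
//
//     static_assert(is_eigen_dense_plain<Eigen::MatrixXd>::value, "owns its own data");
//     static_assert(is_eigen_dense_map<Eigen::Ref<Eigen::MatrixXd>>::value, "map/ref/block");
//     static_assert(is_eigen_mutable_map<Eigen::Map<Eigen::MatrixXd>>::value, "writeable map");
//     static_assert(is_eigen_sparse<Eigen::SparseMatrix<double>>::value, "sparse matrix");
//     static_assert(is_eigen_other<Eigen::DiagonalMatrix<double, 3>>::value, "EigenBase only");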

// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor> struct EigenConformable {
    bool conformable = false;
    EigenIndex rows = 0, cols = 0;
    EigenDStride stride{0, 0};    // Only valid if negativestrides is false!
    bool negativestrides = false; // If true, do not use stride!

    EigenConformable(bool fits = false) : conformable{fits} {}
    // Matrix type:
    EigenConformable(EigenIndex r, EigenIndex c,
            EigenIndex rstride, EigenIndex cstride) :
        conformable{true}, rows{r}, cols{c} {
        // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
        if (rstride < 0 || cstride < 0) {
            negativestrides = true;
        } else {
            stride = {EigenRowMajor ? rstride : cstride /* outer stride */,
                      EigenRowMajor ? cstride : rstride /* inner stride */ };
        }
    }
    // Vector type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
        : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {}

    template <typename props> bool stride_compatible() const {
        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
        // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant)
        return
            !negativestrides &&
            (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() ||
                (EigenRowMajor ? cols : rows) == 1) &&
            (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() ||
                (EigenRowMajor ? rows : cols) == 1);
    }
    operator bool() const { return conformable; }
};
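
// Worked example (sketch): a C-contiguous 2x3 float64 numpy array has byte strides (24, 8), i.e.
// element strides rstride = 3, cstride = 1. For a column-major Eigen target (EigenRowMajor ==
// false) the matrix constructor above records outer stride = cstride = 1 and inner stride =
// rstride = 3, i.e. EigenDStride{1, 3}, which is what a column-major Eigen::Map over that buffer
// needs.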

template <typename Type> struct eigen_extract_stride { using type = Type; };
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> { using type = StrideType; };
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> { using type = StrideType; };

// Helper struct for extracting information from an Eigen type
template <typename Type_> struct EigenProps {
    using Type = Type_;
    using Scalar = typename Type::Scalar;
    using StrideType = typename eigen_extract_stride<Type>::type;
    static constexpr EigenIndex
        rows = Type::RowsAtCompileTime,
        cols = Type::ColsAtCompileTime,
        size = Type::SizeAtCompileTime;
    static constexpr bool
        row_major = Type::IsRowMajor,
        vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
        fixed_rows = rows != Eigen::Dynamic,
        fixed_cols = cols != Eigen::Dynamic,
        fixed = size != Eigen::Dynamic, // Fully-fixed size
        dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size

    template <EigenIndex i, EigenIndex ifzero> using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
    static constexpr EigenIndex inner_stride = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
                                outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
                                                       vector ? size : row_major ? cols : rows>::value;
    static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
    static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
    static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;

    // Takes an input array and determines whether we can make it fit into the Eigen type. If
    // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
    // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
    static EigenConformable<row_major> conformable(const array &a) {
        const auto dims = a.ndim();
        if (dims < 1 || dims > 2)
            return false;

        if (dims == 2) { // Matrix type: require exact match (or dynamic)

            EigenIndex
                np_rows = a.shape(0),
                np_cols = a.shape(1),
                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
            if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
                return false;

            return {np_rows, np_cols, np_rstride, np_cstride};
        }

        // Otherwise we're storing an n-vector. Only one of the strides will be used, but whichever
        // is used, we want the (single) numpy stride value.
        const EigenIndex n = a.shape(0),
              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));

        if (vector) { // Eigen type is a compile-time vector
            if (fixed && size != n)
                return false; // Vector size mismatch
            return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
        }
        else if (fixed) {
            // The type has a fixed size, but is not a vector: abort
            return false;
        }
        else if (fixed_cols) {
            // Since this isn't a vector, cols must be != 1. We allow this only if it exactly
            // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
            if (cols != n) return false;
            return {1, n, stride};
        }
        else {
            // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
            if (fixed_rows && rows != n) return false;
            return {n, 1, stride};
        }
    }

    static constexpr bool show_writeable = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
    static constexpr bool show_order = is_eigen_dense_map<Type>::value;
    static constexpr bool show_c_contiguous = show_order && requires_row_major;
    static constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major;

    static constexpr auto descriptor =
        _("numpy.ndarray[") + npy_format_descriptor<Scalar>::name +
        _("[") + _<fixed_rows>(_<(size_t) rows>(), _("m")) +
        _(", ") + _<fixed_cols>(_<(size_t) cols>(), _("n")) +
        _("]") +
        // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to be
        // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride
        // options, possibly f_contiguous or c_contiguous. We include them in the descriptor output
        // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to
        // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message saying
        // that you *gave* a numpy.ndarray of the right type and dimensions).
        _<show_writeable>(", flags.writeable", "") +
        _<show_c_contiguous>(", flags.c_contiguous", "") +
        _<show_f_contiguous>(", flags.f_contiguous", "") +
        _("]");
};
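
// For concreteness (sketch; values believed to match what the descriptor above produces), the
// generated signature hints look along these lines:
//
//     EigenProps<Eigen::MatrixXd>                   -> numpy.ndarray[float64[m, n]]
//     EigenProps<Eigen::Ref<const Eigen::MatrixXf>> -> numpy.ndarray[float32[m, n], flags.f_contiguous]
//     EigenProps<EigenDRef<Eigen::MatrixXd>>        -> numpy.ndarray[float64[m, n], flags.writeable]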

// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
    constexpr ssize_t elem_size = sizeof(typename props::Scalar);
    array a;
    if (props::vector)
        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
    else
        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
                  src.data(), base);

    if (!writeable)
        array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;

    return a.release();
}

// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// references the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
    // none here is to get past array's should-we-copy detection, which currently always
    // copies when there is no base. Setting the base to None should be harmless.
    return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}

// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy
// array that references the encapsulated data with a python-side reference to the capsule to tie
// its destruction to that of any dependent python objects. Const-ness is determined by whether or
// not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
    capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
    return eigen_ref_array<props>(*src, base);
}

// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
    using Scalar = typename Type::Scalar;
    using props = EigenProps<Type>;

    bool load(handle src, bool convert) {
        // If we're in no-convert mode, only load if given an array of the correct type
        if (!convert && !isinstance<array_t<Scalar>>(src))
            return false;

        // Coerce into an array, but don't do type conversion yet; the copy below handles it.
        auto buf = array::ensure(src);

        if (!buf)
            return false;

        auto dims = buf.ndim();
        if (dims < 1 || dims > 2)
            return false;

        auto fits = props::conformable(buf);
        if (!fits)
            return false;

        // Allocate the new type, then build a numpy reference into it
        value = Type(fits.rows, fits.cols);
        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
        if (dims == 1) ref = ref.squeeze();
        else if (ref.ndim() == 1) buf = buf.squeeze();

        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());

        if (result < 0) { // Copy failed!
            PyErr_Clear();
            return false;
        }

        return true;
    }

private:

    // Cast implementation
    template <typename CType>
    static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::take_ownership:
            case return_value_policy::automatic:
                return eigen_encapsulate<props>(src);
            case return_value_policy::move:
                return eigen_encapsulate<props>(new CType(std::move(*src)));
            case return_value_policy::copy:
                return eigen_array_cast<props>(*src);
            case return_value_policy::reference:
            case return_value_policy::automatic_reference:
                return eigen_ref_array<props>(*src);
            case return_value_policy::reference_internal:
                return eigen_ref_array<props>(*src, parent);
            default:
                throw cast_error("unhandled return_value_policy: should not happen!");
        };
    }

public:

    // Normal returned non-reference, non-const value:
    static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // If you return a non-reference const, we mark the numpy array readonly:
    static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // lvalue reference return; default (automatic) becomes copy
    static handle cast(Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast_impl(&src, policy, parent);
    }
    // const lvalue reference return; default (automatic) becomes copy
    static handle cast(const Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast(&src, policy, parent);
    }
    // non-const pointer return
    static handle cast(Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }
    // const pointer return
    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }

    static constexpr auto name = props::descriptor;

    operator Type*() { return &value; }
    operator Type&() { return value; }
    operator Type&&() && { return std::move(value); }
    template <typename T> using cast_op_type = movable_cast_op_type<T>;

private:
    Type value;
};
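
// Usage sketch for the caster above (module/function names hypothetical; assumes the includes and
// `namespace py = pybind11` alias from the earlier sketch). Returning a plain matrix by value
// hands the numpy array a heap-allocated copy whose lifetime is tied to a capsule; taking one by
// value copies the incoming array (or anything convertible) into a fresh Eigen matrix via load():
//
//     Eigen::MatrixXd make_ones(int r, int c) { return Eigen::MatrixXd::Ones(r, c); }
//
//     PYBIND11_MODULE(example, m) {
//         m.def("make_ones", &make_ones);                                  // by-value return
//         m.def("trace", [](Eigen::MatrixXd mat) { return mat.trace(); }); // by-value argument
//     }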

// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType> struct eigen_map_caster {
private:
    using props = EigenProps<MapType>;

public:

    // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
    // to stay around), but we'll allow it under the assumption that you know what you're doing (and
    // have an appropriate keep_alive in place). We return a numpy array pointing directly at the
    // ref's data (the numpy array ends up read-only if the ref was to a const matrix type). Note
    // that this means you need to ensure the referenced object isn't destroyed out from under the
    // array (e.g. by using an appropriate keep_alive, or by referencing a statically allocated matrix).
    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::copy:
                return eigen_array_cast<props>(src);
            case return_value_policy::reference_internal:
                return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
            case return_value_policy::reference:
            case return_value_policy::automatic:
            case return_value_policy::automatic_reference:
                return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
        }
    }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e.
    // they can be return types but not bound arguments). We still declare them (explicitly
    // deleted) so that you end up here, with a clearer error, if you try anyway.
    bool load(handle, bool) = delete;
    operator MapType() = delete;
    template <typename> using cast_op_type = MapType;
};
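
// Illustrative sketch (class and names hypothetical, same includes/alias as the sketches above):
// the caster above lets bound code expose an existing matrix to python as a numpy view without
// copying, as long as something keeps the owner alive:
//
//     struct Holder { Eigen::MatrixXd big = Eigen::MatrixXd::Zero(1000, 1000); };
//
//     PYBIND11_MODULE(example, m) {
//         py::class_<Holder>(m, "Holder")
//             .def(py::init<>())
//             // Returns an Eigen::Ref viewing `big`; reference_internal ties the Holder's
//             // lifetime to the returned numpy array.
//             .def("view", [](Holder &h) { return Eigen::Ref<Eigen::MatrixXd>(h.big); },
//                  py::return_value_policy::reference_internal);
//     }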

// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type> struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>>
    : eigen_map_caster<Type> {};

// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
    Eigen::Ref<PlainObjectType, 0, StrideType>,
    enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>
> : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
    using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
    using props = EigenProps<Type>;
    using Scalar = typename props::Scalar;
    using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
    using Array = array_t<Scalar, array::forcecast |
                          ((props::row_major ? props::inner_stride : props::outer_stride) == 1 ? array::c_style :
                           (props::row_major ? props::outer_stride : props::inner_stride) == 1 ? array::f_style : 0)>;
    static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
    // Delay construction (these have no default constructor)
    std::unique_ptr<MapType> map;
    std::unique_ptr<Type> ref;
    // Our array. When possible, this is just a numpy array pointing to the source data, but
    // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible
    // layout, or is an array of a type that needs to be converted). Using a numpy temporary
    // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and
    // storage order conversion. (Note that we refuse to use this temporary copy when loading an
    // argument for a Ref<M> with M non-const, i.e. a read-write reference).
    Array copy_or_ref;
public:
    bool load(handle src, bool convert) {
        // First check whether what we have is already an array of the right type. If not, we can't
        // avoid a copy (because the copy is also going to do type conversion).
        bool need_copy = !isinstance<Array>(src);

        EigenConformable<props::row_major> fits;
        if (!need_copy) {
            // We don't need a converting copy, but we also need to check whether the strides are
            // compatible with the Ref's stride requirements
            auto aref = reinterpret_borrow<Array>(src);

            if (aref && (!need_writeable || aref.writeable())) {
                fits = props::conformable(aref);
                if (!fits) return false; // Incompatible dimensions
                if (!fits.template stride_compatible<props>())
                    need_copy = true;
                else
                    copy_or_ref = std::move(aref);
            }
            else {
                need_copy = true;
            }
        }

        if (need_copy) {
            // We need to copy: if we need a mutable reference, or we're not supposed to convert
            // (either because we're in the no-convert overload pass, or because we're explicitly
            // instructed not to copy via `py::arg().noconvert()`), we have to fail loading.
            if (!convert || need_writeable) return false;

            Array copy = Array::ensure(src);
            if (!copy) return false;
            fits = props::conformable(copy);
            if (!fits || !fits.template stride_compatible<props>())
                return false;
            copy_or_ref = std::move(copy);
            loader_life_support::add_patient(copy_or_ref);
        }

        ref.reset();
        map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner())));
        ref.reset(new Type(*map));

        return true;
    }

    operator Type*() { return ref.get(); }
    operator Type&() { return *ref; }
    template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;

private:
    template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
    Scalar *data(Array &a) { return a.mutable_data(); }

    template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
    const Scalar *data(Array &a) { return a.data(); }

    // Attempt to figure out a constructor of `Stride` that will work.
    // If both strides are fixed, use a default constructor:
    template <typename S> using stride_ctor_default = bool_constant<
        S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_default_constructible<S>::value>;
    // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
    // Eigen::Stride, and use it:
    template <typename S> using stride_ctor_dual = bool_constant<
        !stride_ctor_default<S>::value && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
    // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
    // it (passing whichever stride is dynamic).
    template <typename S> using stride_ctor_outer = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;
    template <typename S> using stride_ctor_inner = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;

    template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex) { return S(); }
    template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); }
    template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); }
    template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); }

};
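
// Usage sketch (names hypothetical, same includes/alias as the sketches above). A
// Ref<const MatrixXd> argument accepts conforming float64 arrays without copying; other inputs
// (wrong dtype, incompatible strides) are silently copied unless noconvert() is requested, in
// which case they raise TypeError instead:
//
//     PYBIND11_MODULE(example, m) {
//         // Conforming arrays are passed by reference; non-conforming ones go through a copy.
//         m.def("sum_all", [](Eigen::Ref<const Eigen::MatrixXd> mat) { return mat.sum(); });
//         // Same signature, but refuse the silent copy.
//         m.def("sum_all_nocopy",
//               [](Eigen::Ref<const Eigen::MatrixXd> mat) { return mat.sum(); },
//               py::arg("mat").noconvert());
//     }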

// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
protected:
    using Matrix = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
    using props = EigenProps<Matrix>;
public:
    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        handle h = eigen_encapsulate<props>(new Matrix(src));
        return h;
    }
    static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); }

    static constexpr auto name = props::descriptor;

    // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e.
    // they can be return types but not bound arguments). We still declare them (explicitly
    // deleted) so that you end up here, with a clearer error, if you try anyway.
    bool load(handle, bool) = delete;
    operator Type() = delete;
    template <typename> using cast_op_type = Type;
};
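
// Illustrative sketch (names hypothetical): "other" Eigen types such as DiagonalMatrix can be
// returned (converted to a dense numpy array via the copy above) but cannot be bound as argument
// types:
//
//     PYBIND11_MODULE(example, m) {
//         // Returned as a dense 3x3 float64 numpy array, off-diagonal entries filled with zeros.
//         m.def("make_scaling", [] { return Eigen::DiagonalMatrix<double, 3>(1.0, 2.0, 3.0); });
//     }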

template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
    using Scalar = typename Type::Scalar;
    using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
    using Index = typename Type::Index;
    static constexpr bool rowMajor = Type::IsRowMajor;

    bool load(handle src, bool) {
        if (!src)
            return false;

        auto obj = reinterpret_borrow<object>(src);
        object sparse_module = module_::import("scipy.sparse");
        object matrix_type = sparse_module.attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        if (!type::handle_of(obj).is(matrix_type)) {
            try {
                obj = matrix_type(obj);
            } catch (const error_already_set &) {
                return false;
            }
        }

        auto values = array_t<Scalar>((object) obj.attr("data"));
        auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
        auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
        auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
        auto nnz = obj.attr("nnz").cast<Index>();

        if (!values || !innerIndices || !outerIndices)
            return false;

        value = Eigen::MappedSparseMatrix<Scalar, Type::Flags, StorageIndex>(
            shape[0].cast<Index>(), shape[1].cast<Index>(), nnz,
            outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data());

        return true;
    }

    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        const_cast<Type&>(src).makeCompressed();

        object matrix_type = module_::import("scipy.sparse").attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        array data(src.nonZeros(), src.valuePtr());
        array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
        array innerIndices(src.nonZeros(), src.innerIndexPtr());

        return matrix_type(
            std::make_tuple(data, innerIndices, outerIndices),
            std::make_pair(src.rows(), src.cols())
        ).release();
    }

    PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[")
            + npy_format_descriptor<Scalar>::name + _("]"));
};
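
// Usage sketch (names hypothetical, same includes/alias as the sketches above). Sparse matrices
// cross the boundary as scipy.sparse.csc_matrix (csr_matrix for row-major Eigen types), so scipy
// must be importable at runtime:
//
//     PYBIND11_MODULE(example, m) {
//         // Accepts a scipy.sparse matrix (or anything csc_matrix() can convert) and returns the
//         // transpose as a new scipy.sparse.csc_matrix.
//         m.def("transpose", [](const Eigen::SparseMatrix<double> &mat) {
//             return Eigen::SparseMatrix<double>(mat.transpose());
//         });
//     }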

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

#if defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#  pragma warning(pop)
#endif