diff --git a/Package.swift b/Package.swift
index 52fb6ff0..9b6c7715 100644
--- a/Package.swift
+++ b/Package.swift
@@ -352,7 +352,8 @@ let package = Package(
         Arch.OS.python(),
       ],
       exclude: [
-        /* prefer pybindimath */
+        /* metapy builds pyimath. */
+        "src/python/PyBindImath",
         "src/python/PyImath",
         "src/python/PyImathTest",
         "src/python/PyImathNumpy",
@@ -527,6 +528,7 @@ let package = Package(
       exclude: getConfig(for: .mpy).exclude,
       publicHeadersPath: "include/python",
       cxxSettings: [
+        .headerSearchPath("include/python/PyImath"),
        .headerSearchPath("include/python/PyAlembic"),
        .headerSearchPath("include/python/PyOIIO"),
       ]
diff --git a/Sources/MetaPy/PyImath/PyImath.cpp b/Sources/MetaPy/PyImath/PyImath.cpp
new file mode 100644
index 00000000..23fab379
--- /dev/null
+++ b/Sources/MetaPy/PyImath/PyImath.cpp
@@ -0,0 +1,27 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright Contributors to the OpenEXR Project.
+//
+
+// clang-format off
+
+#include "PyImath.h"
+#include "PyImathExport.h"
+
+namespace PyImath {
+
+template <> PYIMATH_EXPORT const char * BoolArray::name() { return "BoolArray"; }
+template <> PYIMATH_EXPORT const char * SignedCharArray::name() { return "SignedCharArray"; }
+template <> PYIMATH_EXPORT const char * UnsignedCharArray::name() { return "UnsignedCharArray"; }
+template <> PYIMATH_EXPORT const char * ShortArray::name() { return "ShortArray"; }
+template <> PYIMATH_EXPORT const char * UnsignedShortArray::name(){ return "UnsignedShortArray"; }
+template <> PYIMATH_EXPORT const char * IntArray::name() { return "IntArray"; }
+template <> PYIMATH_EXPORT const char * UnsignedIntArray::name() { return "UnsignedIntArray"; }
+template <> PYIMATH_EXPORT const char * FloatArray::name() { return "FloatArray"; }
+template <> PYIMATH_EXPORT const char * DoubleArray::name() { return "DoubleArray"; }
+template <> PYIMATH_EXPORT const char * VIntArray::name() { return "VIntArray"; }
+template <> PYIMATH_EXPORT const char * VFloatArray::name() { return "VFloatArray"; }
+template <> PYIMATH_EXPORT const char * VV2iArray::name() { return "VV2iArray"; }
+template <> PYIMATH_EXPORT const char * VV2fArray::name() { return "VV2fArray"; }
+
+}
diff --git a/Sources/MetaPy/PyImath/PyImathAutovectorize.cpp b/Sources/MetaPy/PyImath/PyImathAutovectorize.cpp
new file mode 100644
index 00000000..9699e631
--- /dev/null
+++ b/Sources/MetaPy/PyImath/PyImathAutovectorize.cpp
@@ -0,0 +1,90 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright Contributors to the OpenEXR Project.
+// + +// clang-format off + +#include "PyImathAutovectorize.h" + + +namespace PyImath { + +namespace detail { +// +// cheek possible vectorizations to ensure correctness +// +// single argument should be ((false),(true)) +// +BOOST_STATIC_ASSERT(( size::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( size::type,0>::type>::type::value == 1 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( size::type,1>::type>::type::value == 1 )); +BOOST_STATIC_ASSERT(( at_c::type,1>::type,0>::type::value == true )); + +// +// two argument should be ((false,false),(false,true),(true,false),(true,true)) +// +BOOST_STATIC_ASSERT(( size::type>::type::value == 4 )); +BOOST_STATIC_ASSERT(( size::type,0>::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type,1>::type::value == false )); +BOOST_STATIC_ASSERT(( size::type,1>::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,1>::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( at_c::type,1>::type,1>::type::value == true )); +BOOST_STATIC_ASSERT(( size::type,2>::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,2>::type,0>::type::value == true )); +BOOST_STATIC_ASSERT(( at_c::type,2>::type,1>::type::value == false )); +BOOST_STATIC_ASSERT(( size::type,3>::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,3>::type,0>::type::value == true )); +BOOST_STATIC_ASSERT(( at_c::type,3>::type,1>::type::value == true )); +BOOST_STATIC_ASSERT(( size::type>::type::value == 8 )); + +// +// Check disallow_vectorization for given vectorizable flags +// +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); + +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == true )); +BOOST_STATIC_ASSERT(( disallow_vectorization >::apply >::type::value == false )); + +// +// Check allowable_vectorizations, single argument not vectorizable, and two argument second argument vectorizable. 
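+// (Reading the two typedefs below: AV1f presumably models a one-argument
+// function whose single argument is not vectorizable, while AV2ft models a
+// two-argument function whose second argument is vectorizable.)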
+// +typedef allowable_vectorizations > AV1f; +BOOST_STATIC_ASSERT(( size::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type::value == true )); +BOOST_STATIC_ASSERT(( size::type::value == 1 )); +BOOST_STATIC_ASSERT(( size::type>::type::value == 1 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type::value == false )); + +typedef allowable_vectorizations > AV2ft; +BOOST_STATIC_ASSERT(( size::type::value == 2 )); +BOOST_STATIC_ASSERT(( size::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( at_c::type,1>::type::value == false )); +BOOST_STATIC_ASSERT(( size::type>::type::value == 2 )); +BOOST_STATIC_ASSERT(( at_c::type,0>::type::value == false )); +BOOST_STATIC_ASSERT(( at_c::type,1>::type::value == true )); + +} // namespace detail + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathBasicTypes.cpp b/Sources/MetaPy/PyImath/PyImathBasicTypes.cpp new file mode 100644 index 00000000..dfc3b4bd --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathBasicTypes.cpp @@ -0,0 +1,95 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include "PyImath.h" +#include "PyImathExport.h" +#include "PyImathBasicTypes.h" +#include "PyImathFixedArray.h" +#include "PyImathFixedVArray.h" +#include "PyImathBufferProtocol.h" + +using namespace boost::python; + +namespace PyImath { + +void +register_basicTypes() +{ + class_ bclass = BoolArray::register_("Fixed length array of bool"); + add_comparison_functions(bclass); + + class_ scclass = SignedCharArray::register_("Fixed length array of signed chars"); + add_arithmetic_math_functions(scclass); + add_mod_math_functions(scclass); + add_comparison_functions(scclass); + add_ordered_comparison_functions(scclass); + + class_ ucclass = UnsignedCharArray::register_("Fixed length array of unsigned chars"); + add_arithmetic_math_functions(ucclass); + add_mod_math_functions(ucclass); + add_comparison_functions(ucclass); + add_ordered_comparison_functions(ucclass); + add_buffer_protocol(ucclass); + + class_ sclass = ShortArray::register_("Fixed length array of shorts"); + add_arithmetic_math_functions(sclass); + add_mod_math_functions(sclass); + add_comparison_functions(sclass); + add_ordered_comparison_functions(sclass); + + class_ usclass = UnsignedShortArray::register_("Fixed length array of unsigned shorts"); + add_arithmetic_math_functions(usclass); + add_mod_math_functions(usclass); + add_comparison_functions(usclass); + add_ordered_comparison_functions(usclass); + + class_ iclass = IntArray::register_("Fixed length array of ints"); + add_arithmetic_math_functions(iclass); + add_mod_math_functions(iclass); + add_comparison_functions(iclass); + add_ordered_comparison_functions(iclass); + add_explicit_construction_from_type(iclass); + add_explicit_construction_from_type(iclass); + add_buffer_protocol(iclass); + + class_ uiclass = UnsignedIntArray::register_("Fixed length array of unsigned ints"); + add_arithmetic_math_functions(uiclass); + add_mod_math_functions(uiclass); + add_comparison_functions(uiclass); + add_ordered_comparison_functions(uiclass); + add_explicit_construction_from_type(uiclass); + add_explicit_construction_from_type(uiclass); + + class_ fclass = FloatArray::register_("Fixed length array of floats"); + add_arithmetic_math_functions(fclass); + 
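+    // (The helper calls below presumably attach Python's ** operator, the
+    // comparison operators, explicit conversions, and the buffer protocol
+    // to FloatArray.)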
add_pow_math_functions(fclass); + add_comparison_functions(fclass); + add_ordered_comparison_functions(fclass); + add_explicit_construction_from_type(fclass); + add_explicit_construction_from_type(fclass); + add_buffer_protocol(fclass); + + class_ dclass = DoubleArray::register_("Fixed length array of doubles"); + add_arithmetic_math_functions(dclass); + add_pow_math_functions(dclass); + add_comparison_functions(dclass); + add_ordered_comparison_functions(dclass); + add_explicit_construction_from_type(dclass); + add_explicit_construction_from_type(dclass); + add_buffer_protocol(dclass); + + class_ ivclass = VIntArray::register_("Variable fixed length array of ints"); + class_ fvclass = VFloatArray::register_("Variable fixed length array of floats"); + class_ v2ivclass = VV2iArray::register_("Variable fixed length array of V2i"); + class_ v2fvclass = VV2fArray::register_("Variable fixed length array of V2f"); + // Don't add other functionality until its defined better. +} + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathBox.cpp b/Sources/MetaPy/PyImath/PyImathBox.cpp new file mode 100644 index 00000000..cb0885a8 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathBox.cpp @@ -0,0 +1,398 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#include +#include +#include "PyImathBox.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImathTask.h" +#include "PyImathBoxArrayImpl.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; +using namespace PyImath; + +template struct BoxName { static const char *value; }; +template <> const char *BoxName::value = "Box2s"; +template <> const char *BoxName::value = "Box2i"; +template <> const char *BoxName::value = "Box2i64"; +template <> const char *BoxName::value = "Box2f"; +template <> const char *BoxName::value = "Box2d"; +template <> const char *BoxName::value = "Box3s"; +template <> const char *BoxName::value = "Box3i"; +template <> const char *BoxName::value = "Box3i64"; +template <> const char *BoxName::value = "Box3f"; +template <> const char *BoxName::value = "Box3d"; + +template <> PYIMATH_EXPORT const char *PyImath::Box2sArray::name() { return "Box2sArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box2iArray::name() { return "Box2iArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box2i64Array::name() { return "Box2i64Array"; } +template <> PYIMATH_EXPORT const char *PyImath::Box2fArray::name() { return "Box2fArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box2dArray::name() { return "Box2dArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box3sArray::name() { return "Box3sArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box3iArray::name() { return "Box3iArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box3i64Array::name() { return "Box3i64Array"; } +template <> PYIMATH_EXPORT const char *PyImath::Box3fArray::name() { return "Box3fArray"; } +template <> PYIMATH_EXPORT const char *PyImath::Box3dArray::name() { return "Box3dArray"; } + +template +static Box * box2TupleConstructor1(const tuple &t) +{ + if(t.attr("__len__")() == 2) + { + // The constructor was called like this: + // Box2f ((V2f(1,2), V2f(3,4))) or + // Box2f (((1,2), (3,4))) + + PyObject *t0Obj = extract (t[0])().ptr(); + PyObject *t1Obj = extract (t[1])().ptr(); + T t0, t1; + if (V2::convert (t0Obj, &t0) && + 
V2::convert (t1Obj, &t1)) + { + return new Box (t0, t1); + } + + // The constructor was called like this: + // Box2f ((1,2)) + + else + { + T point; + point.x = extract(t[0]); + point.y = extract(t[1]); + return new Box(point); + } + } + else + throw std::invalid_argument ( "Invalid input to Box tuple constructor"); +} + +template +static Box * box2TupleConstructor2(const tuple &t0, const tuple &t1) +{ + if(t0.attr("__len__")() == 2 && t1.attr("__len__")() == 2) + { + T point0, point1; + point0.x = extract(t0[0]); point0.y = extract(t0[1]); + point1.x = extract(t1[0]); point1.y = extract(t1[1]); + + return new Box(point0, point1); + } + else + throw std::invalid_argument ("Invalid input to Box tuple constructor"); +} + +template +static Box *boxConstructor(const Box &box) +{ + Box *newBox = new Box; + newBox->min = box.min; + newBox->max = box.max; + + return newBox; +} + +template +static Box * box3TupleConstructor1(const tuple &t) +{ + if(t.attr("__len__")() == 3) + { + // The constructor was called like this: + // Box3f ((1,2,3)) + + T point; + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + return new Box(point); + } + + else if (t.attr("__len__")() == 2) + { + // The constructor was called like this: + // Box3f ((V3f(1,2,3), V3f(4,5,6))) or + // Box3f (((1,2,3), (4,5,6))) + + PyObject *t0Obj = extract (t[0])().ptr(); + PyObject *t1Obj = extract (t[1])().ptr(); + T t0, t1; + if (V3::convert (t0Obj, &t0) && + V3::convert (t1Obj, &t1)) + { + return new Box (t0, t1); + } + else + throw std::invalid_argument ("Invalid input to Box tuple constructor"); + } + + else + throw std::invalid_argument ("Invalid input to Box tuple constructor"); +} + +template +static Box * box3TupleConstructor2(const tuple &t0, const tuple &t1) +{ + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3) + { + T point0, point1; + point0.x = extract(t0[0]); + point0.y = extract(t0[1]); + point0.z = extract(t0[2]); + + point1.x = extract(t1[0]); + point1.y = extract(t1[1]); + point1.z = extract(t1[2]); + + return new Box(point0, point1); + } + else + throw std::invalid_argument ("Invalid input to Box tuple constructor"); +} + +template +static std::string Box_repr(const Box &box) +{ + std::stringstream stream; + typename boost::python::return_by_value::apply ::type converter; + + handle<> minObj (converter (box.min)); + handle<> minH (PYUTIL_OBJECT_REPR (minObj.get())); + std::string minReprStr = extract (minH.get()); + + handle<> maxObj (converter (box.max)); + handle<> maxH (PYUTIL_OBJECT_REPR (maxObj.get())); + std::string maxReprStr = extract (maxH.get()); + + stream << BoxName::value << "(" << minReprStr << ", " << maxReprStr << ")"; + + return stream.str(); +} + +template +static void boxSetMin(IMATH_NAMESPACE::Box &box, const T &m) +{ + box.min = m; +} + +template +static void boxSetMax(IMATH_NAMESPACE::Box &box, const T &m) +{ + box.max = m; +} + +template +static T +boxMin(IMATH_NAMESPACE::Box &box) +{ + return box.min; +} + +template +static T +boxMax(IMATH_NAMESPACE::Box &box) +{ + return box.max; +} + +template +struct IntersectsTask : public Task +{ + const IMATH_NAMESPACE::Box& box; + const PyImath::FixedArray& points; + PyImath::FixedArray& results; + + IntersectsTask(IMATH_NAMESPACE::Box& b, const PyImath::FixedArray &p, PyImath::FixedArray &r) + : box(b), points(p), results(r) {} + + void execute(size_t start, size_t end) + { + for(size_t p = start; p < end; ++p) + results[p] = box.intersects(points[p]); + } +}; + +template +struct ExtendByTask : public Task +{ + 
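+    // Each worker thread extends its own entry in 'boxes' (indexed by thread
+    // id); box_extendBy() below then merges the per-thread boxes together.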
std::vector >& boxes; + const PyImath::FixedArray& points; + + ExtendByTask(std::vector >& b, const PyImath::FixedArray &p) + : boxes(b), points(p) {} + + void execute(size_t start, size_t end, int tid) + { + for(size_t p = start; p < end; ++p) + boxes[tid].extendBy(points[p]); + } + void execute(size_t start, size_t end) + { + throw std::invalid_argument ("Box::ExtendBy execute requires a thread id"); + } +}; + +template +static void +box_extendBy(IMATH_NAMESPACE::Box &box, const PyImath::FixedArray &points) +{ + size_t numBoxes = workers(); + std::vector > boxes(numBoxes); + ExtendByTask task(boxes,points); + dispatchTask(task,points.len()); + for (size_t i=0; i +PyImath::FixedArray +box_intersects(IMATH_NAMESPACE::Box& box, const PyImath::FixedArray& points) +{ + size_t numPoints = points.len(); + PyImath::FixedArray mask(numPoints); + + IntersectsTask task(box,points,mask); + dispatchTask(task,numPoints); + return mask; +} + +template +class_ > +register_Box2() +{ + void (IMATH_NAMESPACE::Box::*extendBy1)(const T&) = &IMATH_NAMESPACE::Box::extendBy; + void (IMATH_NAMESPACE::Box::*extendBy2)(const IMATH_NAMESPACE::Box&) = &IMATH_NAMESPACE::Box::extendBy; + bool (IMATH_NAMESPACE::Box::*intersects1)(const T&) const = &IMATH_NAMESPACE::Box::intersects; + bool (IMATH_NAMESPACE::Box::*intersects2)(const IMATH_NAMESPACE::Box&) const = &IMATH_NAMESPACE::Box::intersects; + const char *name = BoxName::value; + class_ > box_class(name); + box_class + .def(init<>("Box() create empty box") ) + .def(init("Box(point)create box containing the given point") ) + .def(init("Box(point,point) create box continaing min and max") ) + .def("__init__", make_constructor(box2TupleConstructor1), "Box(point) where point is a python tuple") + .def("__init__", make_constructor(box2TupleConstructor2), "Box(point,point) where point is a python tuple") + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def_readwrite("min",&Box::min) + .def_readwrite("max",&Box::max) + .def("min", &boxMin) + .def("max", &boxMax) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
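+        // e.g. from Python (illustrative): Box2f((V2f(0, 0), V2f(1, 1))).center() == V2f(0.5, 0.5)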
+ .def("__repr__", &Box_repr) + .def("makeEmpty",&Box::makeEmpty,"makeEmpty() make the box empty") + .def("makeInfinite",&Box::makeInfinite,"makeInfinite() make the box cover all space") + .def("extendBy",extendBy1,"extendBy(point) extend the box by a point") + .def("extendBy",box_extendBy,"extendBy(array) extend the box the values in the array") + .def("extendBy",extendBy2,"extendBy(box) extend the box by a box") + .def("size",&Box::size,"size() size of the box") + .def("center",&Box::center,"center() center of the box") + .def("intersects",intersects1,"intersects(point) returns true if the box intersects the given point") + .def("intersects",intersects2,"intersects(box) returns true if the box intersects the given box") + .def("majorAxis",&Box::majorAxis,"majorAxis() major axis of the box") + .def("isEmpty",&Box::isEmpty,"isEmpty() returns true if the box is empty") + .def("isInfinite",&Box::isInfinite,"isInfinite() returns true if the box covers all space") + .def("hasVolume",&Box::hasVolume,"hasVolume() returns true if the box has volume") + .def("setMin",&boxSetMin,"setMin() sets the min value of the box") + .def("setMax",&boxSetMax,"setMax() sets the max value of the box") + ; + return box_class; +} + +template +static IMATH_NAMESPACE::Box +mulM44 (const IMATH_NAMESPACE::Box &b, const Matrix44 &m) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::transform (b, m); +} + +template +static const IMATH_NAMESPACE::Box & +imulM44 (IMATH_NAMESPACE::Box &b, const Matrix44 &m) +{ + MATH_EXC_ON; + b = IMATH_NAMESPACE::transform (b, m); + return b; +} + +template +class_ > +register_Box3() +{ + void (IMATH_NAMESPACE::Box::*extendBy1)(const T&) = &IMATH_NAMESPACE::Box::extendBy; + void (IMATH_NAMESPACE::Box::*extendBy2)(const IMATH_NAMESPACE::Box&) = &IMATH_NAMESPACE::Box::extendBy; + bool (IMATH_NAMESPACE::Box::*intersects1)(const T&) const = &IMATH_NAMESPACE::Box::intersects; + bool (IMATH_NAMESPACE::Box::*intersects2)(const IMATH_NAMESPACE::Box&) const = &IMATH_NAMESPACE::Box::intersects; + const char *name = BoxName::value; + class_ > box_class(name); + box_class + .def(init<>("Box() create empty box") ) + .def(init("Box(point)create box containing the given point") ) + .def(init("Box(point,point) create box continaing min and max") ) + .def("__init__", make_constructor(box3TupleConstructor1), "Box(point) where point is a python tuple") + .def("__init__", make_constructor(box3TupleConstructor2), "Box(point,point) where point is a python tuple") + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def("__init__", make_constructor(boxConstructor)) + .def_readwrite("min",&Box::min) + .def_readwrite("max",&Box::max) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
+ .def("__mul__", &mulM44) + .def("__mul__", &mulM44) + .def("__imul__", &imulM44,return_internal_reference<>()) + .def("__imul__", &imulM44,return_internal_reference<>()) + .def("min", &boxMin) + .def("max", &boxMax) + .def("__repr__", &Box_repr) + .def("makeEmpty",&Box::makeEmpty,"makeEmpty() make the box empty") + .def("makeInfinite",&Box::makeInfinite,"makeInfinite() make the box cover all space") + .def("extendBy",extendBy1,"extendBy(point) extend the box by a point") + .def("extendBy",box_extendBy,"extendBy(array) extend the box the values in the array") + .def("extendBy",extendBy2,"extendBy(box) extend the box by a box") + .def("size",&Box::size,"size() size of the box") + .def("center",&Box::center,"center() center of the box") + .def("intersects",intersects1,"intersects(point) returns true if the box intersects the given point") + .def("intersects",intersects2,"intersects(box) returns true if the box intersects the given box") + .def("intersects",box_intersects, "intersects(array) returns an int array where 0 indicates the point is not in the box and 1 indicates that it is") + .def("majorAxis",&Box::majorAxis,"majorAxis() major axis of the box") + .def("isEmpty",&Box::isEmpty,"isEmpty() returns true if the box is empty") + .def("isInfinite",&Box::isInfinite,"isInfinite() returns true if the box covers all space") + .def("hasVolume",&Box::hasVolume,"hasVolume() returns true if the box has volume") + .def("setMin",&boxSetMin,"setMin() sets the min value of the box") + .def("setMax",&boxSetMax,"setMax() sets the max value of the box") + ; + + decoratecopy(box_class); + + return box_class; +} + + +template PYIMATH_EXPORT class_ > register_Box2(); +template PYIMATH_EXPORT class_ > register_Box2(); +template PYIMATH_EXPORT class_ > register_Box2(); +template PYIMATH_EXPORT class_ > register_Box2(); +template PYIMATH_EXPORT class_ > register_Box2(); +template PYIMATH_EXPORT class_ > register_Box3(); +template PYIMATH_EXPORT class_ > register_Box3(); +template PYIMATH_EXPORT class_ > register_Box3(); +template PYIMATH_EXPORT class_ > register_Box3(); +template PYIMATH_EXPORT class_ > register_Box3(); + +} diff --git a/Sources/MetaPy/PyImath/PyImathBox2Array.cpp b/Sources/MetaPy/PyImath/PyImathBox2Array.cpp new file mode 100644 index 00000000..6020b30a --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathBox2Array.cpp @@ -0,0 +1,25 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathBoxArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +using namespace boost::python; + +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box2s PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box2s(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box2i PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box2i(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box2i64 PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box2i64(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box2f PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box2f(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box2d PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box2d(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathBox3Array.cpp b/Sources/MetaPy/PyImath/PyImathBox3Array.cpp new file mode 100644 index 00000000..7bf3a8b1 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathBox3Array.cpp @@ -0,0 +1,25 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathBoxArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +using namespace boost::python; + +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); +template PYIMATH_EXPORT class_ > register_BoxArray(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box3s PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box3s(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box3i PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box3i(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box3i64 PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box3i64(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box3f PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box3f(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Box3d PyImath::FixedArrayDefaultValue::value() { return IMATH_NAMESPACE::Box3d(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathBufferProtocol.cpp b/Sources/MetaPy/PyImath/PyImathBufferProtocol.cpp new file mode 100644 index 00000000..c4bce93c --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathBufferProtocol.cpp @@ -0,0 +1,415 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#include "PyImathBufferProtocol.h" +#include "PyImathExport.h" +#include "PyImathFixedArray.h" +#include "PyImathFixedArrayTraits.h" + +#include + +namespace PyImath { + +namespace { + +// A wrapper API for the buffer protocol functions to access and traverse +// the memory of a FixedArray. +// +template +class BufferAPI +{ + using T = typename ArrayT::BaseType; + static_assert (std::is_same >::value, + "BufferAPI is only valid for FixedArray classes"); + + public: + + virtual ~BufferAPI() + { delete[] shape; delete[] stride; } + + // The size, in bytes, of the smallest individual element of a buffer + // element. For example, a V3fArray is a 2D array of 4-byte floats. 
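+    // (In that V3fArray case atomicSize() would report sizeof(float), i.e. 4 bytes.)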
+ Py_ssize_t atomicSize() const + { return FixedArrayAtomicSize::value; } + + // NonCopyable + BufferAPI (const BufferAPI &rhs) = delete; + BufferAPI &operator= (const BufferAPI &rhs) = delete; + + // API + virtual bool sharedBuffer() const = 0; + virtual Py_ssize_t numBytes() const = 0; + virtual bool readOnly() const = 0; + virtual void * buffer() = 0; + + protected: + + BufferAPI (const unsigned int length, const unsigned int interleave) + : dimensions (FixedArrayDimension::value), + shape (new Py_ssize_t[dimensions]), + stride (new Py_ssize_t[dimensions]) + { + shape[0] = Py_ssize_t (length); + stride[0] = atomicSize() * FixedArrayWidth::value * interleave; + for (int d=1; d::value * interleave; + stride[d] = atomicSize(); + } + } + + public: + + // The number of dimensions in the data buffer (e.g. a one dimensional + // FixedArray of V3fs would have a data buffer dimension of 2). + int dimensions; + + Py_ssize_t *shape; + Py_ssize_t *stride; +}; + + +// The SharedBuffer class is used for buffers that will be shared between +// two different Python objects, and so the FixedArray is stored internally +// as a reference. +// +template +class SharedBufferAPI : public BufferAPI +{ + public: + + using BufferAPI::atomicSize; + + explicit + SharedBufferAPI (ArrayT &a) + : BufferAPI (a.len(), a.stride()), + _orig (a) + {} + + // NonCopyable + SharedBufferAPI (const SharedBufferAPI &rhs) = delete; + SharedBufferAPI &operator= (const SharedBufferAPI &rhs) = delete; + + virtual ~SharedBufferAPI() = default; + + bool sharedBuffer() const override + { return true; } + + Py_ssize_t numBytes() const override + { return _orig.len() * atomicSize() * _orig.stride(); } + + bool readOnly() const override + { return !_orig.writable(); } + + void *buffer() override + { return static_cast (&_orig.direct_index(0)); } + + private: + + ArrayT &_orig; +}; + + +// This class exists for the case in which the Python array view +// object is writable but the source FixedArray is not. 
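+// In that case the exported view is backed by a private copy of the array, so
+// writes through the buffer do not alias the original FixedArray; getbuffer()
+// below selects CopyBufferAPI whenever PyBUF_WRITABLE is requested for a
+// non-writable array.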
+// +template +class CopyBufferAPI : public BufferAPI +{ + public: + + using BufferAPI::atomicSize; + + explicit + CopyBufferAPI (ArrayT &a) + : BufferAPI (a.len(), a.stride()), + _copy (a) + {} + + virtual ~CopyBufferAPI() = default; + + // NonCopyable + CopyBufferAPI (const CopyBufferAPI &rhs) = delete; + CopyBufferAPI &operator= (const CopyBufferAPI &rhs) = delete; + + bool sharedBuffer() const override + { return false; } + + Py_ssize_t numBytes() const override + { return _copy.len() * atomicSize() * _copy.stride(); } + + bool readOnly() const override + { return false; } + + void *buffer() override + { return static_cast (&_copy.direct_index(0)); } + + private: + + ArrayT _copy; +}; + + +template +Py_ssize_t +bufferInfo (ArrayT &array, void **buf) +{ + *buf = static_cast (&array.direct_index(0)); + return array.len() * sizeof(typename ArrayT::BaseType); +} + + +template +Py_ssize_t +getreadbuf (PyObject *obj, Py_ssize_t segment, void **buf) +{ + if (segment != 0) + { + PyErr_SetString (PyExc_ValueError, "FixedArrays are not segmented"); + return -1; + } + + boost::python::extract eObj (obj); + if (!eObj.check()) + { + PyErr_SetString (PyExc_ValueError, "Cannot extract FixedArray"); + return -1; + } + + ArrayT array = eObj(); + return bufferInfo (array, buf); +} + + +template +Py_ssize_t +getwritebuf (PyObject *obj, Py_ssize_t segment, void **buf) +{ + PyErr_SetString + (PyExc_ValueError, + "writable buffers supported only with new-style buffer protocol."); + + return -1; +} + + +template +Py_ssize_t +getsegcount (PyObject *obj, Py_ssize_t *lenp) +{ + // FixedArrays are always in one, fixed-sized block + return 1; +} + + +template +Py_ssize_t +getcharbuf (PyObject *obj, Py_ssize_t segment, const char **charBuf) +{ + return getreadbuf (obj, segment, (void **)charBuf); +} + + +template +void +releasebuffer (PyObject *obj, Py_buffer *view) +{ + delete static_cast *>(view->internal); +} + + +template +int +getbuffer (PyObject *obj, Py_buffer *view, int flags) +{ + if (view == nullptr) + { + PyErr_SetString (PyExc_ValueError, "Buffer view is NULL"); + return -1; + } + + if ((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) + { + PyErr_SetString (PyExc_ValueError, "FORTRAN order not supported"); + return -1; + } + + boost::python::extract eObj (obj); + if (!eObj.check()) + { + PyErr_SetString (PyExc_ValueError, "Cannot extract FixedArray"); + return -1; + } + ArrayT array = eObj(); + + if (array.isMaskedReference()) + { + PyErr_SetString (PyExc_ValueError, "Buffer protocol does not support masked references"); + return -1; + } + + BufferAPI *api = nullptr; + bool writableBuffer = ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE); + if (writableBuffer && !array.writable()) + api = new CopyBufferAPI (array); + else + api = new SharedBufferAPI (array); + + view->internal = api; + view->buf = api->buffer(); + view->len = api->numBytes(); + view->readonly = api->readOnly(); + view->itemsize = api->atomicSize(); + view->suboffsets = nullptr; + + view->format = ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) + ? PyFormat() + : nullptr; + + view->strides = ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) + ? api->stride + : nullptr; + + if ((flags & PyBUF_ND) == PyBUF_ND) + { + view->ndim = api->dimensions; + view->shape = api->shape; + } + else + { + view->ndim = 0; + view->shape = nullptr; + } + + view->obj = obj; + Py_INCREF (obj); + + return 0; +} + + +// This structure serves to instantiate a PyBufferProcs instance with pointers +// to the right buffer protocol functions. 
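+// add_buffer_protocol() installs the static 'procs' table into the Python
+// type's tp_as_buffer slot so that instances expose the buffer interface.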
+template +struct FixedArrayBufferProcs +{ + static PyBufferProcs procs; +}; + + +template +PyBufferProcs FixedArrayBufferProcs::procs = +{ +#if PY_MAJOR_VERSION == 2 + (readbufferproc) getreadbuf, + (writebufferproc) getwritebuf, + (segcountproc) getsegcount, + (charbufferproc) getcharbuf, + (getbufferproc) getbuffer, + (releasebufferproc) releasebuffer +#else // Note deprecation of support for the older style + (getbufferproc) getbuffer, + (releasebufferproc) releasebuffer +#endif +}; + +} // anonymous + + +template +void +add_buffer_protocol (boost::python::class_ &classObj) +{ + auto *typeObj = reinterpret_cast (classObj.ptr()); + typeObj->tp_as_buffer = &FixedArrayBufferProcs::procs; +#if PY_MAJOR_VERSION == 2 + typeObj->tp_flags |= (Py_TPFLAGS_HAVE_NEWBUFFER | Py_TPFLAGS_HAVE_GETCHARBUFFER); +#endif +} + + +template +ArrayT * +fixedArrayFromBuffer (PyObject *obj) +{ + if (!PyObject_CheckBuffer (obj)) + throw std::invalid_argument ("Python object does not support the buffer protocol"); + + // Request a strided buffer with type & dimensions. + Py_buffer view; + memset(&view, 0, sizeof(view)); + if (PyObject_GetBuffer (obj, &view, PyBUF_FORMAT | PyBUF_STRIDES) != 0) + { + throw std::logic_error ("Failed to get dimensioned, typed buffer"); + } + + // We have a buffer. Check that the type matches. + if (!view.format || + view.format[0] == '>' || + view.format[0] == '!' || + view.format[0] == '=' || + view.format[0] == '^') + { + PyBuffer_Release(&view); + throw std::invalid_argument ("Unsupported buffer type"); + } + + ArrayT *array = new ArrayT (view.shape[0], PyImath::UNINITIALIZED); + memcpy (reinterpret_cast(&array->direct_index(0)), view.buf, view.len); + PyBuffer_Release(&view); + + return array; +} + +/////////////////////////////////////////////////////////////////////////////// + +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > &classObj); + +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol 
(boost::python::class_ > > &classObj); +template PYIMATH_EXPORT void add_buffer_protocol (boost::python::class_ > > &classObj); + +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray >* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray* fixedArrayFromBuffer (PyObject *obj); +template PYIMATH_EXPORT FixedArray* fixedArrayFromBuffer (PyObject *obj); + +} diff --git a/Sources/MetaPy/PyImath/PyImathColor3.cpp b/Sources/MetaPy/PyImath/PyImathColor3.cpp new file mode 100644 index 00000000..0b8dfeec --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathColor3.cpp @@ -0,0 +1,652 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +// +// This .C file was turned into a header file so that instantiations +// of the various V3* types can be spread across multiple files in +// order to work around MSVC limitations. +// + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathColor.h" +#include "PyImathVec.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImathColor3ArrayImpl.h" + +namespace PyImath { +template <> const char *PyImath::C3cArray::name() { return "C3cArray"; } +template <> const char *PyImath::C3fArray::name() { return "C3fArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct Color3Name { static const char *value; }; +template<> const char *Color3Name::value = "Color3c"; +template<> const char *Color3Name::value = "Color3f"; + +// create a new default constructor that initializes Color3 to zero. +template +static Color3 * Color3_construct_default() +{ + return new Color3(T(0),T(0),T(0)); +} + +template +static Color3 * Color3_component_construct1(S x, S y, S z) +{ + // Assigning a floating point value to an integer type can cause a + // float-point error, which we want to translate into an exception. 
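+    // (MATH_EXC_ON presumably enables that floating-point-to-exception
+    // translation for the scope of this constructor.)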
+ + MATH_EXC_ON; + + if(strcmp(Color3Name::value, "Color3c") == 0) + { + unsigned char r = (unsigned char) x; + unsigned char g = (unsigned char) y; + unsigned char b = (unsigned char) z; + + return new Color3(r,g,b); + } + else + return new Color3(T (x), T(y), T(z)); +} + +template +static Color3 * Color3_component_construct2(S x) +{ + MATH_EXC_ON; + + if(strcmp(Color3Name::value, "Color3c") == 0) + { + unsigned char u = (unsigned char) x; + + return new Color3(u,u,u); + } + else + return new Color3(T(x), T(x), T(x)); +} + +template +static Color3 * Color3_color_construct(const Color3 &c) +{ + MATH_EXC_ON; + + if(strcmp(Color3Name::value, "Color3c") == 0) + { + unsigned char r = (unsigned char) c.x; + unsigned char g = (unsigned char) c.y; + unsigned char b = (unsigned char) c.z; + + return new Color3(r,g,b); + } + else + return new Color3(T (c.x), T(c.y), T(c.z)); +} + +template +static Color3 * Color3_vector_construct(const Vec3 &c) +{ + MATH_EXC_ON; + + if(strcmp(Color3Name::value, "Color3c") == 0) + { + unsigned char r = (unsigned char) c.x; + unsigned char g = (unsigned char) c.y; + unsigned char b = (unsigned char) c.z; + + return new Color3(r,g,b); + } + else + return new Color3(T (c.x), T(c.y), T(c.z)); +} + + +template +static std::string +color3_str(const Color3 &v) +{ + std::stringstream stream; + if(strcmp(Color3Name::value, "Color3c") == 0) + { + int r = int(v.x); + int g = int(v.y); + int b = int(v.z); + + stream << Color3Name::value << "(" << r << ", " << g << ", " << b << ")"; + return stream.str(); + } + else + { + stream << Color3Name::value << "(" << v.x << ", " << v.y << ", " << v.z << ")"; + return stream.str(); + } +} + +// Non-specialized repr is same as str +template +static std::string +color3_repr(const Color3 &v) +{ + return color3_str(v); +} + +// Specialization for float to full precision +template <> +std::string +color3_repr(const Color3 &v) +{ + return (boost::format("%s(%.9g, %.9g, %.9g)") + % Color3Name::value % v.x % v.y % v.z).str(); +} + +// No specialization for double, since we don't instantiate Color3d + + +template +static Color3 * Color3_construct_tuple(const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + return new Color3(extract(t[0]), extract(t[1]), extract(t[2])); + } + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 * Color3_construct_list(const list &l) +{ + MATH_EXC_ON; + if(l.attr("__len__")() == 3) + { + return new Color3(extract(l[0]), extract(l[1]), extract(l[2])); + } + else + throw std::invalid_argument ("Color3 expects list of length 3"); +} + +template +static const Color3 & +iadd(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color += color2; +} + +template +static Color3 +add(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color + color2; +} + +template +static Color3 +addTuple(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(color.x + extract(t[0]), + color.y + extract(t[1]), + color.z + extract(t[2])); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +addT(Color3 &v, T a) +{ + MATH_EXC_ON; + Color3 w(v.x + a, v.y + a, v.z + a); + return w; +} + +template +static const Color3 & +isub(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color -= color2; +} + +template +static Color3 +sub(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color - color2; +} + +template +static Color3 +subtractL(Color3 
&color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(color.x - extract(t[0]), + color.y - extract(t[1]), + color.z - extract(t[2])); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +subtractLT(const Color3 &color, T a) +{ + MATH_EXC_ON; + return Color3(color.x - a, + color.y - a, + color.z - a); +} + +template +static Color3 +subtractRT(const Color3 &color, T a) +{ + MATH_EXC_ON; + return Color3(a - color.x, + a - color.y, + a - color.z); +} + +template +static Color3 +subtractR(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(extract(t[0]) - color.x, + extract(t[1]) - color.y, + extract(t[2]) - color.z); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +neg(Color3 &color) +{ + MATH_EXC_ON; + return -color; +} + +template +static const Color3 & +negate(Color3 &color) +{ + MATH_EXC_ON; + return color.negate(); +} + +template +static const Color3 & +imul(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color *= color2; +} + +template +static const Color3 & +imulT(Color3 &color, const T &t) +{ + MATH_EXC_ON; + return color *= t; +} + +template +static Color3 +mul(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color * color2; +} + +template +static Color3 +mulT(Color3 &color, const T &t) +{ + MATH_EXC_ON; + return color * t; +} + +template +static Color3 +rmulT(Color3 &color, const T &t) +{ + MATH_EXC_ON; + return t * color; +} + +template +static Color3 +mulTuple(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(color.x * extract(t[0]), + color.y * extract(t[1]), + color.z * extract(t[2])); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static const Color3 & +idiv(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color /= color2; +} + +template +static const Color3 & +idivT(Color3 &color, const T &t) +{ + MATH_EXC_ON; + return color /= t; +} + +template +static Color3 +div(Color3 &color, const Color3 &color2) +{ + MATH_EXC_ON; + return color / color2; +} + +template +static Color3 +divT(Color3 &color, const T &t) +{ + MATH_EXC_ON; + return color / t; +} + +template +static Color3 +divTupleL(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(color.x / extract(t[0]), + color.y / extract(t[1]), + color.z / extract(t[2])); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +divTupleR(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + return Color3(extract(t[0]) / color.x, + extract(t[1]) / color.y, + extract(t[2]) / color.z); + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +divTR(Color3 &color, T a) +{ + MATH_EXC_ON; + return Color3(a / color.x, + a / color.y, + a / color.z); +} + +template +static Color3 +hsv2rgb(Color3 &color) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::hsv2rgb(color); +} + +template +static Color3 +hsv2rgbTuple(const tuple &t) +{ + MATH_EXC_ON; + Color3 color; + if(t.attr("__len__")() == 3) + { + color.x = extract(t[0]); + color.y = extract(t[1]); + color.z = extract(t[2]); + + return IMATH_NAMESPACE::hsv2rgb(color); + } + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static Color3 +rgb2hsv(Color3 &color) +{ + MATH_EXC_ON; + 
return IMATH_NAMESPACE::rgb2hsv(color); +} + +template +static Color3 +rgb2hsvTuple(const tuple &t) +{ + MATH_EXC_ON; + Color3 color; + if(t.attr("__len__")() == 3) + { + color.x = extract(t[0]); + color.y = extract(t[1]); + color.z = extract(t[2]); + + return IMATH_NAMESPACE::rgb2hsv(color); + } + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static void +setValue1(Color3 &color, const T &a, const T &b, const T &c) +{ + MATH_EXC_ON; + color.setValue(a, b, c); +} + +template +static void +setValue2(Color3 &color, const Color3 &v) +{ + MATH_EXC_ON; + color.setValue(v); +} + +template +static void +setValueTuple(Color3 &color, const tuple &t) +{ + MATH_EXC_ON; + Color3 v; + if(t.attr("__len__")() == 3) + { + v.x = extract(t[0]); + v.y = extract(t[1]); + v.z = extract(t[2]); + + color.setValue(v); + } + else + throw std::invalid_argument ("Color3 expects tuple of length 3"); +} + +template +static bool +lessThan(Color3 &v, const Color3 &w) +{ + bool isLessThan = (v.x <= w.x && v.y <= w.y && v.z <= w.z) + && v != w; + + return isLessThan; +} + +template +static bool +greaterThan(Color3 &v, const Color3 &w) +{ + bool isGreaterThan = (v.x >= w.x && v.y >= w.y && v.z >= w.z) + && v != w; + + return isGreaterThan; +} + +template +static bool +lessThanEqual(Color3 &v, const Color3 &w) +{ + bool isLessThanEqual = (v.x <= w.x && v.y <= w.y && v.z <= w.z); + + return isLessThanEqual; +} + +template +static bool +greaterThanEqual(Color3 &v, const Color3 &w) +{ + bool isGreaterThanEqual = (v.x >= w.x && v.y >= w.y && v.z >= w.z); + + return isGreaterThanEqual; +} + +template +class_, bases > > +register_Color3() +{ + class_, bases > > color3_class(Color3Name::value, Color3Name::value,init >("copy construction")); + color3_class + .def("__init__",make_constructor(Color3_construct_default),"initialize to (0,0,0)") + .def("__init__",make_constructor(Color3_construct_tuple), "initialize to (r,g,b) with a python tuple") + .def("__init__",make_constructor(Color3_construct_list), "initialize to (r,g,b) with a python list") + .def("__init__",make_constructor(Color3_component_construct1)) + .def("__init__",make_constructor(Color3_component_construct1)) + .def("__init__",make_constructor(Color3_component_construct2)) + .def("__init__",make_constructor(Color3_component_construct2)) + .def("__init__",make_constructor(Color3_color_construct)) + .def("__init__",make_constructor(Color3_color_construct)) + .def("__init__",make_constructor(Color3_color_construct)) + .def("__init__",make_constructor(Color3_vector_construct)) + .def("__init__",make_constructor(Color3_vector_construct)) + .def("__init__",make_constructor(Color3_vector_construct)) + + .def_readwrite("r", &Color3::x) + .def_readwrite("g", &Color3::y) + .def_readwrite("b", &Color3::z) + .def("__str__", &color3_str) + .def("__repr__", &color3_repr) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
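+        // e.g. from Python (illustrative): Color3f(1, 0, 0) + Color3f(0, 1, 0) == Color3f(1, 1, 0)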
+ .def("__iadd__", &iadd,return_internal_reference<>()) + .def("__add__", &add) + .def("__add__", &addTuple) + .def("__add__", &addT) + .def("__radd__", &addTuple) + .def("__radd__", &addT) + .def("__isub__", &isub,return_internal_reference<>()) + .def("__sub__", &sub) + .def("__sub__", &subtractL) + .def("__sub__", &subtractLT) + .def("__rsub__", &subtractR) + .def("__rsub__", &subtractRT) + .def("__neg__", &neg) + .def("negate",&negate,return_internal_reference<>(),"component-wise multiplication by -1") + .def("__imul__", &imul,return_internal_reference<>()) + .def("__imul__", &imulT,return_internal_reference<>()) + .def("__mul__", &mul) + .def("__mul__", &mulT) + .def("__rmul__", &rmulT) + .def("__mul__", &mulTuple) + .def("__rmul__", &mulTuple) + .def("__idiv__", &idiv,return_internal_reference<>()) + .def("__idiv__", &idivT,return_internal_reference<>()) + .def("__itruediv__", &idiv,return_internal_reference<>()) + .def("__itruediv__", &idivT,return_internal_reference<>()) + .def("__div__", &div) + .def("__div__", &divT) + .def("__div__", &divTupleL) + .def("__truediv__", &div) + .def("__truediv__", &divT) + .def("__truediv__", &divTupleL) + .def("__rdiv__", &divTupleR) + .def("__rdiv__", &divTR) + .def("__rtruediv__", &divTupleR) + .def("__rtruediv__", &divTR) + .def("__lt__", &lessThan) + .def("__gt__", &greaterThan) + .def("__le__", &lessThanEqual) + .def("__ge__", &greaterThanEqual) + .def("dimensions", &Color3::dimensions,"dimensions() number of dimensions in the color") + .staticmethod("dimensions") + .def("baseTypeEpsilon", &Color3::baseTypeEpsilon,"baseTypeEpsilon() epsilon value of the base type of the color") + .staticmethod("baseTypeEpsilon") + .def("baseTypeMax", &Color3::baseTypeMax,"baseTypeMax() max value of the base type of the color") + .staticmethod("baseTypeMax") + .def("baseTypeLowest", &Color3::baseTypeLowest,"baseTypeLowest() largest negative value of the base type of the color") + .staticmethod("baseTypeLowest") + .def("baseTypeSmallest", &Color3::baseTypeSmallest,"baseTypeSmallest() smallest value of the base type of the color") + .staticmethod("baseTypeSmallest") + .def("hsv2rgb", &hsv2rgb, + "C.hsv2rgb() -- returns a new color which " + "is C converted from RGB to HSV") + .def("hsv2rgb", &rgb2hsvTuple) + + .def("rgb2hsv", &rgb2hsv, + "C.rgb2hsv() -- returns a new color which " + "is C converted from HSV to RGB") + .def("rgb2hsv", &rgb2hsvTuple) + + .def("setValue", &setValue1, + "C1.setValue(C2)\nC1.setValue(a,b,c) -- " + "set C1's elements") + .def("setValue", &setValue2) + .def("setValue", &setValueTuple) + ; + + decoratecopy(color3_class); + + return color3_class; +} + +template PYIMATH_EXPORT class_, bases > > register_Color3(); +template PYIMATH_EXPORT class_, bases > > register_Color3(); +template PYIMATH_EXPORT class_ > > register_Color3Array(); +template PYIMATH_EXPORT class_ > > register_Color3Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Color3 PyImath::FixedArrayDefaultValue >::value() +{ return IMATH_NAMESPACE::Color3(0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Color3 PyImath::FixedArrayDefaultValue >::value() +{ return IMATH_NAMESPACE::Color3(0,0,0); } + +} diff --git a/Sources/MetaPy/PyImath/PyImathColor4.cpp b/Sources/MetaPy/PyImath/PyImathColor4.cpp new file mode 100644 index 00000000..f39a143f --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathColor4.cpp @@ -0,0 +1,660 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathColor.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImathColor4Array2DImpl.h" +#include "PyImathColor4ArrayImpl.h" + +namespace PyImath { +template <> const char *PyImath::C4cArray::name() { return "C4cArray"; } +template <> const char *PyImath::C4fArray::name() { return "C4fArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct Color4Name { static const char *value; }; +template<> const char *Color4Name::value = "Color4c"; +template<> const char *Color4Name::value = "Color4f"; +// template<> const char *Color4ArrayName::value() { return "Color4fArray"; } +// template<> const char *Color4ArrayName::value() { return "Color4cArray"; } +template<> const char *Color4Array2DName::value() { return "Color4fArray2D"; } +template<> const char *Color4Array2DName::value() { return "Color4cArray2D"; } + +// create a new default constructor that initializes Color4 to zero. +template +static Color4 * Color4_construct_default() +{ + return new Color4(T(0),T(0),T(0),T(0)); +} + +template +static Color4 * Color4_component_construct1(S x, S y, S z, S w) +{ + // Assigning a floating point value to an integer type can cause a + // float-point error, which we want to translate into an exception. + + MATH_EXC_ON; + + if(strcmp(Color4Name::value, "Color4c") == 0) + { + unsigned char r = (unsigned char) x; + unsigned char g = (unsigned char) y; + unsigned char b = (unsigned char) z; + unsigned char a = (unsigned char) w; + return new Color4(r,g,b,a); + } + else + return new Color4(T(x) , T(y), T(z), T(w)); +} + +template +static Color4 * Color4_component_construct2(S x) +{ + MATH_EXC_ON; + if(strcmp(Color4Name::value, "Color4c") == 0) + { + unsigned char u = (unsigned char) x; + + return new Color4(u,u,u,u); + } + else + return new Color4(T(x),T(x),T(x),T(x)); +} + +template +static Color4 * Color4_color_construct(const Color4 &c) +{ + MATH_EXC_ON; + if(strcmp(Color4Name::value, "Color4c") == 0) + { + unsigned char r = (unsigned char) c.r; + unsigned char g = (unsigned char) c.g; + unsigned char b = (unsigned char) c.b; + unsigned char a = (unsigned char) c.a; + + return new Color4(r,g,b,a); + } + else + return new Color4(T (c.r), T(c.g), T(c.b), T(c.a)); +} + +template +static Color4 * Color4_construct_tuple(const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + { + return new Color4(extract(t[0]), + extract(t[1]), + extract(t[2]), + extract(t[3])); + } + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 * Color4_construct_list(const list &l) +{ + MATH_EXC_ON; + if(l.attr("__len__")() == 4) + { + return new Color4(extract(l[0]), + extract(l[1]), + extract(l[2]), + extract(l[3])); + } + else + throw std::invalid_argument ("Color4 expects list of length 4"); +} + +template +static std::string +color4_str(const Color4 &c) +{ + std::stringstream stream; + if(strcmp(Color4Name::value, "Color4c") == 0) + { + int r = int(c.r); + int g = int(c.g); + int b = int(c.b); + int a = int(c.a); + + stream << Color4Name::value << "(" << r << ", " << g << ", " << b << ", " << a << ")"; + return stream.str(); + } + else + { + stream << Color4Name::value << "(" << c.r << ", " << c.g << ", " << c.b << ", " << c.a << ")"; + return stream.str(); + } +} + +// Non-specialized repr is same as str +template 
+static std::string +color4_repr(const Color4 &c) +{ + return color4_str(c); +} + +// Specialization for float to full precision +template <> +std::string +color4_repr(const Color4 &c) +{ + return (boost::format("%s(%.9g, %.9g, %.9g, %.9g)") + % Color4Name::value % c.r % c.g % c.b % c.a).str(); +} + +// No specialization for double, since we don't instantiate Color4d + + +template +static Color4 +hsv2rgb(Color4 &color) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::hsv2rgb(color); +} + +template +static Color4 +hsv2rgbTuple(const tuple &t) +{ + MATH_EXC_ON; + Color4 color; + if(t.attr("__len__")() == 4) + { + color.r = extract(t[0]); + color.g = extract(t[1]); + color.b = extract(t[2]); + color.a = extract(t[3]); + + return IMATH_NAMESPACE::hsv2rgb(color); + } + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +rgb2hsv(Color4 &color) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::rgb2hsv(color); +} + +template +static Color4 +rgb2hsvTuple(const tuple &t) +{ + MATH_EXC_ON; + Color4 color; + if(t.attr("__len__")() == 4) + { + color.r = extract(t[0]); + color.g = extract(t[1]); + color.b = extract(t[2]); + color.a = extract(t[3]); + + return IMATH_NAMESPACE::rgb2hsv(color); + } + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + + +template +static void +setValue1(Color4 &color, const T &a, const T &b, const T &c, const T &d) +{ + MATH_EXC_ON; + color.setValue(a, b, c, d); +} + +template +static void +setValue2(Color4 &color, const Color4 &v) +{ + MATH_EXC_ON; + color.setValue(v); +} + +template +static void +setValueTuple(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + Color4 v; + if(t.attr("__len__")() == 4) + { + v.r = extract(t[0]); + v.g = extract(t[1]); + v.b = extract(t[2]); + v.a = extract(t[3]); + + color.setValue(v); + } + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static const Color4 & +iadd(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color += color2; +} + +template +static Color4 +add(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color + color2; +} + +template +static Color4 +addTuple(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(color.r + extract(t[0]), + color.g + extract(t[1]), + color.b + extract(t[2]), + color.a + extract(t[3])); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +addT(Color4 &v, T a) +{ + MATH_EXC_ON; + Color4 w(v.r + a, v.g + a, v.b + a, v.a + a); + return w; +} + +template +static const Color4 & +isub(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color -= color2; +} + +template +static Color4 +sub(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color - color2; +} + +template +static Color4 +subtractL(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(color.r - extract(t[0]), + color.g - extract(t[1]), + color.b - extract(t[2]), + color.a - extract(t[3])); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +subtractR(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(extract(t[0]) - color.r, + extract(t[1]) - color.g, + extract(t[2]) - color.b, + extract(t[3]) - color.a); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +subtractLT(const Color4 &color, T a) +{ + 
MATH_EXC_ON; + return Color4(color.r - a, + color.g - a, + color.b - a, + color.a - a); +} + +template +static Color4 +subtractRT(const Color4 &color, T a) +{ + MATH_EXC_ON; + return Color4(a - color.r, + a - color.g, + a - color.b, + a - color.a); +} + +template +static Color4 +neg(Color4 &color) +{ + MATH_EXC_ON; + return -color; +} + +template +static const Color4 & +negate(Color4 &color) +{ + MATH_EXC_ON; + return color.negate(); +} + +template +static const Color4 & +imul(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color *= color2; +} + +template +static const Color4 & +imulT(Color4 &color, const T &t) +{ + MATH_EXC_ON; + return color *= t; +} + +template +static Color4 +mul(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color * color2; +} + +template +static Color4 +mulT(Color4 &color, const T &t) +{ + MATH_EXC_ON; + return color * t; +} + +template +static Color4 +rmulT(Color4 &color, const T &t) +{ + MATH_EXC_ON; + return t * color; +} + +template +static Color4 +mulTuple(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(color.r * extract(t[0]), + color.g * extract(t[1]), + color.b * extract(t[2]), + color.a * extract(t[3])); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static const Color4 & +idiv(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color /= color2; +} + +template +static const Color4 & +idivT(Color4 &color, const T &t) +{ + MATH_EXC_ON; + return color /= t; +} + +template +static Color4 +div(Color4 &color, const Color4 &color2) +{ + MATH_EXC_ON; + return color / color2; +} + +template +static Color4 +divT(Color4 &color, const T &t) +{ + MATH_EXC_ON; + return color / t; +} + +template +static Color4 +divTupleL(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(color.r / extract(t[0]), + color.g / extract(t[1]), + color.b / extract(t[2]), + color.a / extract(t[3])); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +divTupleR(Color4 &color, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 4) + return Color4(extract(t[0]) / color.r, + extract(t[1]) / color.g, + extract(t[2]) / color.b, + extract(t[3]) / color.a); + else + throw std::invalid_argument ("Color4 expects tuple of length 4"); +} + +template +static Color4 +divTR(Color4 &color, T a) +{ + MATH_EXC_ON; + return Color4(a / color.r, + a / color.g, + a / color.b, + a / color.a); +} + +template +static bool +lessThan(Color4 &v, const Color4 &w) +{ + bool isLessThan = (v.r <= w.r && v.g <= w.g && v.b <= w.b && v.a <= w.a) + && v != w; + + return isLessThan; +} + +template +static bool +greaterThan(Color4 &v, const Color4 &w) +{ + bool isGreaterThan = (v.r >= w.r && v.g >= w.g && v.b >= w.b && v.a >= w.a) + && v != w; + + return isGreaterThan; +} + +template +static bool +lessThanEqual(Color4 &v, const Color4 &w) +{ + bool isLessThanEqual = (v.r <= w.r && v.g <= w.g && v.b <= w.b && v.a <= w.a); + + return isLessThanEqual; +} + +template +static bool +greaterThanEqual(Color4 &v, const Color4 &w) +{ + bool isGreaterThanEqual = (v.r >= w.r && v.g >= w.g && v.b >= w.b) && v.a >= w.a; + + return isGreaterThanEqual; +} + +template +class_ > +register_Color4() +{ + typedef PyImath::StaticFixedArray,T,4> Color4_helper; + void (IMATH_NAMESPACE::Color4::*getValue1)(Color4 &) const = &IMATH_NAMESPACE::Color4::getValue; + void (IMATH_NAMESPACE::Color4::*getValue2)(T &, T &, T 
&, T &) const = &IMATH_NAMESPACE::Color4::getValue; + + class_ > color4_class(Color4Name::value, Color4Name::value,init >("copy construction")); + color4_class + .def("__init__",make_constructor(Color4_construct_default),"initialize to (0,0,0,0)") + .def("__init__",make_constructor(Color4_construct_tuple), "initialize to (r,g,b,a) with a python tuple") + .def("__init__",make_constructor(Color4_construct_list), "initialize to (r,g,b,a) with a python list") + .def("__init__",make_constructor(Color4_component_construct1)) + .def("__init__",make_constructor(Color4_component_construct1)) + .def("__init__",make_constructor(Color4_component_construct2)) + .def("__init__",make_constructor(Color4_component_construct2)) + .def("__init__",make_constructor(Color4_color_construct)) + .def("__init__",make_constructor(Color4_color_construct)) + .def("__init__",make_constructor(Color4_color_construct)) + .def_readwrite("r", &Color4::r) + .def_readwrite("g", &Color4::g) + .def_readwrite("b", &Color4::b) + .def_readwrite("a", &Color4::a) + .def("__str__", &color4_str) + .def("__repr__", &color4_repr) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. + .def("__iadd__", &iadd,return_internal_reference<>()) + .def("__add__", &add) + .def("__add__", &addTuple) + .def("__add__", &addT) + .def("__radd__", &addTuple) + .def("__radd__", &addT) + .def("__isub__", &isub,return_internal_reference<>()) + .def("__sub__", &sub) + .def("__sub__", &subtractL) + .def("__sub__", &subtractLT) + .def("__rsub__", &subtractR) + .def("__rsub__", &subtractRT) + .def("__neg__", &neg) + .def("negate",&negate,return_internal_reference<>(),"component-wise multiplication by -1") + .def("__imul__", &imul,return_internal_reference<>()) + .def("__imul__", &imulT,return_internal_reference<>()) + .def("__mul__", &mul) + .def("__mul__", &mulT) + .def("__rmul__", &mulT) + .def("__mul__", &mulTuple) + .def("__rmul__", &mulTuple) + .def("__idiv__", &idiv,return_internal_reference<>()) + .def("__idiv__", &idivT,return_internal_reference<>()) + .def("__itruediv__", &idiv,return_internal_reference<>()) + .def("__itruediv__", &idivT,return_internal_reference<>()) + .def("__div__", &div) + .def("__div__", &divT) + .def("__div__", &divTupleL) + .def("__truediv__", &div) + .def("__truediv__", &divT) + .def("__truediv__", &divTupleL) + .def("__rdiv__", &divTupleR) + .def("__rdiv__", &divTR) + .def("__rtruediv__", &divTupleR) + .def("__rtruediv__", &divTR) + .def("__lt__", &lessThan) + .def("__gt__", &greaterThan) + .def("__le__", &lessThanEqual) + .def("__ge__", &greaterThanEqual) + .def("__len__", Color4_helper::len) + .def("__getitem__", Color4_helper::getitem,return_value_policy()) + .def("__setitem__", Color4_helper::setitem) + .def("dimensions", &Color4::dimensions,"dimensions() number of dimensions in the color") + .staticmethod("dimensions") + .def("baseTypeEpsilon", &Color4::baseTypeEpsilon,"baseTypeEpsilon() epsilon value of the base type of the color") + .staticmethod("baseTypeEpsilon") + .def("baseTypeMax", &Color4::baseTypeMax,"baseTypeMax() max value of the base type of the color") + .staticmethod("baseTypeMax") + .def("baseTypeLowest", &Color4::baseTypeLowest,"baseTypeLowest() largest negative value of the base type of the color") + .staticmethod("baseTypeLowest") + .def("baseTypeSmallest", &Color4::baseTypeSmallest,"baseTypeSmallest() smallest value of the base type of the color") + .staticmethod("baseTypeSmallest") + .def("__repr__",&color4_repr) + 
.def("hsv2rgb", &hsv2rgb, + "C.hsv2rgb() -- returns a new color which " + "is C converted from RGB to HSV") + + .def("hsv2rgb", &rgb2hsvTuple) + + .def("rgb2hsv", &rgb2hsv, + "C.rgb2hsv() -- returns a new color which " + "is C converted from HSV to RGB") + .def("rgb2hsv", &rgb2hsvTuple) + + .def("setValue", &setValue1, + "C1.setValue(C2)\nC1.setValue(a,b,c) -- " + "set C1's elements") + .def("setValue", &setValue2) + .def("setValue", &setValueTuple) + + .def("getValue", getValue1, "getValue()") + .def("getValue", getValue2) + ; + + decoratecopy(color4_class); + + return color4_class; +} + +template PYIMATH_EXPORT class_ > register_Color4(); +template PYIMATH_EXPORT class_ > register_Color4(); +template PYIMATH_EXPORT class_ > > register_Color4Array(); +template PYIMATH_EXPORT class_ > > register_Color4Array(); +template PYIMATH_EXPORT class_ > > register_Color4Array2D(); +template PYIMATH_EXPORT class_ > > register_Color4Array2D(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Color4 PyImath::FixedArrayDefaultValue >::value() +{ return IMATH_NAMESPACE::Color4(0,0,0, 0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Color4 PyImath::FixedArrayDefaultValue >::value() +{ return IMATH_NAMESPACE::Color4(0,0,0,0); } +} diff --git a/Sources/MetaPy/PyImath/PyImathEuler.cpp b/Sources/MetaPy/PyImath/PyImathEuler.cpp new file mode 100644 index 00000000..ff1fceae --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathEuler.cpp @@ -0,0 +1,878 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathEuler.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImathOperators.h" + +// XXX incomplete array wrapping, docstrings missing + +namespace PyImath { +template<> const char *PyImath::EulerfArray::name() { return "EulerfArray"; } +template<> const char *PyImath::EulerdArray::name() { return "EulerdArray"; } +} + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct EulerName { static const char *value; }; +template<> const char *EulerName::value = "Eulerf"; +template<> const char *EulerName::value = "Eulerd"; + +template +static std::string nameOfOrder(typename IMATH_NAMESPACE::Euler::Order order) +{ + switch(order) + { + case IMATH_NAMESPACE::Euler::XYZ: + return "EULER_XYZ"; + case IMATH_NAMESPACE::Euler::XZY: + return "EULER_XZY"; + case IMATH_NAMESPACE::Euler::YZX: + return "EULER_YZX"; + case IMATH_NAMESPACE::Euler::YXZ: + return "EULER_YXZ"; + case IMATH_NAMESPACE::Euler::ZXY: + return "EULER_ZXY"; + case IMATH_NAMESPACE::Euler::ZYX: + return "EULER_ZYX"; + case IMATH_NAMESPACE::Euler::XZX: + return "EULER_XZX"; + case IMATH_NAMESPACE::Euler::XYX: + return "EULER_XYX"; + case IMATH_NAMESPACE::Euler::YXY: + return "EULER_YXY"; + case IMATH_NAMESPACE::Euler::YZY: + return "EULER_YZY"; + case IMATH_NAMESPACE::Euler::ZYZ: + return "EULER_ZYZ"; + case IMATH_NAMESPACE::Euler::ZXZ: + return "EULER_ZXZ"; + case IMATH_NAMESPACE::Euler::XYZr: + return "EULER_XYZr"; + case IMATH_NAMESPACE::Euler::XZYr: + return "EULER_XZYr"; + case IMATH_NAMESPACE::Euler::YZXr: + return "EULER_YZXr"; + case IMATH_NAMESPACE::Euler::YXZr: + return "EULER_YXZr"; + case IMATH_NAMESPACE::Euler::ZXYr: + return "EULER_ZXYr"; + case IMATH_NAMESPACE::Euler::ZYXr: + return "EULER_ZYXr"; + case IMATH_NAMESPACE::Euler::XZXr: + return 
"EULER_XZXr"; + case IMATH_NAMESPACE::Euler::XYXr: + return "EULER_XYXr"; + case IMATH_NAMESPACE::Euler::YXYr: + return "EULER_YXYr"; + case IMATH_NAMESPACE::Euler::YZYr: + return "EULER_YZYr"; + case IMATH_NAMESPACE::Euler::ZYZr: + return "EULER_ZYZr"; + case IMATH_NAMESPACE::Euler::ZXZr: + return "EULER_ZXZr"; + default: + break; + } + + return ""; +} + +template +static std::string Euler_str(const Euler &e) +{ + std::stringstream stream; + stream << EulerName::value << "(" << e.x << ", " << e.y << ", " << e.z << ", " + << nameOfOrder (e.order()) << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Euler_repr(const Euler &e) +{ + return Euler_str(e); +} + +// Specialization for float to full precision +template <> +std::string Euler_repr(const Euler &e) +{ + return (boost::format("%s(%.9g, %.9g, %.9g, %s)") + % EulerName::value + % e.x % e.y % e.z + % nameOfOrder(e.order()).c_str()).str(); +} + +// Specialization for double to full precision +template <> +std::string Euler_repr(const Euler &e) +{ + return (boost::format("%s(%.17g, %.17g, %.17g, %s)") + % EulerName::value + % e.x % e.y % e.z + % nameOfOrder(e.order()).c_str()).str(); +} + + +template +static bool +equal(const Euler &e0, const Euler &e1) +{ + if(e0.x == e1.x && e0.y == e1.y && e0.z == e1.z && (e0.order())==(e1.order())) + return true; + else + return false; +} + +template +static bool +notequal(const Euler &e0, const Euler &e1) +{ + if(e0.x != e1.x || e0.y != e1.y || e0.z != e1.z || (e0.order()) != (e1.order())) + { + return true; + } + else + return false; +} + +template +static IMATH_NAMESPACE::Vec3 getAngleOrder(Euler &euler) +{ + int i, j, k; + euler.angleOrder(i, j, k); + return IMATH_NAMESPACE::Vec3 (i, j, k); +} + +template +static void +setXYZTuple(Euler &euler, const tuple &t) +{ + MATH_EXC_ON; + Vec3 v; + if(t.attr("__len__")() == 3) + { + v.x = extract(t[0]); + v.y = extract(t[1]); + v.z = extract(t[2]); + + euler.setXYZVector(v); + } + else + throw std::invalid_argument ("Euler expects tuple of length 3"); +} + +// needed to convert Eulerf::Order to Euler::Order +template +static typename Euler::Order interpretOrder(typename IMATH_NAMESPACE::Eulerf::Order order) +{ + typename Euler::Order o = Euler::XYZ; + switch(order) + { + case IMATH_NAMESPACE::Eulerf::XYZ: + { + o = Euler::XYZ; + }break; + case IMATH_NAMESPACE::Eulerf::XZY: + { + o = Euler::XZY; + }break; + case IMATH_NAMESPACE::Eulerf::YZX: + { + o = Euler::YZX; + }break; + case IMATH_NAMESPACE::Eulerf::YXZ: + { + o = Euler::YXZ; + }break; + case IMATH_NAMESPACE::Eulerf::ZXY: + { + o = Euler::ZXY; + }break; + case IMATH_NAMESPACE::Eulerf::ZYX: + { + o = Euler::ZYX; + }break; + case IMATH_NAMESPACE::Eulerf::XZX: + { + o = Euler::XZX; + }break; + case IMATH_NAMESPACE::Eulerf::XYX: + { + o = Euler::XYX; + }break; + case IMATH_NAMESPACE::Eulerf::YXY: + { + o = Euler::YXY; + }break; + case IMATH_NAMESPACE::Eulerf::YZY: + { + o = Euler::YZY; + }break; + case IMATH_NAMESPACE::Eulerf::ZYZ: + { + o = Euler::ZYZ; + }break; + case IMATH_NAMESPACE::Eulerf::ZXZ: + { + o = Euler::ZXZ; + }break; + case IMATH_NAMESPACE::Eulerf::XYZr: + { + o = Euler::XYZr; + }break; + case IMATH_NAMESPACE::Eulerf::XZYr: + { + o = Euler::XZYr; + }break; + case IMATH_NAMESPACE::Eulerf::YZXr: + { + o = Euler::YZXr; + }break; + case IMATH_NAMESPACE::Eulerf::YXZr: + { + o = Euler::YXZr; + }break; + case IMATH_NAMESPACE::Eulerf::ZXYr: + { + o = Euler::ZXYr; + }break; + case IMATH_NAMESPACE::Eulerf::ZYXr: + { + o = Euler::ZYXr; + }break; + 
case IMATH_NAMESPACE::Eulerf::XZXr: + { + o = Euler::XZXr; + }break; + case IMATH_NAMESPACE::Eulerf::XYXr: + { + o = Euler::XYXr; + }break; + case IMATH_NAMESPACE::Eulerf::YXYr: + { + o = Euler::YXYr; + }break; + case IMATH_NAMESPACE::Eulerf::YZYr: + { + o = Euler::YZYr; + }break; + case IMATH_NAMESPACE::Eulerf::ZYZr: + { + o = Euler::ZYZr; + }break; + case IMATH_NAMESPACE::Eulerf::ZXZr: + { + o = Euler::ZXZr; + }break; + default: + break; + } + + return o; +} + +// needed to convert Eulerf::InputLayout to Euler::InputLayout +template +static typename Euler::InputLayout interpretInputLayout(typename IMATH_NAMESPACE::Eulerf::InputLayout layout) +{ + if (layout == IMATH_NAMESPACE::Eulerf::XYZLayout) + return Euler::XYZLayout; + return Euler::IJKLayout; +} + +// needed to convert Eulerf::Axis to Euler::Axis +template +static typename Euler::Axis interpretAxis(typename IMATH_NAMESPACE::Eulerf::Axis axis) +{ + if (axis == IMATH_NAMESPACE::Eulerf::X) + return Euler::X; + else if (axis == IMATH_NAMESPACE::Eulerf::Y) + return Euler::Y; + else + return Euler::Z; +} + +template +static Euler * +eulerConstructor1(const Vec3 &v, + typename IMATH_NAMESPACE::Eulerf::Order order, + typename IMATH_NAMESPACE::Eulerf::InputLayout layout = IMATH_NAMESPACE::Eulerf::IJKLayout) +{ + typename Euler::Order o = interpretOrder(order); + typename Euler::InputLayout l = interpretInputLayout(layout); + return new Euler(v, o, l); +} + +template +static Euler * +eulerConstructor1a(const Vec3 &v) +{ + return eulerConstructor1 (v, IMATH_NAMESPACE::Eulerf::Default); +} + +template +static Euler * +eulerConstructor1b(const Vec3 &v, int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(v, o); +} + +// + +template +static Euler * +eulerConstructor1d(const Euler& e, int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(e, o); +} + +template +static Euler * +eulerConstructor1e(const Euler& e, int iorder, int layout) +{ + typename Euler::Order o = typename Euler::Order (iorder); + typename Euler::InputLayout l = typename Euler::InputLayout (layout); + return new Euler(e, o, l); +} + + +template +static Euler * +eulerConstructor2(T i, T j, T k, + typename IMATH_NAMESPACE::Eulerf::Order order, + typename IMATH_NAMESPACE::Eulerf::InputLayout layout = IMATH_NAMESPACE::Eulerf::IJKLayout) +{ + typename Euler::Order o = interpretOrder(order); + typename Euler::InputLayout l = interpretInputLayout(layout); + return new Euler(i, j, k, o, l); +} + +template +static Euler * +eulerConstructor2a(T i, T j, T k) +{ + return eulerConstructor2 (i, j, k, IMATH_NAMESPACE::Eulerf::Default); +} + +template +static Euler * +eulerConstructor2b(T i, T j, T k, int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(i, j, k, o); +} + +template +static Euler * +eulerConstructor3(const Matrix33 &mat, typename IMATH_NAMESPACE::Eulerf::Order order) +{ + typename Euler::Order o = interpretOrder(order); + return new Euler(mat, o); +} + +template +static Euler * +eulerConstructor3a(const Matrix33 &mat) +{ + return eulerConstructor3 (mat, IMATH_NAMESPACE::Eulerf::Default); +} + +template +static Euler * +eulerConstructor3b(const Matrix33 &mat, int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(mat, o); +} + +template +static Euler * +eulerConstructor4(const Matrix44 &mat, typename IMATH_NAMESPACE::Eulerf::Order order) +{ + typename Euler::Order o = interpretOrder(order); + return new Euler(mat, o); +} + 
+template +static Euler * +eulerConstructor4a(const Matrix44 &mat) +{ + return eulerConstructor4 (mat, IMATH_NAMESPACE::Eulerf::Default); +} + +template +static Euler * +eulerConstructor4b(const Matrix44 &mat, int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(mat, o); +} + +template +static Euler * +eulerConstructor5(typename IMATH_NAMESPACE::Eulerf::Order order) +{ + typename Euler::Order o = interpretOrder(order); + return new Euler(o); +} + +template +static Euler * +eulerConstructor5a() +{ + typename Euler::Order o = interpretOrder(IMATH_NAMESPACE::Eulerf::Default); + return new Euler(o); +} + +template +static Euler * +eulerConstructor5b(int iorder) +{ + typename Euler::Order o = typename Euler::Order (iorder); + return new Euler(o); +} + +template +static Euler * +eulerConstructor6(T x, T y, T z) +{ + return new Euler(Vec3(x,y,z)); +} + +template +static Euler * +eulerConstructor7(const Quat &quat, typename IMATH_NAMESPACE::Eulerf::Order order) +{ + Euler *e = eulerConstructor5(order); + e->extract(quat); + return e; +} + +template +static Euler * +eulerConstructor7a(const Quat &quat) +{ + return eulerConstructor7(quat, IMATH_NAMESPACE::Eulerf::Default); +} + +template +static Euler * +eulerConstructor7b(const Quat &quat, int iorder) +{ + Euler *e = eulerConstructor5b(iorder); + e->extract(quat); + return e; +} + +template +static Euler * +eulerConversionConstructor(const Euler &euler) +{ + MATH_EXC_ON; + Euler *e = new Euler; + *e = euler; + return e; +} + +template +static void +eulerMakeNear(Euler &euler, Euler &target) +{ + MATH_EXC_ON; + euler.makeNear (target); +} + +template +static void +eulerSetOrder(Euler &euler, typename IMATH_NAMESPACE::Eulerf::Order order) +{ + typename Euler::Order o = interpretOrder(order); + euler.setOrder (o); +} + +template +static void +eulerSet(Euler &euler, IMATH_NAMESPACE::Eulerf::Axis axis, int relative, int parityEven, int firstRepeats) +{ + MATH_EXC_ON; + typename Euler::Axis a = interpretAxis(axis); + euler.set (a, relative, parityEven, firstRepeats); +} + +template +static void +extract1(Euler &euler, const Matrix33 &m) +{ + MATH_EXC_ON; + euler.extract(m); +} + +template +static void +extract2(Euler &euler, const Matrix44 &m) +{ + MATH_EXC_ON; + euler.extract(m); +} + +template +static void +extract3(Euler &euler, const Quat &q) +{ + MATH_EXC_ON; + euler.extract(q); +} + +template +static Matrix33 +toMatrix33(Euler &euler) +{ + MATH_EXC_ON; + return euler.toMatrix33(); +} + +template +static Matrix44 +toMatrix44(Euler &euler) +{ + MATH_EXC_ON; + return euler.toMatrix44(); +} + +template +static Quat +toQuat(Euler &euler) +{ + MATH_EXC_ON; + return euler.toQuat(); +} + +template +static Vec3 +toXYZVector(Euler &euler) +{ + MATH_EXC_ON; + return euler.toXYZVector(); +} + +template +class_,bases > > +register_Euler() +{ + class_,bases > > euler_class(EulerName::value,EulerName::value,init >("copy construction")); + euler_class + .def(init<>("imath Euler default construction")) + .def("__init__", make_constructor(eulerConstructor1)) + .def("__init__", make_constructor(eulerConstructor1a)) + .def("__init__", make_constructor(eulerConstructor1b)) + .def("__init__", make_constructor(eulerConstructor1d)) + .def("__init__", make_constructor(eulerConstructor1e)) + .def("__init__", make_constructor(eulerConstructor2)) + .def("__init__", make_constructor(eulerConstructor2a)) + .def("__init__", make_constructor(eulerConstructor2b)) + .def("__init__", make_constructor(eulerConstructor3), + "Euler-from-matrix 
construction assumes, but does\n" + "not verify, that the matrix includes no shear or\n" + "non-uniform scaling. If necessary, you can fix\n" + "the matrix by calling the removeScalingAndShear()\n" + "function.\n") + .def("__init__", make_constructor(eulerConstructor3a)) + .def("__init__", make_constructor(eulerConstructor3b)) + .def("__init__", make_constructor(eulerConstructor4)) + .def("__init__", make_constructor(eulerConstructor4a)) + .def("__init__", make_constructor(eulerConstructor4b)) + .def("__init__", make_constructor(eulerConstructor5)) + .def("__init__", make_constructor(eulerConstructor5a)) + .def("__init__", make_constructor(eulerConstructor5b)) + .def("__init__", make_constructor(eulerConstructor6)) + .def("__init__", make_constructor(eulerConstructor7)) + .def("__init__", make_constructor(eulerConstructor7a)) + .def("__init__", make_constructor(eulerConstructor7b)) + .def("__init__", make_constructor(eulerConversionConstructor)) + .def("__init__", make_constructor(eulerConversionConstructor)) + + .def("angleOrder", &getAngleOrder, "angleOrder() set the angle order") + + .def("frameStatic", &Euler::frameStatic, + "e.frameStatic() -- returns true if the angles of e\n" + "are measured relative to a set of fixed axes,\n" + "or false if the angles of e are measured relative to\n" + "each other\n") + + .def("initialAxis", &Euler::initialAxis, + "e.initialAxis() -- returns the initial rotation\n" + "axis of e (EULER_X_AXIS, EULER_Y_AXIS, EULER_Z_AXIS)") + + .def("initialRepeated", &Euler::initialRepeated, + "e.initialRepeated() -- returns 1 if the initial\n" + "rotation axis of e is repeated (for example,\n" + "e.order() == EULER_XYX); returns 0 if the initial\n" + "rotation axis is not repeated.\n") + + .def("makeNear", &eulerMakeNear, + "e.makeNear(t) -- adjusts Euler e so that it\n" + "represents the same rotation as before, but the\n" + "individual angles of e differ from the angles of\n" + "t by as little as possible.\n" + "This method might not make sense if e.order()\n" + "and t.order() are different\n") + + .def("order", &Euler::order, + "e.order() -- returns the rotation order in e\n" + "(EULER_XYZ, EULER_XZY, ...)") + + .def("parityEven", &Euler::parityEven, + "e.parityEven() -- returns the parity of the\n" + "axis permutation of e\n") + + .def("set", &eulerSet, + "e.set(i,r,p,f) -- sets the rotation order in e\n" + "according to the following flags:\n" + "\n" + " i initial axis (EULER_X_AXIS,\n" + " EULER_Y_AXIS or EULER_Z_AXIS)\n" + "\n" + " r rotation angles are measured relative\n" + " to each other (r == 1), or relative to a\n" + " set of fixed axes (r == 0)\n" + "\n" + " p parity of axis permutation is even (r == 1)\n" + " or odd (r == 0)\n" + "\n" + " f first rotation axis is repeated (f == 1)\n" + " or not repeated (f == 0)\n") + + .def("setOrder", &eulerSetOrder, + "e.setOrder(o) -- sets the rotation order in e\n" + "to o (EULER_XYZ, EULER_XZY, ...)") + + .def("setXYZVector", &Euler::setXYZVector, + "e.setXYZVector(v) -- sets the three rotation\n" + "angles in e to v[0], v[1], v[2]") + .def("setXYZVector", &setXYZTuple) + + .def("extract", &extract1, + "e.extract(m) -- extracts the rotation component\n" + "from 3x3 matrix m and stores the result in e.\n" + "Assumes that m does not contain shear or non-\n" + "uniform scaling. 
If necessary, you can fix m\n" + "by calling m.removeScalingAndShear().") + + .def("extract", &extract2, + "e.extract(m) -- extracts the rotation component\n" + "from 4x4 matrix m and stores the result in e.\n" + "Assumes that m does not contain shear or non-\n" + "uniform scaling. If necessary, you can fix m\n" + "by calling m.removeScalingAndShear().") + + .def("extract", &extract3, + "e.extract(q) -- extracts the rotation component\n" + "from quaternion q and stores the result in e") + + .def("toMatrix33", &toMatrix33, "e.toMatrix33() -- converts e into a 3x3 matrix\n") + + .def("toMatrix44", &toMatrix44, "e.toMatrix44() -- converts e into a 4x4 matrix\n") + + .def("toQuat", &toQuat, "e.toQuat() -- converts e into a quaternion\n") + + .def("toXYZVector", &toXYZVector, + "e.toXYZVector() -- converts e into an XYZ\n" + "rotation vector") + .def("__str__", &Euler_str) + .def("__repr__", &Euler_repr) + + .def("__eq__", &equal) + .def("__ne__", &notequal) + ; + + // fill in the Euler scope + { + scope euler_scope(euler_class); + enum_::Order> euler_order("Order"); + euler_order + .value("XYZ",Euler::XYZ) + .value("XZY",Euler::XZY) + .value("YZX",Euler::YZX) + .value("YXZ",Euler::YXZ) + .value("ZXY",Euler::ZXY) + .value("ZYX",Euler::ZYX) + .value("XZX",Euler::XZX) + .value("XYX",Euler::XYX) + .value("YXY",Euler::YXY) + .value("YZY",Euler::YZY) + .value("ZYZ",Euler::ZYZ) + .value("ZXZ",Euler::ZXZ) + .value("XYZr",Euler::XYZr) + .value("XZYr",Euler::XZYr) + .value("YZXr",Euler::YZXr) + .value("YXZr",Euler::YXZr) + .value("ZXYr",Euler::ZXYr) + .value("ZYXr",Euler::ZYXr) + .value("XZXr",Euler::XZXr) + .value("XYXr",Euler::XYXr) + .value("YXYr",Euler::YXYr) + .value("YZYr",Euler::YZYr) + .value("ZYZr",Euler::ZYZr) + .value("ZXZr",Euler::ZXZr) + + // don't export these, they're not really part of the public interface + //.value("Legal",Euler::Legal) + //.value("Min",Euler::Min) + //.value("Max",Euler::Max) + + // handle Default separately since boost sets up a 1-1 mapping for enum values + //.value("Default",Euler::Default) + .export_values() + ; + // just set it to the XYZ value manually + euler_scope.attr("Default") = euler_scope.attr("XYZ"); + + enum_::Axis>("Axis") + .value("X",Euler::X) + .value("Y",Euler::Y) + .value("Z",Euler::Z) + .export_values() + ; + + enum_::InputLayout>("InputLayout") + .value("XYZLayout",Euler::XYZLayout) + .value("IJKLayout",Euler::IJKLayout) + .export_values() + ; + } + + decoratecopy(euler_class); + + return euler_class; +} + +// XXX fixme - template this +// really this should get generated automatically...
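Before the array wrappers below, a note on the scope block above: Boost.Python's scope object makes the enum_ registrations for Order, Axis and InputLayout attach to the Euler class rather than to the module while the scope is alive, and export_values() also injects the enumerators into that namespace; Default is then aliased by hand because enum_ keeps a one-to-one mapping of underlying values. A reduced sketch of the same pattern, with a hypothetical Oven type standing in for Euler:

    #include <boost/python.hpp>

    struct Oven { enum Mode { Bake, Broil }; };

    BOOST_PYTHON_MODULE (oven_sketch)
    {
        namespace bp = boost::python;
        bp::class_<Oven> oven_class ("Oven");
        {
            // While oven_scope is alive, registrations land on Oven, so the
            // enum is reachable as Oven.Mode; export_values() also exposes
            // Oven.Bake and Oven.Broil directly on the class.
            bp::scope oven_scope (oven_class);
            bp::enum_<Oven::Mode> ("Mode")
                .value ("Bake",  Oven::Bake)
                .value ("Broil", Oven::Broil)
                .export_values ();
        }
        // Extra alias set by hand, mirroring euler_scope.attr("Default") above.
        oven_class.attr ("Quick") = oven_class.attr ("Bake");
    }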
+ +/* +template +static FixedArray +EulerArray_get(FixedArray > &qa) +{ + return FixedArray( &(qa[0].r)+index, qa.len(), 4*qa.stride()); +} +*/ + +template +static FixedArray > * +EulerArray_eulerConstructor7a(const FixedArray > &q) +{ + MATH_EXC_ON; + size_t len = q.len(); + FixedArray >* result = new FixedArray >(len); + for (size_t i = 0; i < len; ++i) { + (*result)[i].extract(q[i]); + } + return result; +} + +template +static FixedArray > * +EulerArray_eulerConstructor8a(const FixedArray >& v) +{ + MATH_EXC_ON; + size_t len = v.len(); + FixedArray >* result = new FixedArray >(len); + + for (size_t i = 0; i < len; ++i) + (*result)[i] = Euler(v[i]); + + return result; +} + +template +static FixedArray > * +EulerArray_eulerConstructor9a(const FixedArray >& v, typename IMATH_NAMESPACE::Eulerf::Order order) +{ + MATH_EXC_ON; + size_t len = v.len(); + FixedArray >* result = new FixedArray >(len); + + typename Euler::Order o = interpretOrder(order); + for (size_t i = 0; i < len; ++i) + (*result)[i] = Euler(v[i], o); + + return result; +} + +template +static FixedArray > +EulerArray_toXYZVector(const FixedArray >& e) +{ + MATH_EXC_ON; + size_t len = e.len(); + FixedArray > result(len, UNINITIALIZED); + for (size_t i = 0; i < len; ++i) + result[i] = e[i].toXYZVector(); + return result; +} + +template +static FixedArray > +EulerArray_toQuat(const FixedArray >& e) +{ + MATH_EXC_ON; + size_t len = e.len(); + FixedArray > result(len, UNINITIALIZED); + for (size_t i = 0; i < len; ++i) + result[i] = e[i].toQuat(); + return result; +} + + +template +class_ > > +register_EulerArray() +{ + class_ > > eulerArray_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Euler"); + eulerArray_class + //.add_property("x",&EulerArray_get) + //.add_property("y",&EulerArray_get) + //.add_property("z",&EulerArray_get) + .def("__init__", make_constructor(EulerArray_eulerConstructor7a)) + .def("__init__", make_constructor(EulerArray_eulerConstructor8a)) + .def("__init__", make_constructor(EulerArray_eulerConstructor9a)) + .def("toXYZVector", EulerArray_toXYZVector) + .def("toQuat", EulerArray_toQuat) + ; + + add_comparison_functions(eulerArray_class); + PyImath::add_explicit_construction_from_type >(eulerArray_class); + PyImath::add_explicit_construction_from_type >(eulerArray_class); + return eulerArray_class; +} + +template PYIMATH_EXPORT class_,bases > > register_Euler(); +template PYIMATH_EXPORT class_,bases > > register_Euler(); + +template PYIMATH_EXPORT class_ > > register_EulerArray(); +template PYIMATH_EXPORT class_ > > register_EulerArray(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Euler FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Euler(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Euler FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Euler(); } + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathFixedArray.cpp b/Sources/MetaPy/PyImath/PyImathFixedArray.cpp new file mode 100644 index 00000000..efca3022 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathFixedArray.cpp @@ -0,0 +1,26 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathFixedArray.h" +#include "PyImathExport.h" + +namespace PyImath { + +template <> PYIMATH_EXPORT bool FixedArrayDefaultValue::value() { return false; } +template <> PYIMATH_EXPORT signed char FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT unsigned char FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT short FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT unsigned short FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT int FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT int64_t FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT unsigned int FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT float FixedArrayDefaultValue::value() { return 0; } +template <> PYIMATH_EXPORT double FixedArrayDefaultValue::value() { return 0; } + +//int alloc_count = 0; + +} diff --git a/Sources/MetaPy/PyImath/PyImathFixedVArray.cpp b/Sources/MetaPy/PyImath/PyImathFixedVArray.cpp new file mode 100644 index 00000000..d95cac30 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathFixedVArray.cpp @@ -0,0 +1,871 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include "PyImathExport.h" +#include "PyImathFixedVArray.h" + +namespace PyImath { + +template +FixedVArray::FixedVArray (std::vector* ptr, Py_ssize_t length, + Py_ssize_t stride, bool writable) + : _ptr(ptr), _length(length), _stride(stride), _writable (writable), + _handle(), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::invalid_argument("Fixed array stride must be positive"); + } + + // Nothing else to do (pointer given, so we have the data) +} + +template +FixedVArray::FixedVArray (std::vector* ptr, Py_ssize_t length, + Py_ssize_t stride, boost::any handle, bool writable) + : _ptr(ptr), _length(length), _stride(stride), _writable(writable), + _handle(handle), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::invalid_argument("Fixed array stride must be positive"); + } + + // Nothing else to do (pointer given, so we have the data) +} + +template +FixedVArray::FixedVArray (const std::vector* ptr, Py_ssize_t length, + Py_ssize_t stride) + : _ptr(const_cast *>(ptr)), _length(length), _stride(stride), + _writable(false), _handle(), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::invalid_argument("Fixed array stride must be positive"); + } + + // Nothing else to do (pointer given, so we have the data) +} + +template +FixedVArray::FixedVArray (const std::vector* ptr, Py_ssize_t length, + Py_ssize_t stride, boost::any handle) + : _ptr(const_cast *>(ptr)), _length(length), _stride(stride), + _writable(false), _handle(handle), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::invalid_argument("Fixed array stride must be positive"); + } + + // Nothing else to do (pointer given, so we have the data) +} + +template +FixedVArray::FixedVArray(Py_ssize_t length) + : _ptr(0), 
_length(length), _stride(1), _writable(true), + _handle(), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + + boost::shared_array > a(new std::vector[length]); + // Initial vectors in the array will be zero-length. + _handle = a; + _ptr = a.get(); +} + +template +FixedVArray::FixedVArray(const T& initialValue, Py_ssize_t length) + : _ptr(0), _length(length), _stride(1), _writable(true), + _handle(), _unmaskedLength(0) +{ + if (length < 0) + { + throw std::invalid_argument("Fixed array length must be non-negative"); + } + + boost::shared_array > a(new std::vector[length]); + for (Py_ssize_t i = 0; i < length; ++i) + { + a[i].push_back (initialValue); + } + _handle = a; + _ptr = a.get(); +} + +template +FixedVArray::FixedVArray(FixedVArray& other, const FixedArray& mask) + : _ptr(other._ptr), _stride(other._stride), _writable(other._writable), + _handle(other._handle) +{ + if (other.isMaskedReference()) + { + throw std::invalid_argument + ("Masking an already-masked FixedVArray is not supported yet (SQ27000)"); + } + + size_t len = (size_t) other.match_dimension (mask); + _unmaskedLength = len; + + size_t reduced_len = 0; + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + reduced_len++; + } + } + + _indices.reset (new size_t[reduced_len]); + + for (size_t i = 0, j = 0; i < len; ++i) + { + if (mask[i]) + { + _indices[j] = i; // NOSONAR - suppress SonarCloud warning. + j++; + } + } + + _length = reduced_len; +} + +template +FixedVArray::FixedVArray(const FixedArray& size, const T& initialValue) + : _ptr(nullptr), _length (size.len()), _stride(1), + _writable(true), _handle(), _indices(), _unmaskedLength(0) +{ + boost::shared_array > a(new std::vector[_length]); + for (size_t i = 0; i < _length; ++i) + { + if (size[i] < 0) + throw std::invalid_argument("Attempt to create negative FixedVArray element"); + + std::vector &v = a[i]; + + v.resize (size[i]); + std::fill (v.begin(), v.end(), initialValue); + } + _handle = a; + _ptr = a.get(); +} + +template +FixedVArray::FixedVArray(const FixedVArray& other) + : _ptr(other._ptr), + _length(other._length), + _stride(other._stride), + _writable (other._writable), + _handle(other._handle), + _indices(other._indices), + _unmaskedLength(other._unmaskedLength) +{ + // Nothing. +} + +template +const FixedVArray & +FixedVArray::operator = (const FixedVArray& other) +{ + if (&other == this) + return *this; + + _ptr = other._ptr; + _length = other._length; + _stride = other._stride; + _writable = other._writable; + _handle = other._handle; + _unmaskedLength = other._unmaskedLength; + _indices = other._indices; + + return *this; +} + +template +FixedVArray::~FixedVArray() +{ + // Nothing. +} + + +template +std::vector& +FixedVArray::operator [] (size_t i) +{ + if (!_writable) + throw std::invalid_argument("Fixed V-array is read-only."); + + return _ptr[(_indices ? raw_ptr_index(i) : i) * _stride]; +} + +template +const std::vector& +FixedVArray::operator [] (size_t i) const +{ + return _ptr[(_indices ? raw_ptr_index(i) : i) * _stride]; +} + + +namespace { + +// +// Make an index suitable for indexing into an array in c++ +// from a python index, which can be negative for indexing +// relative to the end of an array. 
+// +size_t +canonical_index (Py_ssize_t index, const size_t& totalLength) +{ + if (index < 0) + { + index += totalLength; + } + if ((size_t) index >= totalLength || index < 0) + { + PyErr_SetString (PyExc_IndexError, "Index out of range"); + boost::python::throw_error_already_set(); + } + return index; // still a 'virtual' index if this is a masked reference array +} + +void +extract_slice_indices (PyObject* index, size_t& start, size_t& end, + Py_ssize_t& step, size_t& sliceLength, + const size_t& totalLength) +{ + if (PySlice_Check (index)) + { +#if PY_MAJOR_VERSION > 2 + PyObject* slice = index; +#else + PySliceObject* slice = reinterpret_cast(index); +#endif + Py_ssize_t s, e, sl; + if (PySlice_GetIndicesEx(slice, totalLength, &s, &e, &step, &sl) == -1) + { + boost::python::throw_error_already_set(); + } + if (s < 0 || e < -1 || sl < 0) + { + throw std::domain_error + ("Slice extraction produced invalid start, end, or length indices"); + } + + start = s; + end = e; + sliceLength = sl; + } + else if (PyInt_Check (index)) + { + size_t i = canonical_index (PyInt_AsSsize_t(index), totalLength); + start = i; + end = i + 1; + step = 1; + sliceLength = 1; + } + else + { + PyErr_SetString (PyExc_TypeError, "Object is not a slice"); + boost::python::throw_error_already_set(); + } +} + +} // namespace + + +// this must have a call policy of return_internal_reference +template +FixedArray +FixedVArray::getitem (Py_ssize_t index) +{ + const size_t i = canonical_index (index, _length); + std::vector& data = _ptr[(_indices ? raw_ptr_index(i) : i) * _stride]; + return FixedArray(data.empty() ? nullptr : &data[0], data.size(), 1, _writable); +} + +template +FixedVArray +FixedVArray::getslice (PyObject* index) const +{ + size_t start = 0; + size_t end = 0; + size_t sliceLength = 0; + Py_ssize_t step; + extract_slice_indices (index, start, end, step, sliceLength, _length); + + FixedVArray f(sliceLength); + + if (_indices) + { + for (size_t i = 0; i < sliceLength; ++i) + { + f._ptr[i] = _ptr[raw_ptr_index(start + i*step)*_stride]; + } + } + else + { + for (size_t i = 0; i < sliceLength; ++i) + { + f._ptr[i] = _ptr[(start + i*step)*_stride]; + } + } + + return f; +} + +template +FixedVArray +FixedVArray::getslice_mask (const FixedArray& mask) +{ + return FixedVArray (*this, mask); +} + +template +void +FixedVArray::setitem_scalar (PyObject* index, const FixedArray& data) +{ + if (!_writable) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t start = 0; + size_t end = 0; + size_t sliceLength = 0; + Py_ssize_t step; + extract_slice_indices (index, start, end, step, sliceLength, _length); + + if (_indices) + { + for (size_t i = 0; i < sliceLength; ++i) + { + std::vector &d =_ptr[raw_ptr_index(start + i*step)*_stride]; + if (data.len() != static_cast(d.size())) + throw std::invalid_argument("FixedVArray::setitem: length of data does not match length of array element"); + + if (data.isMaskedReference()) + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data[j]; + } + } + else + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data.direct_index(j); + } + } + } + } + else + { + for (size_t i = 0; i < sliceLength; ++i) + { + std::vector &d =_ptr[(start + i*step)*_stride]; + if (data.len() != static_cast(d.size())) + throw std::invalid_argument("FixedVArray::setitem: length of data does not match length of array element"); + + if (data.isMaskedReference()) + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data[j]; + } + } + else + { + for (Py_ssize_t j 
= 0; j < data.len(); ++j) + { + d[j] = data.direct_index(j); + } + } + } + } +} + +template +void +FixedVArray::setitem_scalar_mask (const FixedArray& mask, const FixedArray& data) +{ + if (!_writable) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t len = match_dimension(mask, false); + + if (_indices) + { + for (size_t i = 0; i < len; ++i) + { + // We don't need to actually look at 'mask' because + // match_dimensions has already forced some expected condition. + std::vector &d =_ptr[raw_ptr_index(i)*_stride]; + if (data.len() != static_cast(d.size())) + throw std::invalid_argument("FixedVArray::setitem: length of data does not match length of array element"); + + if (data.isMaskedReference()) + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data[j]; + } + } + else + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data.direct_index(j); + } + } + } + } + else + { + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + std::vector &d = _ptr[i*_stride]; + if (data.len() != static_cast(d.size())) + throw std::invalid_argument("FixedVArray::setitem: length of data does not match length of array element"); + + if (data.isMaskedReference()) + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data[j]; + } + } + else + { + for (Py_ssize_t j = 0; j < data.len(); ++j) + { + d[j] = data.direct_index(j); + } + } + } + } + } +} +template +void +FixedVArray::setitem_vector (PyObject* index, const FixedVArray& data) +{ + if (!_writable) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t start = 0; + size_t end = 0; + size_t sliceLength = 0; + Py_ssize_t step; + extract_slice_indices (index, start, end, step, sliceLength, _length); + + if ((size_t) data.len() != sliceLength) + { + PyErr_SetString (PyExc_IndexError, + "Dimensions of source do not match destination"); + boost::python::throw_error_already_set(); + } + + if (_indices) + { + for (size_t i = 0; i < sliceLength; ++i) + { + _ptr[raw_ptr_index(start + i*step)*_stride] = data[i]; + } + } + else + { + for (size_t i = 0; i < sliceLength; ++i) + { + _ptr[(start + i*step)*_stride] = data[i]; + } + } +} + +template +void +FixedVArray::setitem_vector_mask (const FixedArray& mask, + const FixedVArray& data) +{ + if (!_writable) + throw std::invalid_argument ("Fixed V-array is read-only."); + + // This restriction could be removed if there is a compelling use-case. 
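    // A "masked reference" array is the view produced by the constructor that
    // takes a FixedArray mask, above: _indices records the raw positions the
    // mask selected, and raw_ptr_index() translates logical indices through
    // that table.  Applying a second mask to such a view would mean composing
    // the two selections, which this code does not attempt, so it is rejected
    // just below.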
+ if (_indices) + { + throw std::invalid_argument + ("We don't support setting item masks for masked reference arrays"); + } + + size_t len = match_dimension(mask); + + if ((size_t) data.len() == len) + { + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + _ptr[i*_stride] = data[i]; + } + } + } + else + { + size_t count = 0; + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + count++; + } + } + if ((size_t) data.len() != count) + { + throw std::invalid_argument + ("Dimensions of source data do not match destination " + "either masked or unmasked"); + } + + Py_ssize_t dataIndex = 0; + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + _ptr[i*_stride] = data[dataIndex]; + dataIndex++; + } + } + } +} + + +template +int +FixedVArray::SizeHelper::getitem (Py_ssize_t index) const +{ + size_t i = canonical_index(index, _a._length); + + if (_a._indices) + { + return _a._ptr[_a.raw_ptr_index(i)*_a._stride].size(); + } + + return _a._ptr[i*_a._stride].size(); +} + +template +FixedArray +FixedVArray::SizeHelper::getitem_slice (PyObject* index) const +{ + size_t start = 0; + size_t end = 0; + size_t sliceLength = 0; + Py_ssize_t step; + extract_slice_indices (index, start, end, step, sliceLength, _a._length); + + FixedArray f(sliceLength); + + if (_a._indices) + { + for (size_t i = 0; i < sliceLength; ++i) + { + f.direct_index(i) = _a._ptr[_a.raw_ptr_index(start + i*step)*_a._stride].size(); + } + } + else + { + for (size_t i = 0; i < sliceLength; ++i) + { + f.direct_index(i) = _a._ptr[(start + i*step)*_a._stride].size(); + } + } + + return f; +} + +template +FixedArray +FixedVArray::SizeHelper::getitem_mask (const FixedArray& mask) const +{ + int len = mask.len(); + if (len != _a.len()) + { + throw std::invalid_argument("Dimensions of mask do not match array"); + } + + int count = 0; + for (Py_ssize_t i = 0; i < mask.len(); ++i) + { + if (mask[i]) count += 1; + } + + FixedArray f(count); + + if (_a._indices) + { + size_t index = 0; + for (Py_ssize_t i = 0; i < mask.len(); ++i) + { + if (mask[i]) + { + f.direct_index(index) = _a._ptr[_a.raw_ptr_index(i)*_a._stride].size(); + index += 1; + } + } + } + else + { + size_t index = 0; + for (Py_ssize_t i = 0; i < mask.len(); ++i) + { + if (mask[i]) + { + f.direct_index(index) = _a._ptr[i*_a._stride].size(); + index += 1; + } + } + } + + return f; +} + + +template +void +FixedVArray::SizeHelper::setitem_scalar (PyObject* index, size_t size) +{ + if (!_a.writable()) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t start = 0; + size_t end = 0; + size_t sliceLength = 0; + Py_ssize_t step; + extract_slice_indices (index, start, end, step, sliceLength, _a._length); + + if (_a._indices) + { + for (size_t i = 0; i < sliceLength; ++i) + { + _a._ptr[_a.raw_ptr_index(start + i*step)*_a._stride].resize(size); + } + } + else + { + for (size_t i = 0; i < sliceLength; ++i) + { + _a._ptr[(start + i*step)*_a._stride].resize(size); + } + } +} + +template +void +FixedVArray::SizeHelper::setitem_scalar_mask (const FixedArray& mask, size_t size) +{ + if (!_a.writable()) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t len = _a.match_dimension(mask, false); + + if (_a._indices) + { + for (size_t i = 0; i < len; ++i) + { + // We don't need to actually look at 'mask' because + // match_dimensions has already forced some expected condition. 
+ _a._ptr[_a.raw_ptr_index(i)*_a._stride].resize(size); + } + } + else + { + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + _a._ptr[i*_a._stride].resize(size); + } + } + } +} + +template +void +FixedVArray::SizeHelper::setitem_vector(PyObject *index, const FixedArray &size) +{ + if (!_a.writable()) + throw std::invalid_argument ("Fixed V-array is read-only."); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength,_a._length); + + // we have a valid range of indices + if (size.len() != static_cast(slicelength)) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); + boost::python::throw_error_already_set(); + } + + if (_a._indices) + { + for (size_t i=0; i +void +FixedVArray::SizeHelper::setitem_vector_mask(const FixedArray &mask, const FixedArray &size) +{ + if (!_a.writable()) + throw std::invalid_argument ("Fixed V-array is read-only."); + + // We could relax this but this restriction if there's a good + // enough reason too. + + if (_a._indices) + { + throw std::invalid_argument("We don't support setting item masks for masked reference arrays."); + } + + Py_ssize_t len = _a.match_dimension(mask); + if (size.len() == len) + { + for (Py_ssize_t i = 0; i < len; ++i) + if (mask[i]) _a._ptr[i*_a._stride].resize(size[i]); + } + else + { + Py_ssize_t count = 0; + for (Py_ssize_t i = 0; i < len; ++i) + if (mask[i]) count++; + + if (size.len() != count) { + throw std::invalid_argument("Dimensions of source data do not match destination either masked or unmasked"); + } + + Py_ssize_t sizeIndex = 0; + for (Py_ssize_t i = 0; i < len; ++i) + { + if (mask[i]) + { + _a._ptr[i*_a._stride].resize(size[sizeIndex]); + sizeIndex++; + } + } + } +} + +template +size_t +FixedVArray::raw_ptr_index (size_t i) const +{ + assert (isMaskedReference()); + assert (i < _length); + assert (_indices[i] >= 0 && _indices[i] < _unmaskedLength); + + return _indices[i]; +} + + +template +boost::shared_ptr::SizeHelper> +FixedVArray::getSizeHelper() +{ + return boost::shared_ptr(new typename FixedVArray::SizeHelper (*this)); +} + +// static +template +boost::python::class_ > +FixedVArray::register_(const char* doc) +{ + boost::python::class_ > fixedVArray_class (name(), doc, + boost::python::init("Construct a variable array of the " + "specified length initialized to the default value for the given type")); + + fixedVArray_class + .def(boost::python::init &>("Construct a variable array with the same values as the given array")) + .def(boost::python::init("Construct a variable array of the specified length initialized to the specified default value")) + .def(boost::python::init &, const T &>("Construct a variable array with each array size equal to the specified elements and initialized to the specified default value")) + .def("__getitem__", &FixedVArray::getslice) + .def("__getitem__", &FixedVArray::getslice_mask) + .def("__getitem__", &FixedVArray::getitem, boost::python::with_custodian_and_ward_postcall<1,0>()) + + .def("__setitem__", &FixedVArray::setitem_scalar) + .def("__setitem__", &FixedVArray::setitem_scalar_mask) + .def("__setitem__", &FixedVArray::setitem_vector) + .def("__setitem__", &FixedVArray::setitem_vector_mask) + + .def("__len__", &FixedVArray::len) + .def("writable", &FixedVArray::writable) + .def("makeReadOnly",&FixedVArray::makeReadOnly) + + .add_property("size", boost::python::make_function(&FixedVArray::getSizeHelper, boost::python::with_custodian_and_ward_postcall<0,1>())) + ; + + { + 
boost::python::scope s(fixedVArray_class); + + boost::python::class_::SizeHelper,boost::noncopyable> sizeHelper_class ("SizeHelper", boost::python::no_init); + sizeHelper_class + .def("__getitem__", &FixedVArray::SizeHelper::getitem) + .def("__getitem__", &FixedVArray::SizeHelper::getitem_mask) + .def("__getitem__", &FixedVArray::SizeHelper::getitem_slice) + + .def("__setitem__", &FixedVArray::SizeHelper::setitem_scalar) + .def("__setitem__", &FixedVArray::SizeHelper::setitem_scalar_mask) + .def("__setitem__", &FixedVArray::SizeHelper::setitem_vector) + .def("__setitem__", &FixedVArray::SizeHelper::setitem_vector_mask) + ; + } + + boost::python::register_ptr_to_python::SizeHelper> >(); + + return fixedVArray_class; +} + + +// ---- Explicit Class Instantiation --------------------------------- + +template class PYIMATH_EXPORT FixedVArray; +template class PYIMATH_EXPORT FixedVArray; +template class PYIMATH_EXPORT FixedVArray >; +template class PYIMATH_EXPORT FixedVArray >; + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathFrustum.cpp b/Sources/MetaPy/PyImath/PyImathFrustum.cpp new file mode 100644 index 00000000..7b46a80a --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathFrustum.cpp @@ -0,0 +1,499 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathVec.h" +#include "PyImathFrustum.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" + +namespace PyImath{ +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct FrustumName {static const char *value;}; +template <> const char *FrustumName::value = "Frustumf"; +template <> const char *FrustumName::value = "Frustumd"; + +template struct FrustumTestName {static const char *value;}; +template <> const char *FrustumTestName::value = "FrustumTestf"; +template <> const char *FrustumTestName::value = "FrustumTestd"; + + +template +static std::string Frustum_repr(const Frustum &f) +{ + std::stringstream stream; + stream << FrustumName::value << "(" << f.nearPlane() << ", " << f.farPlane() << ", " + << f.left() << ", " << f.right() << ", " << f.top() << ", " + << f.bottom() << ", " << f.orthographic() << ")"; + return stream.str(); +} + +template +static void +modifyNearAndFar(Frustum &f, T nearPlane, T farPlane) +{ + MATH_EXC_ON; + f.modifyNearAndFar (nearPlane, farPlane); +} + +template +static T +fovx(Frustum &f) +{ + MATH_EXC_ON; + return f.fovx(); +} + +template +static T +fovy(Frustum &f) +{ + MATH_EXC_ON; + return f.fovy(); +} + +template +static T +aspect(Frustum &f) +{ + MATH_EXC_ON; + return f.aspect(); +} + +template +static Matrix44 +projectionMatrix(Frustum &f) +{ + MATH_EXC_ON; + return f.projectionMatrix(); +} + +template +static Frustum +window (Frustum &f, T l, T r, T b, T t) +{ + MATH_EXC_ON; + return f.window(l, r, b, t); +} + +template +static Line3 +projectScreenToRay (Frustum &f, const Vec2 &p) +{ + MATH_EXC_ON; + return f.projectScreenToRay(p); +} + +template +static Line3 +projectScreenToRayTuple(Frustum &f, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 point; + point.x = extract(t[0]); + point.y = extract(t[1]); + return f.projectScreenToRay(point); + } + else + throw std::invalid_argument ( "projectScreenToRay expects tuple of length 2"); + +} + +// dead code? 
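The *Tuple wrappers here (projectScreenToRayTuple above, projectPointToScreenTuple just below, and the color and Euler tuple overloads earlier in the patch) all follow one pattern: an extra .def overload takes a raw Python tuple, checks its length, and unpacks the elements with boost::python::extract. A compact sketch of that pattern using a hypothetical Point2 type; absent a custom exception translator, the std::invalid_argument would surface in Python as a RuntimeError:

    #include <boost/python.hpp>
    #include <stdexcept>

    struct Point2 { double x = 0.0, y = 0.0; };

    static double norm2 (const Point2& p) { return p.x * p.x + p.y * p.y; }

    // Tuple overload: validate the length, then unpack with extract<double>.
    static double norm2Tuple (const boost::python::tuple& t)
    {
        if (boost::python::len (t) != 2)
            throw std::invalid_argument ("norm2 expects a tuple of length 2");
        Point2 p;
        p.x = boost::python::extract<double> (t[0]);
        p.y = boost::python::extract<double> (t[1]);
        return norm2 (p);
    }

    BOOST_PYTHON_MODULE (point_sketch)
    {
        namespace bp = boost::python;
        bp::class_<Point2> ("Point2")
            .def_readwrite ("x", &Point2::x)
            .def_readwrite ("y", &Point2::y);
        bp::def ("norm2", &norm2);      // accepts a wrapped Point2
        bp::def ("norm2", &norm2Tuple); // also accepts a plain (x, y) tuple
    }

PyImath registers its tuple overloads as methods on the class rather than free functions, but the overload dispatch works the same way.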
+template +static Vec2 +projectPointToScreen (Frustum &f, const Vec3 &p) +{ + MATH_EXC_ON; + return f.projectPointToScreen(p); +} + +// dead code? +template +static Vec2 +projectPointToScreenTuple(Frustum &f, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 point; + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + return f.projectPointToScreen(point); + } + else + throw std::invalid_argument ( "projectPointToScreen expects tuple of length 3"); + +} + +template +static Vec2 +projectPointToScreenObj(Frustum &f, const object &o) +{ + MATH_EXC_ON; + Vec3 v; + if (PyImath::V3::convert (o.ptr(), &v)) + return f.projectPointToScreen(v); + else + throw std::invalid_argument ( "projectPointToScreen expects tuple of length 3"); +} + +template +static T +ZToDepth(Frustum &f, long z, long min, long max) +{ + MATH_EXC_ON; + return f.ZToDepth(z, min, max); +} + +template +static T +normalizedZToDepth(Frustum &f, T z) +{ + MATH_EXC_ON; + return f.normalizedZToDepth(z); +} + +template +static long +DepthToZ(Frustum &f, T depth, long min, long max) +{ + MATH_EXC_ON; + return f.DepthToZ(depth, min, max); +} + +template +static T +worldRadius(Frustum &f, const Vec3 &p, T radius) +{ + MATH_EXC_ON; + return f.worldRadius(p, radius); +} + +template +static T +worldRadiusTuple(Frustum &f, const tuple &t, T radius) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 point; + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + return f.worldRadius(point, radius); + } + else + throw std::invalid_argument ( "worldRadius expects tuple of length 3"); +} + +template +static T +screenRadius(Frustum &f, const Vec3 &p, T radius) +{ + MATH_EXC_ON; + return f.screenRadius(p, radius); +} + +template +static T +screenRadiusTuple(Frustum &f, const tuple &t, T radius) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 point; + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + return f.screenRadius(point, radius); + } + else + throw std::invalid_argument ("screenRadius expects tuple of length 3"); +} + +// dead code? +template +static void +planes1(Frustum &f, Plane3 *p) +{ + MATH_EXC_ON; + f.planes(p); +} + +// dead code? +template +static void +planes2(Frustum &f, Plane3 *p, const Matrix44 &m) +{ + MATH_EXC_ON; + f.planes(p, m); +} + +template +static tuple +planes3(Frustum &f, const Matrix44 &mat) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::Plane3 p[6]; + f.planes(p,mat); + + tuple t = make_tuple(p[0],p[1],p[2],p[3],p[4],p[5]); + + return t; +} + +template +static tuple +planes4(Frustum &f) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::Plane3 p[6]; + f.planes(p); + + tuple t = make_tuple(p[0],p[1],p[2],p[3],p[4],p[5]); + + return t; +} + +template +class_ > +register_Frustum() +{ + void (IMATH_NAMESPACE::Frustum::*set1)(T,T,T,T,T,T,bool) = &IMATH_NAMESPACE::Frustum::set; + void (IMATH_NAMESPACE::Frustum::*set2)(T,T,T,T,T) = &IMATH_NAMESPACE::Frustum::set; + const char *name = FrustumName::value; + + class_< Frustum > frustum_class(name,name,init >("copy construction")); + frustum_class + .def(init<>("Frustum() default construction")) + .def(init("Frustum(nearPlane,farPlane,left,right,top,bottom,ortho) construction")) + .def(init("Frustum(nearPlane,farPlane,fovx,fovy,aspect) construction")) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
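+        // Illustrative Python use of the bindings below (a sketch, not part of
+        // the build; assumes the module imports as "imath"):
+        //
+        //     >>> f = imath.Frustumf(0.5, 100.0, 1.2, 0.0, 1.0)  # near, far, fovx, fovy, aspect
+        //     >>> f.nearPlane(), f.farPlane()
+        //     (0.5, 100.0)
+        //     >>> m = f.projectionMatrix()                       # derives the projection matrix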
+ .def("__repr__",&Frustum_repr) + .def("set", set1, + "F.set(nearPlane, farPlane, left, right, top, bottom, " + "[ortho])\n" + "F.set(nearPlane, farPlane, fovx, fovy, aspect) " + " -- sets the entire state of " + "frustum F as specified. Only one of " + "fovx or fovy may be non-zero.") + .def("set", set2) + + .def("modifyNearAndFar", &modifyNearAndFar, + "F.modifyNearAndFar(nearPlane, farPlane) -- modifies " + "the already-valid frustum F as specified") + + .def("setOrthographic", &Frustum::setOrthographic, + "F.setOrthographic(b) -- modifies the " + "already-valid frustum F to be orthographic " + "or not") + + .def("nearPlane", &Frustum::nearPlane, + "F.nearPlane() -- returns the coordinate of the " + "near clipping plane of frustum F") + + .def("farPlane", &Frustum::farPlane, + "F.farPlane() -- returns the coordinate of the " + "far clipping plane of frustum F") + + // The following two functions provide backwards compatibility + // with the previous API for this class. + + .def("near", &Frustum::nearPlane, + "F.near() -- returns the coordinate of the " + "near clipping plane of frustum F") + + .def("far", &Frustum::farPlane, + "F.far() -- returns the coordinate of the " + "far clipping plane of frustum F") + + .def("left", &Frustum::left, + "F.left() -- returns the left coordinate of " + "the near clipping window of frustum F") + + .def("right", &Frustum::right, + "F.right() -- returns the right coordinate of " + "the near clipping window of frustum F") + + .def("top", &Frustum::top, + "F.top() -- returns the top coordinate of " + "the near clipping window of frustum F") + + .def("bottom", &Frustum::bottom, + "F.bottom() -- returns the bottom coordinate " + "of the near clipping window of frustum F") + + .def("orthographic", &Frustum::orthographic, + "F.orthographic() -- returns whether frustum " + "F is orthographic or not") + + .def("planes", planes1, + "F.planes([M]) -- returns a sequence of 6 " + "Plane3s, the sides of the frustum F " + "(top, right, bottom, left, nearPlane, farPlane), " + "optionally transformed by the matrix M " + "if specified") + .def("planes", planes2) + .def("planes", planes3) + .def("planes", planes4) + + .def("fovx", &fovx, + "F.fovx() -- derives and returns the " + "x field of view (in radians) for frustum F") + + .def("fovy", &fovy, + "F.fovy() -- derives and returns the " + "y field of view (in radians) for frustum F") + + .def("aspect", &aspect, + "F.aspect() -- derives and returns the " + "aspect ratio for frustum F") + + .def("projectionMatrix", &projectionMatrix, + "F.projectionMatrix() -- derives and returns " + "the projection matrix for frustum F") + + .def("window", &window, + "F.window(l,r,b,t) -- takes a rectangle in " + "the screen space (i.e., -1 <= l <= r <= 1, " + "-1 <= b <= t <= 1) of F and returns a new " + "Frustum whose near clipping-plane window " + "is that rectangle in local space") + + .def("projectScreenToRay", &projectScreenToRay, + "F.projectScreenToRay(V) -- returns a Line3 " + "through V, a V2 point in screen space") + + .def("projectScreenToRay", &projectScreenToRayTuple) + + .def("projectPointToScreen", &projectPointToScreen, + "F.projectPointToScreen(V) -- returns the " + "projection of V3 V into screen space") + + .def("projectPointToScreen", &projectPointToScreenTuple) + + .def("projectPointToScreen", &projectPointToScreenObj) + + .def("ZToDepth", &ZToDepth, + "F.ZToDepth(z, zMin, zMax) -- returns the " + "depth (Z in the local space of the " + "frustum F) corresponding to z (a result of " + "transformation by F's 
projection matrix) " + "after normalizing z to be between zMin " + "and zMax") + + .def("normalizedZToDepth", &normalizedZToDepth, + "F.normalizedZToDepth(z) -- returns the " + "depth (Z in the local space of the " + "frustum F) corresponding to z (a result of " + "transformation by F's projection matrix), " + "which is assumed to have been normalized " + "to [-1, 1]") + + .def("DepthToZ", &DepthToZ, + "F.DepthToZ(depth, zMin, zMax) -- converts " + "depth (Z in the local space of the frustum " + "F) to z (a result of transformation by F's " + "projection matrix) which is normalized to " + "[zMin, zMax]") + + .def("worldRadius", &worldRadius, + "F.worldRadius(V, r) -- returns the radius " + "in F's local space corresponding to the " + "point V and radius r in screen space") + + .def("worldRadius", &worldRadiusTuple) + + .def("screenRadius", &screenRadius, + "F.screenRadius(V, r) -- returns the radius " + "in screen space corresponding to " + "the point V and radius r in F's local " + "space") + + .def("screenRadius", &screenRadiusTuple) + + ; + + decoratecopy(frustum_class); + + return frustum_class; +} + +template +struct IsVisibleTask : public Task +{ + const IMATH_NAMESPACE::FrustumTest& frustumTest; + const PyImath::FixedArray& points; + PyImath::FixedArray& results; + + IsVisibleTask(const IMATH_NAMESPACE::FrustumTest& ft, const PyImath::FixedArray &p, PyImath::FixedArray &r) + : frustumTest(ft), points(p), results(r) {} + + void execute(size_t start, size_t end) + { + for(size_t p = start; p < end; ++p) + results[p] = frustumTest.isVisible(IMATH_NAMESPACE::Vec3(points[p])); + } +}; + +template +PyImath::FixedArray +frustumTest_isVisible(IMATH_NAMESPACE::FrustumTest& ft, const PyImath::FixedArray& points) +{ + size_t numPoints = points.len(); + PyImath::FixedArray mask(numPoints); + + IsVisibleTask task(ft,points,mask); + dispatchTask(task,numPoints); + return mask; +} + +template +class_ > +register_FrustumTest() +{ + const char *name = FrustumTestName::value; + + // dead code? no wrapping for Sphere3 + bool (FrustumTest::*isVisibleS)(const Sphere3 &) const = &FrustumTest::isVisible; + bool (FrustumTest::*isVisibleB)(const Box > &) const = &FrustumTest::isVisible; + bool (FrustumTest::*isVisibleV)(const Vec3 &) const = &FrustumTest::isVisible; + bool (FrustumTest::*completelyContainsS)(const Sphere3 &) const = &FrustumTest::completelyContains; + bool (FrustumTest::*completelyContainsB)(const Box > &) const = &FrustumTest::completelyContains; + + class_< FrustumTest > frustumtest_class(name,name,init&,const IMATH_NAMESPACE::Matrix44&>("create a frustum test object from a frustum and transform")); + frustumtest_class + .def("isVisible",isVisibleS) + .def("isVisible",isVisibleB) + .def("isVisible",isVisibleV) + .def("isVisible",&frustumTest_isVisible) + .def("completelyContains",completelyContainsS) + .def("completelyContains",completelyContainsB) + ; + + decoratecopy(frustumtest_class); + + return frustumtest_class; +} + +template PYIMATH_EXPORT class_ > register_Frustum(); +template PYIMATH_EXPORT class_ > register_Frustum(); +template PYIMATH_EXPORT class_ > register_FrustumTest(); +template PYIMATH_EXPORT class_ > register_FrustumTest(); +} diff --git a/Sources/MetaPy/PyImath/PyImathFun.cpp b/Sources/MetaPy/PyImath/PyImathFun.cpp new file mode 100644 index 00000000..f9ba0ca7 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathFun.cpp @@ -0,0 +1,242 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImathFun.h" +#include "PyImathFunOperators.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImathAutovectorize.h" + +namespace PyImath { + +using namespace boost::python; + +namespace +{ + +struct RegisterFloatDoubleOps +{ + template + void operator() (T) + { + // nb: MSVC gets confused about which arg we want (it thinks it + // might be boost::arg), so telling it which one explicitly here. + typedef boost::python::arg arg; + + generate_bindings,boost::mpl::true_>( + "abs", + "return the absolute value of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "sign", + "return 1 or -1 based on the sign of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "log", + "return the natural log of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "log10", + "return the base 10 log of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_,boost::mpl::true_,boost::mpl::true_>( + "lerp", + "return the linear interpolation of 'a' to 'b' using parameter 't'", + (arg("a"),arg("b"),arg("t"))); + + generate_bindings,boost::mpl::true_,boost::mpl::true_,boost::mpl::true_>( + "lerpfactor", + "return how far m is between a and b, that is return t such that\n" + "if:\n" + " t = lerpfactor(m, a, b);\n" + "then:\n" + " m = lerp(a, b, t);\n" + "\n" + "If a==b, return 0.\n", + (arg("m"),arg("a"),arg("b"))); + + generate_bindings,boost::mpl::true_,boost::mpl::true_,boost::mpl::true_>( + "clamp", + "return the value clamped to the range [low,high]", + (arg("value"),arg("low"),arg("high"))); + + generate_bindings,boost::mpl::true_>( + "floor", + "return the closest integer less than or equal to 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "ceil", + "return the closest integer greater than or equal to 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "trunc", + "return the closest integer with magnitude less than or equal to 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "rgb2hsv", + "return the hsv version of an rgb color", + args("rgb")); + + generate_bindings,boost::mpl::true_>( + "hsv2rgb", + "return the rgb version of an hsv color", + args("hsv")); + + generate_bindings,boost::mpl::true_>( + "sin", + "return the sine of the angle theta", + args("theta")); + + generate_bindings,boost::mpl::true_>( + "cos", + "return the cosine of the angle theta", + args("theta")); + + generate_bindings,boost::mpl::true_>( + "tan", + "return the tangent of the angle theta", + args("theta")); + + generate_bindings,boost::mpl::true_>( + "asin", + "return the arcsine of the value x", + args("x")); + + generate_bindings,boost::mpl::true_>( + "acos", + "return the arccosine of the value x", + args("x")); + + generate_bindings,boost::mpl::true_>( + "atan", + "return the arctangent of the value x", + args("x")); + + generate_bindings,boost::mpl::true_,boost::mpl::true_>( + "atan2", + "return the arctangent of the coordinate x,y - note the y " + "is the first argument for consistency with libm ordering", + args("y","x")); + + generate_bindings,boost::mpl::true_>( + "sqrt", + "return the square root of x", + args("x")); + + generate_bindings,boost::mpl::true_,boost::mpl::true_>( + "pow", + "return x**y", + args("x","y")); + + generate_bindings,boost::mpl::true_>( + "exp", + "return exp(x)", + args("x")); + + 
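+        // Illustrative Python use of the scalar bindings registered here (a
+        // sketch, not part of the build; assumes the module imports as "imath";
+        // the autovectorize machinery also generates the array overloads):
+        //
+        //     >>> imath.lerp(0.0, 10.0, 0.25)
+        //     2.5
+        //     >>> imath.clamp(5.0, 0.0, 1.0)
+        //     1.0
+        //     >>> imath.sin(0.0)
+        //     0.0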
generate_bindings,boost::mpl::true_>( + "sinh", + "return sinh(x)", + args("x")); + + generate_bindings,boost::mpl::true_>( + "cosh", + "return cosh(x)", + args("x")); + + def("cmp", IMATH_NAMESPACE::cmp); + def("cmpt", IMATH_NAMESPACE::cmpt); + def("iszero", IMATH_NAMESPACE::iszero); + def("equal", IMATH_NAMESPACE::equal); + } +}; + +} // namespace + +void register_functions() +{ + // + // Utility Functions + // + + // nb: MSVC gets confused about which arg we want (it thinks it + // might be boost::arg), so telling it which one explicitly here. + typedef boost::python::arg arg; + + using fp_types = boost::mpl::vector; + boost::mpl::for_each(RegisterFloatDoubleOps()); + + generate_bindings,boost::mpl::true_>( + "abs", + "return the absolute value of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_>( + "sign", + "return 1 or -1 based on the sign of 'value'", + (arg("value"))); + + generate_bindings,boost::mpl::true_,boost::mpl::true_,boost::mpl::true_>( + "clamp", + "return the value clamped to the range [low,high]", + (arg("value"),arg("low"),arg("high"))); + + generate_bindings( + "divs", + "return x/y where the remainder has the same sign as x:\n" + " divs(x,y) == (abs(x) / abs(y)) * (sign(x) * sign(y))\n", + (arg("x"),arg("y"))); + generate_bindings( + "mods", + "return x%y where the remainder has the same sign as x:\n" + " mods(x,y) == x - y * divs(x,y)\n", + (arg("x"),arg("y"))); + + generate_bindings( + "divp", + "return x/y where the remainder is always positive:\n" + " divp(x,y) == floor (double(x) / double (y))\n", + (arg("x"),arg("y"))); + generate_bindings( + "modp", + "return x%y where the remainder is always positive:\n" + " modp(x,y) == x - y * divp(x,y)\n", + (arg("x"),arg("y"))); + + generate_bindings( + "bias", + "bias(x,b) is a gamma correction that remaps the unit interval such that bias(0.5, b) = b.", + (arg("x"),arg("b"))); + + generate_bindings( + "gain", + "gain(x,g) is a gamma correction that remaps the unit interval with the property that gain(0.5, g) = 0.5.\n" + "The gain function can be thought of as two scaled bias curves forming an 'S' shape in the unit interval.", + (arg("x"),arg("g"))); + + // + // Vectorized utility functions + // + generate_bindings,boost::mpl::true_,boost::mpl::true_,boost::mpl::true_>( + "rotationXYZWithUpDir", + "return the XYZ rotation vector that rotates 'fromDir' to 'toDir'" + "using the up vector 'upDir'", + args("fromDir","toDir","upDir")); +} + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathLine.cpp b/Sources/MetaPy/PyImath/PyImathLine.cpp new file mode 100644 index 00000000..16a23130 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathLine.cpp @@ -0,0 +1,558 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include "PyImathLine.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" +#include "PyImath.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" + + +namespace PyImath{ +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct LineName {static const char *value;}; +template <> const char *LineName::value = "Line3f"; +template <> const char *LineName::value = "Line3d"; + +template +static Line3 * +Line3_construct_default() +{ + Vec3 point1(T (0), T(0), T(0)); + Vec3 point2(T (1), T(0), T(0)); + + return new Line3(point1, point2); +} + +template +static Line3 * +Line3_tuple_construct(const tuple &t0, const tuple &t1) +{ + Vec3 v0, v1; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3) + { + v0.x = extract(t0[0]); + v0.y = extract(t0[1]); + v0.z = extract(t0[2]); + + v1.x = extract(t1[0]); + v1.y = extract(t1[1]); + v1.z = extract(t1[2]); + + return new Line3(v0, v1); + } + else + throw std::invalid_argument ("Line3 expects tuple of length 3"); +} + +template +static Line3 * +Line3_line_construct(const Line3 &line) +{ + Line3 *l = new Line3; + l->pos = line.pos; + l->dir = line.dir; + + return l; +} + +template +static void +set1(Line3 &line, const Vec3 &p0, const Vec3 &p1) +{ + MATH_EXC_ON; + line.set (p0, p1); +} + +template +static void +setTuple(Line3 &line, const tuple &t0, const tuple &t1) +{ + MATH_EXC_ON; + Vec3 v0, v1; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3) + { + v0.x = extract(t0[0]); + v0.y = extract(t0[1]); + v0.z = extract(t0[2]); + + v1.x = extract(t1[0]); + v1.y = extract(t1[1]); + v1.z = extract(t1[2]); + + line.set(v0, v1); + } + else + throw std::invalid_argument ("Line3 expects tuple of length 3"); +} + +template +static Vec3 +pointAt(Line3 &line, T t) +{ + MATH_EXC_ON; + return line.operator()(t); +} + +template +static T +distanceTo1(Line3 &line, Vec3 &p) +{ + MATH_EXC_ON; + return line.distanceTo(p); +} + +template +static T +distanceTo2(Line3 &line, Line3 &other) +{ + MATH_EXC_ON; + return line.distanceTo(other); +} + +template +static T +distanceToTuple(Line3 line, const tuple &t) +{ + Vec3 v; + if(t.attr("__len__")() == 3) + { + v.x = extract(t[0]); + v.y = extract(t[1]); + v.z = extract(t[2]); + + return line.distanceTo(v); + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static Vec3 +closestPointTo1(Line3 line, const Vec3 &p) +{ + MATH_EXC_ON; + return line.closestPointTo(p); +} + +template +static Vec3 +closestPointTo2(Line3 line, const Line3 &other) +{ + MATH_EXC_ON; + return line.closestPointTo(other); +} + +template +static Vec3 +closestPointToTuple(Line3 line, const tuple &t) +{ + MATH_EXC_ON; + Vec3 v; + if(t.attr("__len__")() == 3) + { + v.x = extract(t[0]); + v.y = extract(t[1]); + v.z = extract(t[2]); + + return line.closestPointTo(v); + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static Vec3 +getPosition(Line3 &line) +{ + return line.pos; +} + +template +static void +setPosition(Line3 &line, const Vec3 &pos) +{ + line.pos = pos; +} + +template +static void +setPositionTuple(Line3 &line, const tuple &t) +{ + Vec3 pos; + if(t.attr("__len__")() == 3) + { + pos.x = extract(t[0]); + pos.y = extract(t[1]); + pos.z = extract(t[2]); + + line.pos = pos; + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static Vec3 
+getDirection(Line3 &line) +{ + return line.dir; +} + +template +static void +setDirection(Line3 &line, const Vec3 &dir) +{ + MATH_EXC_ON; + line.dir = dir.normalized(); +} + +template +static void +setDirectionTuple(Line3 &line, const tuple &t) +{ + MATH_EXC_ON; + Vec3 dir; + if(t.attr("__len__")() == 3) + { + dir.x = extract(t[0]); + dir.y = extract(t[1]); + dir.z = extract(t[2]); + + line.dir = dir.normalized(); + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static void +closestPoints1(Line3 &line1, const Line3 &line2, Vec3 &p0, Vec3 &p1) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::closestPoints(line1, line2, p0, p1); +} + +template +static tuple +closestPoints2(Line3 &line1, const Line3 &line2) +{ + MATH_EXC_ON; + Vec3 p0, p1; + IMATH_NAMESPACE::closestPoints(line1, line2, p0, p1); + tuple p0Tuple = make_tuple(p0.x,p0.y,p0.z); + tuple p1Tuple = make_tuple(p1.x,p1.y,p1.z); + +#if !defined(_MSC_VER) || (_MSC_VER <= 1200) + tuple t = make_tuple(p0Tuple, p1Tuple); + return t; +#else + list v3; + v3.append(p0Tuple); + v3.append(p1Tuple); + return tuple(v3); +#endif +} + +template +static Vec3 +closestVertex(Line3 &line, const Vec3 &v0, const Vec3 &v1, const Vec3 &v2) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::closestVertex(v0, v1, v2, line); +} + +template +static Vec3 +closestVertexTuple(Line3 &line, const tuple &t0, const tuple &t1, const tuple &t2) +{ + MATH_EXC_ON; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3 && t2.attr("__len__")() == 3) + { + Vec3 v0, v1, v2; + v0.x = extract(t0[0]); + v0.y = extract(t0[1]); + v0.z = extract(t0[2]); + + v1.x = extract(t1[0]); + v1.y = extract(t1[1]); + v1.z = extract(t1[2]); + + v2.x = extract(t2[0]); + v2.y = extract(t2[1]); + v2.z = extract(t2[2]); + + return IMATH_NAMESPACE::closestVertex(v0, v1, v2, line); + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static bool +intersect1(Line3 &line, const Vec3 &v0, const Vec3 &v1, const Vec3 &v2, + Vec3 &pt, Vec3 &barycentric, bool &front) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::intersect(line, v0, v1, v2, pt, barycentric, front); +} + +template +static object +intersect2(Line3 &line, const Vec3 &v0, const Vec3 &v1, const Vec3 &v2) +{ + MATH_EXC_ON; + Vec3 pt, bar; + bool front; + + if(IMATH_NAMESPACE::intersect(line, v0, v1, v2, pt, bar, front)) + { + tuple t = make_tuple(pt, bar, front); + return t; + } + else + { + return object(); + } +} + +template +static tuple +intersectTuple(Line3 &line, const tuple &t0, const tuple &t1, const tuple &t2) +{ + + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3 && t2.attr("__len__")() == 3) + { + Vec3 v0, v1, v2, pt, bar; + bool front; + v0.x = extract(t0[0]); + v0.y = extract(t0[1]); + v0.z = extract(t0[2]); + + v1.x = extract(t1[0]); + v1.y = extract(t1[1]); + v1.z = extract(t1[2]); + + v2.x = extract(t2[0]); + v2.y = extract(t2[1]); + v2.z = extract(t2[2]); + + if(IMATH_NAMESPACE::intersect(line, v0, v1, v2, pt, bar, front)) + { + tuple t = make_tuple(pt, bar, front); + return t; + } + else + { + tuple t; + return t; + } + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static Vec3 +rotatePoint(Line3 &line, const Vec3 &p, const T &r) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::rotatePoint(p, line, r); +} + +template +static Vec3 +rotatePointTuple(Line3 &line, const tuple &t, const T &r) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 p; + p.x = extract(t[0]); + p.y = extract(t[1]); + p.z = 
extract(t[2]); + + return IMATH_NAMESPACE::rotatePoint(p, line, r); + } + else + throw std::invalid_argument ( "Line3 expects tuple of length 3"); +} + +template +static std::string Line3_repr(const Line3 &v) +{ + typename return_by_value::apply >::type converter; + + Vec3 v1 = v.pos; + Vec3 v2 = v.pos + v.dir; + + handle<> v1h (converter (v.pos)); + handle<> v1Repr (PYUTIL_OBJECT_REPR (v1h.get())); + std::string v1ReprStr = extract (v1Repr.get()); + + handle<> v2h (converter (v.pos + v.dir)); + handle<> v2Repr (PYUTIL_OBJECT_REPR (v2h.get())); + std::string v2ReprStr = extract (v2Repr.get()); + + std::stringstream stream; + stream << LineName::value << "(" << v1ReprStr << ", " << v2ReprStr << ")"; + return stream.str(); +} + +template +static bool +equal(const Line3 &l1, const Line3 &l2) +{ + if(l1.pos == l2.pos && l1.dir == l2.dir) + return true; + else + return false; +} + +template +static bool +notequal(const Line3 &l1, const Line3 &l2) +{ + if(l1.pos != l2.pos || l1.dir != l2.dir) + return true; + else + return false; +} + +template +class_ > +register_Line() +{ + const char *name = LineName::value; + + class_ > line_class(name); + line_class + .def("__init__", make_constructor(Line3_construct_default), "initialize point to (0,0,0) and direction to (1,0,0)") + .def("__init__", make_constructor(Line3_tuple_construct)) + .def("__init__", make_constructor(Line3_line_construct)) + .def("__init__", make_constructor(Line3_line_construct)) + .def(init &, const Vec3 &>("Line3(point1, point2) construction")) + .def(init &, const Vec3 &>("Line3(point1, point2) construction")) + .def(self * Matrix44()) + .def("__eq__", &equal) + .def("__ne__", ¬equal) + + .def_readwrite("pos", &Line3::pos) + .def_readwrite("dir", &Line3::dir) + + .def("pos", &getPosition, + "l.pos() -- returns the start point of line l") + + .def("dir", &getDirection, + "l.dir() -- returns the direction of line l\n") + + .def("setPos", &setPosition, + "l.setPos(p) -- sets the start point of line l to p") + .def("setPos", &setPositionTuple) + + .def("setDir", &setDirection, + "l.setDir(d) -- sets the direction of line l\n" + "to d.normalized().\n") + .def("setDir", &setDirectionTuple) + + .def("set", &set1, + "l.set(p1, p2) -- sets the start point\n" + "and direction of line l by calling\n" + " l.setPos (p1)\n" + " l.setDir (p2 - p1)\n") + + .def("set", &setTuple) + + .def("pointAt", &pointAt, + "l.pointAt(t) -- returns l.pos() + t * l.dir()") + + .def("distanceTo", &distanceTo1, + "l.distanceTo(p) -- returns the distance from\n" + " line l to point p\n") + + .def("distanceTo", &distanceTo2, + "l1.distanceTo(l2) -- returns the distance from\n" + " line l1 to line l2\n") + + .def("distanceTo", &distanceToTuple) + + .def("closestPointTo", &closestPointTo1, + "l.closestPointTo(p) -- returns the point on\n" + " line l that is closest to point p\n" + "\n") + + .def("closestPointTo", &closestPointToTuple) + .def("closestPointTo", &closestPointTo2, + "l1.closestPointTo(l2) -- returns the point on\n" + " line l1 that is closest to line l2\n") + + .def("closestPoints", &closestPoints1, + "l1.closestPoints(l2,p0,p1)") + + .def("closestPoints", &closestPoints2, + "l1.closestPoints(l2) -- returns a tuple with\n" + "two points:\n" + " (l1.closestPoint(l2), l2.closestPoint(l1)\n") + + .def("closestTriangleVertex", &closestVertex, + "l.closestTriangleVertex(v0, v1, v2) -- returns\n" + "a copy of v0, v1, or v2, depending on which is\n" + "closest to line l.\n") + + .def("closestTriangleVertex", &closestVertexTuple) + 
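+        // Illustrative Python use of these bindings (a sketch, not part of the
+        // build; assumes the module imports as "imath"):
+        //
+        //     >>> ln = imath.Line3f()                # point (0,0,0), direction (1,0,0)
+        //     >>> p = ln.pointAt(2.0)                # == ln.pos() + 2 * ln.dir()
+        //     >>> ln.distanceTo((0.0, 3.0, 0.0))     # tuple overload
+        //     3.0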
.def("intersectWithTriangle", &intersect2) + .def("intersectWithTriangle", &intersect1, + "l.intersectWithTriangle(v0, v1, v2) -- computes the\n" + "intersection of line l and triangle (v0, v1, v2).\n" + "\n" + "If the line and the triangle do not intersect,\n" + "None is returned.\n" + "" + "If the line and the triangle intersect, a tuple\n" + "(p, b, f) is returned:\n" + "\n" + " p intersection point in 3D space\n" + "\n" + " b intersection point in barycentric coordinates\n" + "\n" + " f 1 if the line hits the triangle from the\n" + " front (((v2-v1) % (v1-v2)) ^ l.dir() < 0),\n" + " 0 if the line hits the trianble from the\n" + " back\n" + "\n") + .def("intersectWithTriangle", &intersectTuple) + + .def("rotatePoint", &rotatePoint, + "l.rotatePoint(p,r) -- rotates point p around\n" + "line by angle r (in radians), and returns the\n" + "result (p is not modified)\n") + + .def("rotatePoint", &rotatePointTuple) + .def("__repr__",&Line3_repr) + ; + + decoratecopy(line_class); + + return line_class; +} + +template PYIMATH_EXPORT class_ > register_Line(); +template PYIMATH_EXPORT class_ > register_Line(); + +} // namespace PyImath + + diff --git a/Sources/MetaPy/PyImath/PyImathMatrix22.cpp b/Sources/MetaPy/PyImath/PyImathMatrix22.cpp new file mode 100644 index 00000000..5ba6542e --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathMatrix22.cpp @@ -0,0 +1,735 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#define BOOST_PYTHON_MAX_ARITY 17 + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" +#include "PyImathMatrix.h" +#include "PyImathExport.h" +#include "PyImathDecorators.h" + +namespace PyImath { + +template<> const char *PyImath::M22fArray::name() { return "M22fArray"; } +template<> const char *PyImath::M22dArray::name() { return "M22dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template +struct MatrixRow { + explicit MatrixRow(T *data) : _data(data) {} + T & operator [] (int i) { return _data[i]; } + T *_data; + + static const char *name; + static void register_class() + { + typedef PyImath::StaticFixedArray MatrixRow_helper; + class_ matrixRow_class(name,no_init); + matrixRow_class + .def("__len__", MatrixRow_helper::len) + .def("__getitem__", MatrixRow_helper::getitem,return_value_policy()) + .def("__setitem__", MatrixRow_helper::setitem) + ; + } +}; + +template <> const char *MatrixRow::name = "M22fRow"; +template <> const char *MatrixRow::name = "M22dRow"; + + +template +struct IndexAccessMatrixRow { + typedef MatrixRow result_type; + static MatrixRow apply(Container &c, int i) { return MatrixRow(c[i]); } +}; + +template struct Matrix22Name { static const char *value; }; +template<> const char *Matrix22Name::value = "M22f"; +template<> const char *Matrix22Name::value = "M22d"; + +template +static std::string Matrix22_str(const Matrix22 &v) +{ + std::stringstream stream; + stream << Matrix22Name::value << "("; + for (int row = 0; row < 2; row++) + { + stream << "("; + for (int col = 0; col < 2; col++) + { + stream << v[row][col]; + stream << (col != 1 ? ", " : ""); + } + stream << ")" << (row != 1 ? 
", " : ""); + } + stream << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Matrix22_repr(const Matrix22 &v) +{ + return Matrix22_str(v); +} + +// Specialization for float to full precision +template <> +std::string Matrix22_repr(const Matrix22 &v) +{ + return (boost::format("%s((%.9g, %.9g), (%.9g, %.9g))") + % Matrix22Name::value + % v[0][0] % v[0][1] + % v[1][0] % v[1][1]).str(); +} + +// Specialization for double to full precision +template <> +std::string Matrix22_repr(const Matrix22 &v) +{ + return (boost::format("%s((%.17g, %.17g), (%.17g, %.17g))") + % Matrix22Name::value + % v[0][0] % v[0][1] + % v[1][0] % v[1][1]).str(); +} + +template +static const Matrix22 & +invert22 (Matrix22 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.invert(singExc); +} + +template +static Matrix22 +inverse22 (Matrix22 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.inverse(singExc); +} + +template +static const Matrix22 & +iadd22(Matrix22 &m, const Matrix22 &m2) +{ + MATH_EXC_ON; + Matrix22 m3; + m3.setValue (m2); + return m += m3; +} + +template +static const Matrix22 & +iadd22T(Matrix22 &mat, T a) +{ + MATH_EXC_ON; + return mat += a; +} + +template +static Matrix22 +add22(Matrix22 &m, const Matrix22 &m2) +{ + MATH_EXC_ON; + return m + m2; +} + +template +static const Matrix22 & +isub22(Matrix22 &m, const Matrix22 &m2) +{ + MATH_EXC_ON; + Matrix22 m3; + m3.setValue (m2); + return m -= m3; +} + +template +static const Matrix22 & +isub22T(Matrix22 &mat, T a) +{ + MATH_EXC_ON; + return mat -= a; +} + +template +static Matrix22 +sub22(Matrix22 &m, const Matrix22 &m2) +{ + MATH_EXC_ON; + return m - m2; +} + +template +static const Matrix22 & +negate22 (Matrix22 &m) +{ + MATH_EXC_ON; + return m.negate(); +} + +template +static Matrix22 +neg22 (Matrix22 &m) +{ + MATH_EXC_ON; + return -m; +} + +template +static const Matrix22 & +imul22T(Matrix22 &m, const T &t) +{ + MATH_EXC_ON; + return m *= t; +} + +template +static Matrix22 +mul22T(Matrix22 &m, const T &t) +{ + MATH_EXC_ON; + return m * t; +} + +template +static Matrix22 +rmul22T(Matrix22 &m, const T &t) +{ + MATH_EXC_ON; + return t * m; +} + +template +static const Matrix22 & +idiv22T(Matrix22 &m, const T &t) +{ + MATH_EXC_ON; + return m /= t; +} + +template +static Matrix22 +div22T(Matrix22 &m, const T &t) +{ + MATH_EXC_ON; + return m / t; +} + +template +void +outerProduct22(Matrix22 &mat, const Vec2 &a, const Vec2 &b) +{ + MATH_EXC_ON; + mat = IMATH_NAMESPACE::outerProduct(a,b); +} + +template +static void +multDirMatrix22(Matrix22 &mat, const Vec2 &src, Vec2 &dst) +{ + MATH_EXC_ON; + mat.multDirMatrix(src, dst); +} + +template +static Vec2 +multDirMatrix22_return_value(Matrix22 &mat, const Vec2 &src) +{ + MATH_EXC_ON; + Vec2 dst; + mat.multDirMatrix(src, dst); + return dst; +} + +template +static FixedArray > +multDirMatrix22_array(Matrix22 &mat, const FixedArray >&src) +{ + MATH_EXC_ON; + size_t len = src.len(); + FixedArray > dst(len); + for (size_t i=0; i +static const Matrix22 & +rotate22(Matrix22 &mat, const T &r) +{ + MATH_EXC_ON; + return mat.rotate(r); +} + +template +static void +extractEuler(Matrix22 &mat, Vec2 &dstObj) +{ + MATH_EXC_ON; + T dst; + IMATH_NAMESPACE::extractEuler(mat, dst); + dstObj.setValue(dst, T (0)); +} + +template +static const Matrix22 & +scaleSc22(Matrix22 &mat, const T &s) +{ + MATH_EXC_ON; + Vec2 sVec(s, s); + return mat.scale(sVec); +} + +template +static const Matrix22 & +scaleV22(Matrix22 &mat, const Vec2 &s) +{ + MATH_EXC_ON; + return mat.scale(s); 
+} + +template +static const Matrix22 & +scale22Tuple(Matrix22 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + + return mat.scale(s); + } + else + throw std::invalid_argument ( "m.scale needs tuple of length 2"); +} + +template +static const Matrix22 & +setRotation22(Matrix22 &mat, const T &r) +{ + MATH_EXC_ON; + return mat.setRotation(r); +} + +template +static const Matrix22 & +setScaleSc22(Matrix22 &mat, const T &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix22 & +setScaleV22(Matrix22 &mat, const Vec2 &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix22 & +setScale22Tuple(Matrix22 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + + return mat.setScale(s); + } + else + throw std::invalid_argument ( "m.setScale needs tuple of length 2"); +} + +template +static void +setValue22(Matrix22 &mat, const Matrix22 &value) +{ + MATH_EXC_ON; + mat.setValue(value); +} + +template +static Matrix22 +subtractTL22(Matrix22 &mat, T a) +{ + MATH_EXC_ON; + Matrix22 m(mat.x); + for(int i = 0; i < 2; ++i) + for(int j = 0; j < 2; ++j) + m.x[i][j] -= a; + + return m; +} + +template +static Matrix22 +subtractTR22(Matrix22 &mat, T a) +{ + MATH_EXC_ON; + Matrix22 m(mat.x); + for(int i = 0; i < 2; ++i) + for(int j = 0; j < 2; ++j) + m.x[i][j] = a - m.x[i][j]; + + return m; +} + + +template +static Matrix22 +add22T(Matrix22 &mat, T a) +{ + MATH_EXC_ON; + Matrix22 m(mat.x); + for(int i = 0; i < 2; ++i) + for(int j = 0; j < 2; ++j) + m.x[i][j] += a; + + return m; +} + +template +static Matrix22 +mul22(Matrix22 &mat1, Matrix22 &mat2) +{ + MATH_EXC_ON; + Matrix22 mat2T; + mat2T.setValue (mat2); + return mat1 * mat2T; +} + +template +static Matrix22 +rmul22(Matrix22 &mat2, Matrix22 &mat1) +{ + MATH_EXC_ON; + Matrix22 mat1T; + mat1T.setValue (mat1); + return mat1T * mat2; +} + +template +static const Matrix22 & +imul22(Matrix22 &mat1, Matrix22 &mat2) +{ + MATH_EXC_ON; + Matrix22 mat2T; + mat2T.setValue (mat2); + return mat1 *= mat2T; +} + +template +static bool +lessThan22(Matrix22 &mat1, const Matrix22 &mat2) +{ + for(int i = 0; i < 2; ++i){ + for(int j = 0; j < 2; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +lessThanEqual22(Matrix22 &mat1, const Matrix22 &mat2) +{ + for(int i = 0; i < 2; ++i){ + for(int j = 0; j < 2; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return true; +} + +template +static bool +greaterThan22(Matrix22 &mat1, const Matrix22 &mat2) +{ + for(int i = 0; i < 2; ++i){ + for(int j = 0; j < 2; ++j){ + if(mat1[i][j] < mat2[i][j]){ + std::cout << mat1[i][j] << " " << mat2[i][j] << std::endl; + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +greaterThanEqual22(Matrix22 &mat1, const Matrix22 &mat2) +{ + for(int i = 0; i < 2; ++i){ + for(int j = 0; j < 2; ++j){ + if(mat1[i][j] < mat2[i][j]){ + return false; + } + } + } + + return true; +} + +BOOST_PYTHON_FUNCTION_OVERLOADS(invert22_overloads, invert22, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(inverse22_overloads, inverse22, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(outerProduct22_overloads, outerProduct22, 3, 3); + +template +static Matrix22 * Matrix2_tuple_constructor(const tuple &t0, const tuple &t1) +{ + if(t0.attr("__len__")() == 2 && t1.attr("__len__")() == 2) + { + return new 
Matrix22(extract(t0[0]), extract(t0[1]), + extract(t1[0]), extract(t1[1])); + } + else + throw std::invalid_argument ("Matrix22 takes 2 tuples of length 2"); +} + +template +static Matrix22 *Matrix2_matrix_constructor(const Matrix22 &mat) +{ + Matrix22 *m = new Matrix22; + + for(int i = 0; i < 2; ++i) + for(int j = 0; j < 2; ++j) + m->x[i][j] = T (mat.x[i][j]); + + return m; +} + +template +class_ > +register_Matrix22() +{ + typedef PyImath::StaticFixedArray,T,2,IndexAccessMatrixRow,T,2> > Matrix22_helper; + + MatrixRow::register_class(); + class_ > matrix22_class(Matrix22Name::value, Matrix22Name::value,init >("copy construction")); + matrix22_class + .def(init<>("initialize to identity")) + .def(init("initialize all entries to a single value")) + .def(init("make from components")) + .def("__init__", make_constructor(Matrix2_tuple_constructor)) + .def("__init__", make_constructor(Matrix2_matrix_constructor)) + .def("__init__", make_constructor(Matrix2_matrix_constructor)) + + //.def_readwrite("x00", &Matrix22::x[0][0]) + //.def_readwrite("x01", &Matrix22::x[0][1]) + //.def_readwrite("x02", &Matrix22::x[0][2]) + //.def_readwrite("x10", &Matrix22::x[1][0]) + //.def_readwrite("x11", &Matrix22::x[1][1]) + //.def_readwrite("x12", &Matrix22::x[1][2]) + //.def_readwrite("x20", &Matrix22::x[2][0]) + //.def_readwrite("x21", &Matrix22::x[2][1]) + //.def_readwrite("x22", &Matrix22::x[2][2]) + .def("baseTypeEpsilon", &Matrix22::baseTypeEpsilon,"baseTypeEpsilon() epsilon value of the base type of the vector") + .staticmethod("baseTypeEpsilon") + .def("baseTypeMax", &Matrix22::baseTypeMax,"baseTypeMax() max value of the base type of the vector") + .staticmethod("baseTypeMax") + .def("baseTypeLowest", &Matrix22::baseTypeLowest,"baseTypeLowest() largest negative value of the base type of the vector") + .staticmethod("baseTypeLowest") + .def("baseTypeSmallest", &Matrix22::baseTypeSmallest,"baseTypeSmallest() smallest value of the base type of the vector") + .staticmethod("baseTypeSmallest") + .def("equalWithAbsError", &Matrix22::equalWithAbsError,"m1.equalWithAbsError(m2,e) true if the elements " + "of v1 and v2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e") + .def("equalWithRelError", &Matrix22::equalWithRelError,"m1.equalWithAbsError(m2,e) true if the elements " + "of m1 and m2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e * abs(m1[i])") + // need a different version for matrix data access + .def("__len__", Matrix22_helper::len) + .def("__getitem__", Matrix22_helper::getitem) + //.def("__setitem__", Matrix22_helper::setitem) + .def("makeIdentity",&Matrix22::makeIdentity,"makeIdentity() make this matrix the identity matrix") + .def("transpose",&Matrix22::transpose,return_internal_reference<>(),"transpose() transpose this matrix") + .def("transposed",&Matrix22::transposed,"transposed() return a transposed copy of this matrix") + .def("invert",&invert22,invert22_overloads("invert() invert this matrix")[return_internal_reference<>()]) + .def("inverse",&inverse22,inverse22_overloads("inverse() return an inverted copy of this matrix")) + .def("determinant",&Matrix22::determinant,"determinant() return the determinant of this matrix") + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
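+        // Illustrative Python use of these bindings (a sketch, not part of the
+        // build; assumes the module imports as "imath"):
+        //
+        //     >>> a = imath.M22f()        # identity
+        //     >>> b = imath.M22f(2.0)     # every entry set to 2
+        //     >>> a * b
+        //     M22f((2, 2), (2, 2))
+        //     >>> (a + 1.0)[0][0]         # scalar add, row/column access
+        //     2.0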
+ .def("__iadd__", &iadd22,return_internal_reference<>()) + .def("__iadd__", &iadd22,return_internal_reference<>()) + .def("__iadd__", &iadd22T,return_internal_reference<>()) + .def("__add__", &add22) + .def("__isub__", &isub22,return_internal_reference<>()) + .def("__isub__", &isub22,return_internal_reference<>()) + .def("__isub__", &isub22T,return_internal_reference<>()) + .def("__sub__", &sub22) + .def("negate",&negate22,return_internal_reference<>(),"negate() negate all entries in this matrix") + .def("__neg__", &neg22) + .def("__imul__", &imul22T,return_internal_reference<>()) + .def("__mul__", &mul22T) + .def("__rmul__", &rmul22T) + .def("__idiv__", &idiv22T,return_internal_reference<>()) + .def("__itruediv__", &idiv22T,return_internal_reference<>()) + .def("__div__", &div22T) + .def("__truediv__", &div22T) + .def("__add__", &add22T) + .def("__radd__", &add22T) + .def("__sub__", &subtractTL22) + .def("__rsub__", &subtractTR22) + .def("__mul__", &mul22) + .def("__mul__", &mul22) + .def("__rmul__", &rmul22) + .def("__rmul__", &rmul22) + .def("__imul__", &imul22,return_internal_reference<>()) + .def("__imul__", &imul22,return_internal_reference<>()) + .def("__lt__", &lessThan22) + .def("__le__", &lessThanEqual22) + .def("__gt__", &greaterThan22) + .def("__ge__", &greaterThanEqual22) + //.def(self_ns::str(self)) + .def("__str__",&Matrix22_str) + .def("__repr__",&Matrix22_repr) + + .def("extractEuler", &extractEuler, + "M.extractEuler(r) -- extracts the " + "rotation component of M into r. " + "Assumes that M contains no shear or " + "non-uniform scaling; results are " + "meaningless if it does.") + + .def("multDirMatrix", &multDirMatrix22, "mult matrix") + .def("multDirMatrix", &multDirMatrix22_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix22_array, "mult matrix") + .def("multDirMatrix", &multDirMatrix22, "mult matrix") + .def("multDirMatrix", &multDirMatrix22_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix22_array, "mult matrix") + + .def("rotate", &rotate22, return_internal_reference<>(),"rotate matrix") + + .def("scale", &scaleSc22, return_internal_reference<>(),"scale matrix") + .def("scale", &scaleV22, return_internal_reference<>(),"scale matrix") + .def("scale", &scale22Tuple, return_internal_reference<>(),"scale matrix") + + .def("setRotation", &setRotation22, return_internal_reference<>(),"setRotation()") + .def("setScale", &setScaleSc22, return_internal_reference<>(),"setScale()") + .def("setScale", &setScaleV22, return_internal_reference<>(),"setScale()") + .def("setScale", &setScale22Tuple, return_internal_reference<>(),"setScale()") + + .def("setValue", &setValue22, "setValue()") + ; + + decoratecopy(matrix22_class); + + return matrix22_class; +/* + const Matrix22 & operator = (const Matrix22 &v); + const Matrix22 & operator = (T a); + T * getValue (); + const T * getValue () const; + template void getValue (Matrix22 &v) const; + template Matrix22 & setValue (const Matrix22 &v); + template Matrix22 & setTheMatrix (const Matrix22 &v); + template void multVecMatrix(const Vec2 &src, Vec2 &dst) const; + template void multDirMatrix(const Vec2 &src, Vec2 &dst) const; + template const Matrix22 & setRotation (S r); + template const Matrix22 & rotate (S r); + const Matrix22 & setScale (T s); + template const Matrix22 & setScale (const Vec2 &s); + template const Matrix22 & scale (const Vec2 &s); + template const Matrix22 & setTranslation (const Vec2 &t); + Vec2 translation () const; + template const Matrix22 & translate (const Vec2 &t); + 
template const Matrix22 & setShear (const S &h); + template const Matrix22 & setShear (const Vec2 &h); + template const Matrix22 & shear (const S &xy); + template const Matrix22 & shear (const Vec2 &h); +*/ +} + +template +static void +setM22ArrayItem(FixedArray > &ma, + Py_ssize_t index, + const IMATH_NAMESPACE::Matrix22 &m) +{ + ma[ma.canonical_index(index)] = m; +} + +template +static FixedArray > +inverse22_array(FixedArray >&ma, bool singExc = true) +{ + MATH_EXC_ON; + size_t len = ma.len(); + FixedArray > dst(len); + for (size_t i=0; i +static FixedArray > & +invert22_array(FixedArray >&ma, bool singExc = true) +{ + MATH_EXC_ON; + size_t len = ma.len(); + for (size_t i=0; i +class_ > > +register_M22Array() +{ + class_ > > matrixArray_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Matrix22"); + matrixArray_class + .def("__setitem__", &setM22ArrayItem) + .def("inverse",&inverse22_array,inverse22_array_overloads("inverse() return an inverted copy of this matrix")) + .def("invert",&invert22_array,invert22_array_overloads("invert() invert these matricies")[return_internal_reference<>()]) + ; + + add_comparison_functions(matrixArray_class); + + return matrixArray_class; +} + +template PYIMATH_EXPORT class_ > register_Matrix22(); +template PYIMATH_EXPORT class_ > register_Matrix22(); + +template PYIMATH_EXPORT class_ > > register_M22Array(); +template PYIMATH_EXPORT class_ > > register_M22Array(); + + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix22 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix22(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix22 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix22(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathMatrix33.cpp b/Sources/MetaPy/PyImath/PyImathMatrix33.cpp new file mode 100644 index 00000000..52850835 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathMatrix33.cpp @@ -0,0 +1,1248 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
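+// Illustrative Python view of the M33 bindings defined in this file (a sketch,
+// not part of the build; assumes the module imports as "imath"):
+//
+//     >>> imath.M33f()                   # default constructor gives the identity
+//     M33f((1, 0, 0), (0, 1, 0), (0, 0, 1))
+//     >>> imath.M33f(2.0)                # a single value fills every entry
+//     M33f((2, 2, 2), (2, 2, 2), (2, 2, 2))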
+// + +// clang-format off + +#define BOOST_PYTHON_MAX_ARITY 17 + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include +#include +#include "PyImathMatrix.h" +#include "PyImathExport.h" +#include "PyImathDecorators.h" +#include "PyImath.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" + +namespace PyImath { + +template<> const char *PyImath::M33fArray::name() { return "M33fArray"; } +template<> const char *PyImath::M33dArray::name() { return "M33dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template +struct MatrixRow { + explicit MatrixRow(T *data) : _data(data) {} + T & operator [] (int i) { return _data[i]; } + T *_data; + + static const char *name; + static void register_class() + { + typedef PyImath::StaticFixedArray MatrixRow_helper; + class_ matrixRow_class(name,no_init); + matrixRow_class + .def("__len__", MatrixRow_helper::len) + .def("__getitem__", MatrixRow_helper::getitem,return_value_policy()) + .def("__setitem__", MatrixRow_helper::setitem) + ; + } +}; + +template <> const char *MatrixRow::name = "M33fRow"; +template <> const char *MatrixRow::name = "M33dRow"; + + +template +struct IndexAccessMatrixRow { + typedef MatrixRow result_type; + static MatrixRow apply(Container &c, int i) { return MatrixRow(c[i]); } +}; + +template struct Matrix33Name { static const char *value; }; +template<> const char *Matrix33Name::value = "M33f"; +template<> const char *Matrix33Name::value = "M33d"; + +template +static std::string Matrix33_str(const Matrix33 &v) +{ + std::stringstream stream; + stream << Matrix33Name::value << "("; + for (int row = 0; row < 3; row++) + { + stream << "("; + for (int col = 0; col < 3; col++) + { + stream << v[row][col]; + stream << (col != 2 ? ", " : ""); + } + stream << ")" << (row != 2 ? 
", " : ""); + } + stream << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Matrix33_repr(const Matrix33 &v) +{ + return Matrix33_str(v); +} + +// Specialization for float to full precision +template <> +std::string Matrix33_repr(const Matrix33 &v) +{ + return (boost::format("%s((%.9g, %.9g, %.9g), (%.9g, %.9g, %.9g), (%.9g, %.9g, %.9g))") + % Matrix33Name::value + % v[0][0] % v[0][1] % v[0][2] + % v[1][0] % v[1][1] % v[1][2] + % v[2][0] % v[2][1] % v[2][2]).str(); +} + +// Specialization for double to full precision +template <> +std::string Matrix33_repr(const Matrix33 &v) +{ + return (boost::format("%s((%.17g, %.17g, %.17g), (%.17g, %.17g, %.17g), (%.17g, %.17g, %.17g))") + % Matrix33Name::value + % v[0][0] % v[0][1] % v[0][2] + % v[1][0] % v[1][1] % v[1][2] + % v[2][0] % v[2][1] % v[2][2]).str(); +} + +template +static const Matrix33 & +invert33 (Matrix33 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.invert(singExc); +} + +template +static Matrix33 +inverse33 (Matrix33 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.inverse(singExc); +} + +template +static const Matrix33 & +gjInvert33 (Matrix33 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.gjInvert(singExc); +} + +template +static Matrix33 +gjInverse33 (Matrix33 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.gjInverse(singExc); +} + +template +static const Matrix33 & +iadd33(Matrix33 &m, const Matrix33 &m2) +{ + MATH_EXC_ON; + Matrix33 m3; + m3.setValue (m2); + return m += m3; +} + +template +static const Matrix33 & +iadd33T(Matrix33 &mat, T a) +{ + MATH_EXC_ON; + return mat += a; +} + +template +static Matrix33 +add33(Matrix33 &m, const Matrix33 &m2) +{ + MATH_EXC_ON; + return m + m2; +} + +template +static const Matrix33 & +isub33(Matrix33 &m, const Matrix33 &m2) +{ + MATH_EXC_ON; + Matrix33 m3; + m3.setValue (m2); + return m -= m3; +} + +template +static const Matrix33 & +isub33T(Matrix33 &mat, T a) +{ + MATH_EXC_ON; + return mat -= a; +} + +template +static Matrix33 +sub33(Matrix33 &m, const Matrix33 &m2) +{ + MATH_EXC_ON; + return m - m2; +} + +template +static const Matrix33 & +negate33 (Matrix33 &m) +{ + MATH_EXC_ON; + return m.negate(); +} + +template +static Matrix33 +neg33 (Matrix33 &m) +{ + MATH_EXC_ON; + return -m; +} + +template +static const Matrix33 & +imul33T(Matrix33 &m, const T &t) +{ + MATH_EXC_ON; + return m *= t; +} + +template +static Matrix33 +mul33T(Matrix33 &m, const T &t) +{ + MATH_EXC_ON; + return m * t; +} + +template +static Matrix33 +rmul33T(Matrix33 &m, const T &t) +{ + MATH_EXC_ON; + return t * m; +} + +template +static const Matrix33 & +idiv33T(Matrix33 &m, const T &t) +{ + MATH_EXC_ON; + return m /= t; +} + +template +static Matrix33 +div33T(Matrix33 &m, const T &t) +{ + MATH_EXC_ON; + return m / t; +} + +template +static void +extractAndRemoveScalingAndShear33(Matrix33 &mat, IMATH_NAMESPACE::Vec2 &dstScl, IMATH_NAMESPACE::Vec2 &dstShr, int exc = 1) +{ + MATH_EXC_ON; + T dstShrTmp; + IMATH_NAMESPACE::extractAndRemoveScalingAndShear(mat, dstScl, dstShrTmp, exc); + + dstShr.setValue(dstShrTmp, T (0)); +} + +template +static void +extractEuler(Matrix33 &mat, Vec2 &dstObj) +{ + MATH_EXC_ON; + T dst; + IMATH_NAMESPACE::extractEuler(mat, dst); + dstObj.setValue(dst, T (0)); +} + +template +static int +extractSHRT33(Matrix33 &mat, Vec2 &s, Vec2 &h, Vec2 &r, Vec2 &t, int exc = 1) +{ + MATH_EXC_ON; + T hTmp, rTmp; + + int b = IMATH_NAMESPACE::extractSHRT(mat, s, hTmp, rTmp, t, exc); + + h.setValue(hTmp, T (0)); + 
r.setValue(rTmp, T (0)); + + return b; +} + +template +static void +extractScaling33(Matrix33 &mat, Vec2 &dst, int exc = 1) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::extractScaling(mat, dst, exc); +} + +template +void +outerProduct33(Matrix33 &mat, const Vec3 &a, const Vec3 &b) +{ + MATH_EXC_ON; + mat = IMATH_NAMESPACE::outerProduct(a,b); +} + +template +static void +extractScalingAndShear33(Matrix33 &mat, Vec2 &dstScl, Vec2 &dstShr, int exc = 1) +{ + MATH_EXC_ON; + T dstShrTmp; + IMATH_NAMESPACE::extractScalingAndShear(mat, dstScl, dstShrTmp, exc); + + dstShr.setValue(dstShrTmp, T (0)); +} + +template +static void +multDirMatrix33(Matrix33 &mat, const Vec2 &src, Vec2 &dst) +{ + MATH_EXC_ON; + mat.multDirMatrix(src, dst); +} + +template +static Vec2 +multDirMatrix33_return_value(Matrix33 &mat, const Vec2 &src) +{ + MATH_EXC_ON; + Vec2 dst; + mat.multDirMatrix(src, dst); + return dst; +} + +template +static FixedArray > +multDirMatrix33_array(Matrix33 &mat, const FixedArray >&src) +{ + MATH_EXC_ON; + size_t len = src.len(); + FixedArray > dst(len); + for (size_t i=0; i +static void +multVecMatrix33(Matrix33 &mat, const Vec2 &src, Vec2 &dst) +{ + MATH_EXC_ON; + mat.multVecMatrix(src, dst); +} + +template +static Vec2 +multVecMatrix33_return_value(Matrix33 &mat, const Vec2 &src) +{ + MATH_EXC_ON; + Vec2 dst; + mat.multVecMatrix(src, dst); + return dst; +} + +template +static FixedArray > +multVecMatrix33_array(Matrix33 &mat, const FixedArray >&src) +{ + MATH_EXC_ON; + size_t len = src.len(); + FixedArray > dst(len); + for (size_t i=0; i +static int +removeScaling33(Matrix33 &mat, int exc = 1) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::removeScaling(mat, exc); +} + + +template +static int +removeScalingAndShear33(Matrix33 &mat, int exc = 1) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::removeScalingAndShear(mat, exc); +} + +template +static const Matrix33 & +rotate33(Matrix33 &mat, const T &r) +{ + MATH_EXC_ON; + return mat.rotate(r); +} + + +template +static Matrix33 +sansScaling33(const Matrix33 &mat, bool exc = true) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::sansScaling(mat, exc); +} + +template +static Matrix33 +sansScalingAndShear33(const Matrix33 &mat, bool exc = true) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::sansScalingAndShear(mat, exc); +} + +template +static const Matrix33 & +scaleSc33(Matrix33 &mat, const T &s) +{ + MATH_EXC_ON; + Vec2 sVec(s, s); + return mat.scale(sVec); +} + +template +static const Matrix33 & +scaleV33(Matrix33 &mat, const Vec2 &s) +{ + MATH_EXC_ON; + return mat.scale(s); +} + +template +static const Matrix33 & +scale33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + + return mat.scale(s); + } + else + throw std::domain_error ("m.scale needs tuple of length 2"); +} + +template +static const Matrix33 & +setRotation33(Matrix33 &mat, const T &r) +{ + MATH_EXC_ON; + return mat.setRotation(r); +} + +template +static const Matrix33 & +setScaleSc33(Matrix33 &mat, const T &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix33 & +setScaleV33(Matrix33 &mat, const Vec2 &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix33 & +setScale33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + + return mat.setScale(s); + } + else + throw std::invalid_argument ("m.setScale needs tuple of length 2"); +} + +template +static const 
Matrix33 & +setShearSc33(Matrix33 &mat, const T &h) +{ + MATH_EXC_ON; + Vec2 hVec(h, T(0)); + return mat.setShear(hVec); +} + +template +static const Matrix33 & +setShearV33(Matrix33 &mat, const Vec2 &h) +{ + MATH_EXC_ON; + return mat.setShear(h); +} + +template +static const Matrix33 & +setShear33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 h; + h.x = extract(t[0]); + h.y = extract(t[1]); + + return mat.setShear(h); + } + else + throw std::domain_error ("m.shear needs tuple of length 2"); +} + +template +static const Matrix33 & +setTranslation33(Matrix33 &mat, const Vec2 &t) +{ + MATH_EXC_ON; + return mat.setTranslation(t); +} + +template +static const Matrix33 & +setTranslation33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 trans; + trans.x = extract(t[0]); + trans.y = extract(t[1]); + + return mat.setTranslation(trans); + } + else + throw std::domain_error ("m.translate needs tuple of length 2"); +} + +template +static const Matrix33 & +setTranslation33Obj(Matrix33 &mat, const object &o) +{ + MATH_EXC_ON; + Vec2 v; + if (PyImath::V2::convert (o.ptr(), &v)) + { + return mat.setTranslation(v); + } + else + { + throw std::invalid_argument ("m.setTranslation expected V2 argument"); + return mat; + } +} + +template +static void +setValue33(Matrix33 &mat, const Matrix33 &value) +{ + MATH_EXC_ON; + mat.setValue(value); +} + +template +static const Matrix33 & +shearSc33(Matrix33 &mat, const T &h) +{ + MATH_EXC_ON; + Vec2 hVec(h, T (0)); + + return mat.shear(hVec); +} + +template +static const Matrix33 & +shearV33(Matrix33 &mat, const Vec2 &h) +{ + MATH_EXC_ON; + return mat.shear(h); +} + +template +static const Matrix33 & +shear33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 h; + h.x = extract(t[0]); + h.y = extract(t[1]); + + return mat.shear(h); + } + else + throw std::domain_error ("m.shear needs tuple of length 2"); +} + +template +static const Matrix33 & +translate33(Matrix33 &mat, const object &t) +{ + MATH_EXC_ON; + Vec2 v; + if (PyImath::V2::convert (t.ptr(), &v)) + { + return mat.translate(v); + } + else + { + throw std::invalid_argument ("m.translate expected V2 argument"); + return mat; + } +} + +template +static const Matrix33 & +translate33Tuple(Matrix33 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 2) + { + Vec2 trans; + trans.x = extract(t[0]); + trans.y = extract(t[1]); + + return mat.translate(trans); + } + else + throw std::domain_error ("m.translate needs tuple of length 2"); +} + +template +static Matrix33 +subtractTL33(Matrix33 &mat, T a) +{ + MATH_EXC_ON; + Matrix33 m(mat.x); + for(int i = 0; i < 3; ++i) + for(int j = 0; j < 3; ++j) + m.x[i][j] -= a; + + return m; +} + +template +static Matrix33 +subtractTR33(Matrix33 &mat, T a) +{ + MATH_EXC_ON; + Matrix33 m(mat.x); + for(int i = 0; i < 3; ++i) + for(int j = 0; j < 3; ++j) + m.x[i][j] = a - m.x[i][j]; + + return m; +} + + +template +static Matrix33 +add33T(Matrix33 &mat, T a) +{ + MATH_EXC_ON; + Matrix33 m(mat.x); + for(int i = 0; i < 3; ++i) + for(int j = 0; j < 3; ++j) + m.x[i][j] += a; + + return m; +} + +template +static Matrix33 +mul33(Matrix33 &mat1, Matrix33 &mat2) +{ + MATH_EXC_ON; + Matrix33 mat2T; + mat2T.setValue (mat2); + return mat1 * mat2T; +} + +template +static Matrix33 +rmul33(Matrix33 &mat2, Matrix33 &mat1) +{ + MATH_EXC_ON; + Matrix33 mat1T; + mat1T.setValue (mat1); + return mat1T * mat2; +} + +template +static const Matrix33 & 
+imul33(Matrix33 &mat1, Matrix33 &mat2) +{ + MATH_EXC_ON; + Matrix33 mat2T; + mat2T.setValue (mat2); + return mat1 *= mat2T; +} + +template +static bool +lessThan33(Matrix33 &mat1, const Matrix33 &mat2) +{ + for(int i = 0; i < 3; ++i){ + for(int j = 0; j < 3; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +lessThanEqual33(Matrix33 &mat1, const Matrix33 &mat2) +{ + for(int i = 0; i < 3; ++i){ + for(int j = 0; j < 3; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return true; +} + +template +static bool +greaterThan33(Matrix33 &mat1, const Matrix33 &mat2) +{ + for(int i = 0; i < 3; ++i){ + for(int j = 0; j < 3; ++j){ + if(mat1[i][j] < mat2[i][j]){ + std::cout << mat1[i][j] << " " << mat2[i][j] << std::endl; + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +greaterThanEqual33(Matrix33 &mat1, const Matrix33 &mat2) +{ + for(int i = 0; i < 3; ++i){ + for(int j = 0; j < 3; ++j){ + if(mat1[i][j] < mat2[i][j]){ + return false; + } + } + } + + return true; +} + +template +static tuple +singularValueDecomposition33(const Matrix33& m, bool forcePositiveDeterminant = false) +{ + IMATH_NAMESPACE::Matrix33 U, V; + IMATH_NAMESPACE::Vec3 S; + IMATH_NAMESPACE::jacobiSVD (m, U, S, V, std::numeric_limits::epsilon(), forcePositiveDeterminant); + return make_tuple (U, S, V); +} + +BOOST_PYTHON_FUNCTION_OVERLOADS(invert33_overloads, invert33, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(inverse33_overloads, inverse33, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(gjInvert33_overloads, gjInvert33, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(gjInverse33_overloads, gjInverse33, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(extractAndRemoveScalingAndShear33_overloads, extractAndRemoveScalingAndShear33, 3, 4) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractSHRT33_overloads, extractSHRT33, 5, 6) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractScaling33_overloads, extractScaling33, 2, 3) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractScalingAndShear33_overloads, extractScalingAndShear33, 3, 4) +BOOST_PYTHON_FUNCTION_OVERLOADS(removeScaling33_overloads, removeScaling33, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(removeScalingAndShear33_overloads, removeScalingAndShear33, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(sansScaling33_overloads, sansScaling33, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(sansScalingAndShear33_overloads, sansScalingAndShear33, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(outerProduct33_overloads, outerProduct33, 3, 3); + +template +static Matrix33 * Matrix3_tuple_constructor(const tuple &t0, const tuple &t1, const tuple &t2) +{ + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3 && t2.attr("__len__")() == 3) + { + return new Matrix33(extract(t0[0]), extract(t0[1]), extract(t0[2]), + extract(t1[0]), extract(t1[1]), extract(t1[2]), + extract(t2[0]), extract(t2[1]), extract(t2[2])); + } + else + throw std::domain_error ("Matrix33 takes 3 tuples of length 3"); +} + +template +static Matrix33 *Matrix3_matrix_constructor(const Matrix33 &mat) +{ + Matrix33 *m = new Matrix33; + + for(int i = 0; i < 3; ++i) + for(int j = 0; j < 3; ++j) + m->x[i][j] = T (mat.x[i][j]); + + return m; +} + +template +class_ > +register_Matrix33() +{ + typedef PyImath::StaticFixedArray,T,3,IndexAccessMatrixRow,T,3> > Matrix33_helper; + + MatrixRow::register_class(); + class_ > matrix33_class(Matrix33Name::value, Matrix33Name::value,init >("copy construction")); + matrix33_class + .def(init<>("initialize to identity")) + .def(init("initialize all entries 
to a single value")) + .def(init("make from components")) + .def("__init__", make_constructor(Matrix3_tuple_constructor)) + .def("__init__", make_constructor(Matrix3_matrix_constructor)) + .def("__init__", make_constructor(Matrix3_matrix_constructor)) + + //.def_readwrite("x00", &Matrix33::x[0][0]) + //.def_readwrite("x01", &Matrix33::x[0][1]) + //.def_readwrite("x02", &Matrix33::x[0][2]) + //.def_readwrite("x10", &Matrix33::x[1][0]) + //.def_readwrite("x11", &Matrix33::x[1][1]) + //.def_readwrite("x12", &Matrix33::x[1][2]) + //.def_readwrite("x20", &Matrix33::x[2][0]) + //.def_readwrite("x21", &Matrix33::x[2][1]) + //.def_readwrite("x22", &Matrix33::x[2][2]) + .def("baseTypeEpsilon", &Matrix33::baseTypeEpsilon,"baseTypeEpsilon() epsilon value of the base type of the vector") + .staticmethod("baseTypeEpsilon") + .def("baseTypeMax", &Matrix33::baseTypeMax,"baseTypeMax() max value of the base type of the vector") + .staticmethod("baseTypeMax") + .def("baseTypeLowest", &Matrix33::baseTypeLowest,"baseTypeLowest() largest negative value of the base type of the vector") + .staticmethod("baseTypeLowest") + .def("baseTypeSmallest", &Matrix33::baseTypeSmallest,"baseTypeSmallest() smallest value of the base type of the vector") + .staticmethod("baseTypeSmallest") + .def("equalWithAbsError", &Matrix33::equalWithAbsError,"m1.equalWithAbsError(m2,e) true if the elements " + "of v1 and v2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e") + .def("equalWithRelError", &Matrix33::equalWithRelError,"m1.equalWithAbsError(m2,e) true if the elements " + "of m1 and m2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e * abs(m1[i])") + // need a different version for matrix data access + .def("__len__", Matrix33_helper::len) + .def("__getitem__", Matrix33_helper::getitem) + //.def("__setitem__", Matrix33_helper::setitem) + .def("makeIdentity",&Matrix33::makeIdentity,"makeIdentity() make this matrix the identity matrix") + .def("transpose",&Matrix33::transpose,return_internal_reference<>(),"transpose() transpose this matrix") + .def("transposed",&Matrix33::transposed,"transposed() return a transposed copy of this matrix") + .def("invert",&invert33,invert33_overloads("invert() invert this matrix")[return_internal_reference<>()]) + .def("inverse",&inverse33,inverse33_overloads("inverse() return an inverted copy of this matrix")) + .def("gjInvert",&gjInvert33,gjInvert33_overloads("gjInvert() invert this matrix")[return_internal_reference<>()]) + .def("gjInverse",&gjInverse33,gjInverse33_overloads("gjInverse() return an inverted copy of this matrix")) + .def("minorOf",&Matrix33::minorOf,"minorOf() return the matrix minor of the (row,col) element of this matrix") + .def("fastMinor",&Matrix33::fastMinor,"fastMinor() return the matrix minor using the specified rows and columns of this matrix") + .def("determinant",&Matrix33::determinant,"determinant() return the determinant of this matrix") + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
+ .def("__iadd__", &iadd33,return_internal_reference<>()) + .def("__iadd__", &iadd33,return_internal_reference<>()) + .def("__iadd__", &iadd33T,return_internal_reference<>()) + .def("__add__", &add33) + .def("__isub__", &isub33,return_internal_reference<>()) + .def("__isub__", &isub33,return_internal_reference<>()) + .def("__isub__", &isub33T,return_internal_reference<>()) + .def("__sub__", &sub33) + .def("negate",&negate33,return_internal_reference<>(),"negate() negate all entries in this matrix") + .def("__neg__", &neg33) + .def("__imul__", &imul33T,return_internal_reference<>()) + .def("__mul__", &mul33T) + .def("__rmul__", &rmul33T) + .def("__idiv__", &idiv33T,return_internal_reference<>()) + .def("__itruediv__", &idiv33T,return_internal_reference<>()) + .def("__div__", &div33T) + .def("__truediv__", &div33T) + .def("__add__", &add33T) + .def("__radd__", &add33T) + .def("__sub__", &subtractTL33) + .def("__rsub__", &subtractTR33) + .def("__mul__", &mul33) + .def("__mul__", &mul33) + .def("__rmul__", &rmul33) + .def("__rmul__", &rmul33) + .def("__imul__", &imul33,return_internal_reference<>()) + .def("__imul__", &imul33,return_internal_reference<>()) + .def("__lt__", &lessThan33) + .def("__le__", &lessThanEqual33) + .def("__gt__", &greaterThan33) + .def("__ge__", &greaterThanEqual33) + //.def(self_ns::str(self)) + .def("__str__",&Matrix33_str) + .def("__repr__",&Matrix33_repr) + .def("extractAndRemoveScalingAndShear", &extractAndRemoveScalingAndShear33, + extractAndRemoveScalingAndShear33_overloads( + "M.extractAndRemoveScalingAndShear(scl, shr, " + "[exc]) -- extracts the scaling component of " + "M into scl and the shearing component of M " + "into shr. Also removes the scaling and " + "shearing components from M. " + "Returns 1 unless the scaling component is " + "nearly 0, in which case 0 is returned. " + "If optional arg. exc == 1, then if the " + "scaling component is nearly 0, then MathExc " + "is thrown. ")) + + .def("extractEuler", &extractEuler, + "M.extractEulerZYX(r) -- extracts the " + "rotation component of M into r. " + "Assumes that M contains no shear or " + "non-uniform scaling; results are " + "meaningless if it does.") + + .def("extractSHRT", &extractSHRT33, extractSHRT33_overloads( + "M.extractSHRT(Vs, Vh, Vr, Vt, [exc]) -- " + "extracts the scaling component of M into Vs, " + "the shearing component of M in Vh (as XY, " + "XZ, YZ shear factors), the rotation of M " + "into Vr (as Euler angles in the order XYZ), " + "and the translaation of M into Vt. " + "If optional arg. exc == 1, then if the " + "scaling component is nearly 0, then MathExc " + "is thrown. ")) + + .def("extractScaling", &extractScaling33, extractScaling33_overloads("extract scaling")) + .def("outerProduct", &outerProduct33, outerProduct33_overloads( + "M.outerProduct(Va,Vb) -- " + "Performs the outer product, or tensor product, of two 3D vectors, Va and Vb")) + + .def("extractScalingAndShear", &extractScalingAndShear33, extractScalingAndShear33_overloads("extract scaling")) + .def("singularValueDecomposition", &singularValueDecomposition33, + "Decomposes the matrix using the singular value decomposition (SVD) into three\n" + "matrices U, S, and V which have the following properties: \n" + " 1. U and V are both orthonormal matrices, \n" + " 2. S is the diagonal matrix of singular values, \n" + " 3. U * S * V.transposed() gives back the original matrix.\n" + "The result is returned as a tuple [U, S, V]. 
Note that since S is diagonal we\n" + "don't need to return the entire matrix, so we return it as a three-vector. \n" + "\n" + "The 'forcePositiveDeterminant' argument can be used to force the U and V^T to\n" + "have positive determinant (that is, to be proper rotation matrices); if\n" + "forcePositiveDeterminant is False, then the singular values are guaranteed to\n" + "be nonnegative but the U and V matrices might contain negative scale along one\n" + "of the axes; if forcePositiveDeterminant is True, then U and V cannot contain\n" + "negative scale but S[2] might be negative. \n" + "\n" + "Our SVD implementation uses two-sided Jacobi rotations to iteratively\n" + "diagonalize the matrix, which should be quite robust and significantly faster\n" + "than the more general SVD solver in LAPACK. \n", + args("matrix", "forcePositiveDeterminant")) + .def("symmetricEigensolve", &PyImath::jacobiEigensolve >, + "Decomposes the matrix A using a symmetric eigensolver into matrices Q and S \n" + "which have the following properties: \n" + " 1. Q is the orthonormal matrix of eigenvectors, \n" + " 2. S is the diagonal matrix of eigenvalues, \n" + " 3. Q * S * Q.transposed() gives back the original matrix.\n" + "\n" + "IMPORTANT: It is vital that the passed-in matrix be symmetric, or the result \n" + "won't make any sense. This function will return an error if passed an \n" + "unsymmetric matrix.\n" + "\n" + "The result is returned as a tuple [Q, S]. Note that since S is diagonal \n" + "we don't need to return the entire matrix, so we return it as a three-vector. \n" + "\n" + "Our eigensolver implementation uses one-sided Jacobi rotations to iteratively \n" + "diagonalize the matrix, which should be quite robust and significantly faster \n" + "than the more general symmetric eigenvalue solver in LAPACK. 
\n") + .def("multDirMatrix", &multDirMatrix33, "mult matrix") + .def("multDirMatrix", &multDirMatrix33_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix33_array, "mult matrix") + .def("multDirMatrix", &multDirMatrix33, "mult matrix") + .def("multDirMatrix", &multDirMatrix33_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix33_array, "mult matrix") + .def("multVecMatrix", &multVecMatrix33, "mult matrix") + .def("multVecMatrix", &multVecMatrix33_return_value, "mult matrix") + .def("multVecMatrix", &multVecMatrix33_array, "mult matrix") + .def("multVecMatrix", &multVecMatrix33, "mult matrix") + .def("multVecMatrix", &multVecMatrix33_return_value, "mult matrix") + .def("multVecMatrix", &multVecMatrix33_array, "mult matrix") + .def("removeScaling", &removeScaling33, removeScaling33_overloads("remove scaling")) + + .def("removeScalingAndShear", &removeScalingAndShear33, removeScalingAndShear33_overloads("remove scaling")) + .def("sansScaling", &sansScaling33, sansScaling33_overloads("sans scaling")) + .def("rotate", &rotate33, return_internal_reference<>(),"rotate matrix") + + .def("sansScalingAndShear", &sansScalingAndShear33, sansScalingAndShear33_overloads("sans scaling and shear")) + .def("scale", &scaleSc33, return_internal_reference<>(),"scale matrix") + .def("scale", &scaleV33, return_internal_reference<>(),"scale matrix") + .def("scale", &scale33Tuple, return_internal_reference<>(),"scale matrix") + + .def("setRotation", &setRotation33, return_internal_reference<>(),"setRotation()") + .def("setScale", &setScaleSc33, return_internal_reference<>(),"setScale()") + .def("setScale", &setScaleV33, return_internal_reference<>(),"setScale()") + .def("setScale", &setScale33Tuple, return_internal_reference<>(),"setScale()") + + .def("setShear", &setShearSc33, return_internal_reference<>(),"setShear()") + .def("setShear", &setShearV33, return_internal_reference<>(),"setShear()") + .def("setShear", &setShear33Tuple, return_internal_reference<>(),"setShear()") + + .def("setTranslation", &setTranslation33, return_internal_reference<>(),"setTranslation()") + .def("setTranslation", &setTranslation33Tuple, return_internal_reference<>(),"setTranslation()") + .def("setTranslation", &setTranslation33Obj, return_internal_reference<>(),"setTranslation()") + .def("setValue", &setValue33, "setValue()") + .def("shear", &shearSc33, return_internal_reference<>(),"shear()") + .def("shear", &shearV33, return_internal_reference<>(),"shear()") + .def("shear", &shear33Tuple, return_internal_reference<>(),"shear()") + .def("translate", &translate33, return_internal_reference<>(),"translate()") + .def("translate", &translate33Tuple, return_internal_reference<>(),"translate()") + .def("translation", &Matrix33::translation, "translation()") + ; + + decoratecopy(matrix33_class); + + return matrix33_class; +/* + const Matrix33 & operator = (const Matrix33 &v); + const Matrix33 & operator = (T a); + T * getValue (); + const T * getValue () const; + template void getValue (Matrix33 &v) const; + template Matrix33 & setValue (const Matrix33 &v); + template Matrix33 & setTheMatrix (const Matrix33 &v); + template void multVecMatrix(const Vec2 &src, Vec2 &dst) const; + template void multDirMatrix(const Vec2 &src, Vec2 &dst) const; + template const Matrix33 & setRotation (S r); + template const Matrix33 & rotate (S r); + const Matrix33 & setScale (T s); + template const Matrix33 & setScale (const Vec2 &s); + template const Matrix33 & scale (const Vec2 &s); + template const Matrix33 & setTranslation 
(const Vec2 &t); + Vec2 translation () const; + template const Matrix33 & translate (const Vec2 &t); + template const Matrix33 & setShear (const S &h); + template const Matrix33 & setShear (const Vec2 &h); + template const Matrix33 & shear (const S &xy); + template const Matrix33 & shear (const Vec2 &h); +*/ +} + +template +struct Matrix33Array_Constructor : public Task +{ + const FixedArray &a; const FixedArray &b; const FixedArray &c; + const FixedArray &d; const FixedArray &e; const FixedArray &f; + const FixedArray &g; const FixedArray &h; const FixedArray &i; + FixedArray > &result; + + Matrix33Array_Constructor (const FixedArray &a, const FixedArray &b, const FixedArray &c, + const FixedArray &d, const FixedArray &e, const FixedArray &f, + const FixedArray &g, const FixedArray &h, const FixedArray &i, + FixedArray > &result) + : a (a), b (b), c (c), + d (d), e (e), f (f), + g (g), h (h), i (i), result (result) {} + + void execute (size_t start, size_t end) + { + for (size_t index = start; index < end; ++index) + { + result[index] = IMATH_NAMESPACE::Matrix33(a[index], b[index], c[index], + d[index], e[index], f[index], + g[index], h[index], i[index]); + } + } +}; + +template +static FixedArray > * +M33Array_constructor(const FixedArray &a, const FixedArray &b, const FixedArray &c, + const FixedArray &d, const FixedArray &e, const FixedArray &f, + const FixedArray &g, const FixedArray &h, const FixedArray &i) +{ + MATH_EXC_ON; + Py_ssize_t len = a.len(); + if (!( a.len() == len && b.len() == len && c.len() == len && + d.len() == len && e.len() == len && f.len() == len && + g.len() == len && h.len() == len && i.len() == len)) + throw std::invalid_argument("Dimensions do not match" ); + FixedArray >* result = + new FixedArray > (Py_ssize_t(len), UNINITIALIZED); + + Matrix33Array_Constructor task (a, b, c, d, e, f, g, h, i, *result); + dispatchTask (task, len); + return result; +} + +template +static void +setM33ArrayItem(FixedArray > &ma, + Py_ssize_t index, + const IMATH_NAMESPACE::Matrix33 &m) +{ + ma[ma.canonical_index(index)] = m; +} + + +template +struct M33Array_Inverse : public Task +{ + const FixedArray > &mats; + FixedArray > &result; + + M33Array_Inverse (FixedArray > &result, + const FixedArray > &mats) + : mats (mats), result (result) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i] = mats[i].inverse(); + } +}; + +template +static FixedArray > +M33Array_inverse(const FixedArray > &ma) +{ + MATH_EXC_ON; + size_t len = ma.len(); + FixedArray > result (len); + + M33Array_Inverse task (result, ma); + dispatchTask (task, len); + + return result; +} + +template +struct M33Array_RmulVec3 : public Task +{ + const FixedArray > &a; + const Vec3 &v; + FixedArray > &r; + + M33Array_RmulVec3 (const FixedArray > &a, + const Vec3 &v, + FixedArray > &r) + : a (a), v (v), r (r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + r[i] = v * a[i]; + } + } +}; + +template +static FixedArray< Vec3 > +M33Array_rmulVec3 (const FixedArray< IMATH_NAMESPACE::Matrix33 > &a, const Vec3 &v) +{ + MATH_EXC_ON; + size_t len = a.len(); + FixedArray< Vec3 > r (Py_ssize_t(len), UNINITIALIZED); + + M33Array_RmulVec3 task (a, v, r); + dispatchTask (task, len); + return r; +} + +template +struct M33Array_RmulVec3Array : public Task +{ + const FixedArray > &a; + const FixedArray > &b; + FixedArray > &r; + + M33Array_RmulVec3Array (const FixedArray > &a, + const FixedArray > &b, + FixedArray > &r) + : a (a), b (b), r (r) 
{} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + r[i] = b[i] * a[i]; + } + } +}; + +template +static FixedArray< Vec3 > +M33Array_rmulVec3Array (const FixedArray< IMATH_NAMESPACE::Matrix33 > &a, + const FixedArray< Vec3 > &b) +{ + MATH_EXC_ON; + size_t len = a.match_dimension(b); + FixedArray< Vec3 > r (Py_ssize_t(len), UNINITIALIZED); + + M33Array_RmulVec3Array task (a, b, r); + dispatchTask (task, len); + return r; +} + +template +class_ > > +register_M33Array() +{ + class_ > > matrixArray_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Matrix33"); + matrixArray_class + .def("__init__", make_constructor(M33Array_constructor)) + .def("__setitem__", &setM33ArrayItem) + .def("inverse", &M33Array_inverse, + "Return M^-1 for each element M.", + (args("vector"))) + .def("__rmul__", &M33Array_rmulVec3) + .def("__rmul__", &M33Array_rmulVec3Array) + ; + + add_comparison_functions(matrixArray_class); + + return matrixArray_class; +} + +template PYIMATH_EXPORT class_ > register_Matrix33(); +template PYIMATH_EXPORT class_ > register_Matrix33(); + +template PYIMATH_EXPORT class_ > > register_M33Array(); +template PYIMATH_EXPORT class_ > > register_M33Array(); + + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix33 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix33(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix33 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix33(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathMatrix44.cpp b/Sources/MetaPy/PyImath/PyImathMatrix44.cpp new file mode 100644 index 00000000..21284b4c --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathMatrix44.cpp @@ -0,0 +1,1525 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#define BOOST_PYTHON_MAX_ARITY 17 + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" +#include "PyImathMatrix.h" +#include "PyImathExport.h" +#include "PyImathDecorators.h" +#include "PyImathTask.h" + +namespace PyImath { +template<> const char PYIMATH_EXPORT *PyImath::M44fArray::name() { return "M44fArray"; } +template<> const char PYIMATH_EXPORT *PyImath::M44dArray::name() { return "M44dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; +using namespace PyImath; + +template +struct MatrixRow { + explicit MatrixRow(T *data) : _data(data) {} + T & operator [] (int i) { return _data[i]; } + T *_data; + + static const char *name; + static void register_class() + { + typedef PyImath::StaticFixedArray MatrixRow_helper; + class_ matrixRow_class(name,no_init); + matrixRow_class + .def("__len__", MatrixRow_helper::len) + .def("__getitem__", MatrixRow_helper::getitem,return_value_policy()) + .def("__setitem__", MatrixRow_helper::setitem) + ; + } +}; + +template <> const char *MatrixRow::name = "M44fRow"; +template <> const char *MatrixRow::name = "M44dRow"; + + +template +struct IndexAccessMatrixRow { + typedef MatrixRow result_type; + static MatrixRow apply(Container &c, int i) { return MatrixRow(c[i]); } +}; + +template struct Matrix44Name { static const char *value; }; +template<> const char *Matrix44Name::value = "M44f"; +template<> const char *Matrix44Name::value = "M44d"; + +template +static std::string Matrix44_str(const Matrix44 &v) +{ + std::stringstream stream; + stream << Matrix44Name::value << "("; + for (int row = 0; row < 4; row++) + { + stream << "("; + for (int col = 0; col < 4; col++) + { + stream << v[row][col]; + stream << (col != 3 ? ", " : ""); + } + stream << ")" << (row != 3 ? 
", " : ""); + } + stream << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Matrix44_repr(const Matrix44 &v) +{ + return Matrix44_str(v); +} + +// Specialization for float to full precision +template <> +std::string Matrix44_repr(const Matrix44 &v) +{ + return (boost::format("%s((%.9g, %.9g, %.9g, %.9g), (%.9g, %.9g, %.9g, %.9g), (%.9g, %.9g, %.9g, %.9g), (%.9g, %.9g, %.9g, %.9g))") + % Matrix44Name::value + % v[0][0] % v[0][1] % v[0][2] % v[0][3] + % v[1][0] % v[1][1] % v[1][2] % v[1][3] + % v[2][0] % v[2][1] % v[2][2] % v[2][3] + % v[3][0] % v[3][1] % v[3][2] % v[3][3]).str(); +} + +// Specialization for double to full precision +template <> +std::string Matrix44_repr(const Matrix44 &v) +{ + return (boost::format("%s((%.17g, %.17g, %.17g, %.17g), (%.17g, %.17g, %.17g, %.17g), (%.17g, %.17g, %.17g, %.17g), (%.17g, %.17g, %.17g, %.17g))") + % Matrix44Name::value + % v[0][0] % v[0][1] % v[0][2] % v[0][3] + % v[1][0] % v[1][1] % v[1][2] % v[1][3] + % v[2][0] % v[2][1] % v[2][2] % v[2][3] + % v[3][0] % v[3][1] % v[3][2] % v[3][3]).str(); +} + +template +static const Matrix44 & +invert44 (Matrix44 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.invert(singExc); +} + +template +static Matrix44 +inverse44 (Matrix44 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.inverse(singExc); +} + +template +static const Matrix44 & +gjInvert44 (Matrix44 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.gjInvert(singExc); +} + +template +static Matrix44 +gjInverse44 (Matrix44 &m, bool singExc = true) +{ + MATH_EXC_ON; + return m.gjInverse(singExc); +} + +template +static const Matrix44 & +iadd44(Matrix44 &m, const Matrix44 &m2) +{ + MATH_EXC_ON; + Matrix44 m3; + m3.setValue (m2); + return m += m3; +} + +template +static const Matrix44 & +iadd44T(Matrix44 &mat, T a) +{ + MATH_EXC_ON; + return mat += a; +} + +template +static Matrix44 +add44(Matrix44 &m, const Matrix44 &m2) +{ + MATH_EXC_ON; + return m + m2; +} + +template +static const Matrix44 & +isub44(Matrix44 &m, const Matrix44 &m2) +{ + MATH_EXC_ON; + Matrix44 m3; + m3.setValue (m2); + return m -= m3; +} + +template +static const Matrix44 & +isub44T(Matrix44 &mat, T a) +{ + MATH_EXC_ON; + return mat -= a; +} + +template +static Matrix44 +sub44(Matrix44 &m, const Matrix44 &m2) +{ + MATH_EXC_ON; + return m - m2; +} + +template +static const Matrix44 & +negate44 (Matrix44 &m) +{ + MATH_EXC_ON; + return m.negate(); +} + +template +static Matrix44 +neg44 (Matrix44 &m) +{ + MATH_EXC_ON; + return -m; +} + +template +static const Matrix44 & +imul44T(Matrix44 &m, const T &t) +{ + MATH_EXC_ON; + return m *= t; +} + +template +static Matrix44 +mul44T(Matrix44 &m, const T &t) +{ + MATH_EXC_ON; + return m * t; +} + +template +static Matrix44 +rmul44T(Matrix44 &m, const T &t) +{ + MATH_EXC_ON; + return t * m; +} + +template +static const Matrix44 & +idiv44T(Matrix44 &m, const T &t) +{ + MATH_EXC_ON; + return m /= t; +} + +template +static Matrix44 +div44T(Matrix44 &m, const T &t) +{ + MATH_EXC_ON; + return m / t; +} + +template +static void +extractAndRemoveScalingAndShear44(Matrix44 &mat, Vec3 &dstScl, Vec3 &dstShr, int exc = 1) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::extractAndRemoveScalingAndShear(mat, dstScl, dstShr, exc); +} + +template +static void +extractEulerXYZ(Matrix44 &mat, IMATH_NAMESPACE::Vec3 &dst) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::extractEulerXYZ(mat, dst); +} + +template +static void +extractEulerZYX(Matrix44 &mat, IMATH_NAMESPACE::Vec3 &dst) +{ + MATH_EXC_ON; + 
IMATH_NAMESPACE::extractEulerZYX(mat, dst); +} + +template +static int +extractSHRT44(Matrix44 &mat, Vec3 &s, Vec3 &h, Vec3 &r, Vec3 &t, int exc = 1) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::extractSHRT(mat, s, h, r, t, exc); +} + +template +static void +extractScaling44(Matrix44 &mat, Vec3 &dst, int exc = 1) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::extractScaling(mat, dst, exc); +} + +template +static void +extractScalingAndShear44(Matrix44 &mat, Vec3 &dstScl, Vec3 &dstShr, int exc = 1) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::extractScalingAndShear(mat, dstScl, dstShr, exc); +} + +template +static void +multDirMatrix44(Matrix44 &mat, const Vec3 &src, Vec3 &dst) +{ + MATH_EXC_ON; + mat.multDirMatrix(src, dst); +} + +template +static Vec3 +multDirMatrix44_return_value(Matrix44 &mat, const Vec3 &src) +{ + MATH_EXC_ON; + Vec3 dst; + mat.multDirMatrix(src, dst); + return dst; +} + +template +struct op_multDirMatrix { + + static inline void apply(const Matrix44& m, const Vec3& src, Vec3& dst) + { + m.multDirMatrix(src,dst); + } +}; + +template +struct op_multVecMatrix { + + static inline void apply(const Matrix44& m, const Vec3& src, Vec3& dst) + { + m.multVecMatrix(src,dst); + } +}; + +template +struct MatrixVecTask : public Task +{ + const Matrix44 &mat; + const FixedArray >& src; + FixedArray >& dst; + + MatrixVecTask(const Matrix44 &m, const FixedArray >& s, FixedArray >& d) + : mat(m), src(s), dst(d) {} + + void execute(size_t start, size_t end) + { + for(size_t p = start; p < end; ++p) + Op::apply(mat,src[p],dst[p]); + } +}; + +template +static FixedArray > +multDirMatrix44_array(Matrix44 &mat, const FixedArray >&src) +{ + MATH_EXC_ON; + size_t len = src.len(); + FixedArray > dst(len); + + MatrixVecTask > task(mat,src,dst); + dispatchTask(task,len); + + return dst; +} + +template +static void +multVecMatrix44(Matrix44 &mat, const Vec3 &src, Vec3 &dst) +{ + MATH_EXC_ON; + mat.multVecMatrix(src, dst); +} + +template +static Vec3 +multVecMatrix44_return_value(Matrix44 &mat, const Vec3 &src) +{ + MATH_EXC_ON; + Vec3 dst; + mat.multVecMatrix(src, dst); + return dst; +} + + +template +static FixedArray > +multVecMatrix44_array(Matrix44 &mat, const FixedArray >&src) +{ + MATH_EXC_ON; + size_t len = src.len(); + FixedArray > dst(len); + + MatrixVecTask > task(mat,src,dst); + dispatchTask(task,len); + + return dst; +} + +template +static int +removeScaling44(Matrix44 &mat, int exc = 1) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::removeScaling(mat, exc); +} + +template +static int +removeScalingAndShear44(Matrix44 &mat, int exc = 1) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::removeScalingAndShear(mat, exc); +} + +template +static Matrix44 +sansScaling44(const Matrix44 &mat, bool exc = true) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::sansScaling(mat, exc); +} + +template +static Matrix44 +sansScalingAndShear44(const Matrix44 &mat, bool exc = true) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::sansScalingAndShear(mat, exc); +} + + +template +static const Matrix44 & +scaleSc44(Matrix44 &mat, const T &s) +{ + MATH_EXC_ON; + Vec3 sVec(s, s, s); + return mat.scale(sVec); +} + +template +static const Matrix44 & +scaleV44(Matrix44 &mat, const Vec3 &s) +{ + MATH_EXC_ON; + return mat.scale(s); +} + +template +static const Matrix44 & +scale44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + s.z = extract(t[2]); + + return mat.scale(s); + } + else + throw std::domain_error ("m.scale needs tuple of length 3"); +} + 
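// ---------------------------------------------------------------------------
// Illustrative sketch of the pattern used by the *_array bindings above: an
// operation policy (op_multVecMatrix / op_multDirMatrix), a Task whose
// execute(start, end) handles one contiguous index range, and dispatchTask(),
// which may split [0, len) across worker threads. Task, execute(start, end)
// and dispatchTask are the names actually used above (they come from
// PyImathTask.h, which these files include); the Sketch* / ScaleOp names below
// are hypothetical stand-ins so the example compiles on its own and is not
// part of the vendored sources.

#include <cstddef>
#include <vector>

struct SketchTask                       // minimal stand-in for PyImath::Task
{
    virtual ~SketchTask () {}
    virtual void execute (size_t start, size_t end) = 0;
};

// Serial stand-in for dispatchTask(); a real dispatcher would carve
// [0, len) into chunks and run execute() on each chunk concurrently.
inline void
sketchDispatch (SketchTask &task, size_t len)
{
    task.execute (0, len);
}

struct ScaleOp                          // plays the role of op_multVecMatrix
{
    static void apply (double k, double src, double &dst) { dst = k * src; }
};

template <class Op>
struct SketchVecTask : public SketchTask    // mirrors MatrixVecTask above
{
    double                     k;
    const std::vector<double> &src;
    std::vector<double>       &dst;

    SketchVecTask (double k, const std::vector<double> &s, std::vector<double> &d)
        : k (k), src (s), dst (d) {}

    void execute (size_t start, size_t end)
    {
        // per-element work over [start, end), just like Op::apply(mat, src[p], dst[p])
        for (size_t p = start; p < end; ++p)
            Op::apply (k, src[p], dst[p]);
    }
};

// Usage: scale every element of 'in' by 2, the same way multVecMatrix44_array
// transforms every element of a vector array by a single Matrix44.
inline std::vector<double>
sketchScaleArray (const std::vector<double> &in)
{
    std::vector<double> out (in.size ());
    SketchVecTask<ScaleOp> task (2.0, in, out);
    sketchDispatch (task, in.size ());
    return out;
}
// ---------------------------------------------------------------------------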
+template +static const Matrix44 & +rotateV44(Matrix44 &mat, const Vec3 &r) +{ + MATH_EXC_ON; + return mat.rotate(r); +} + +template +static const Matrix44 & +rotationMatrix44(Matrix44 &mat, const object &fromObj, const object &toObj) +{ + MATH_EXC_ON; + Vec3 from, to; + if (PyImath::V3::convert (fromObj.ptr(), &from) && + PyImath::V3::convert (toObj.ptr(), &to)) + { + Matrix44 rot = IMATH_NAMESPACE::rotationMatrix(from, to); + return mat.setValue(rot); + } + else + { + throw std::invalid_argument ("m.rotationMatrix expected V3 arguments"); + } +} + +template +static const Matrix44 & +rotationMatrixWithUp44(Matrix44 &mat, const object &fromObj, const object &toObj, + const object &upObj) +{ + MATH_EXC_ON; + Vec3 from, to, up; + if (PyImath::V3::convert (fromObj.ptr(), &from) && + PyImath::V3::convert (toObj.ptr(), &to) & + PyImath::V3::convert (upObj.ptr(), &up)) + { + Matrix44 rot = IMATH_NAMESPACE::rotationMatrixWithUpDir(from, to, up); + return mat.setValue(rot); + } + else + { + throw std::invalid_argument ("m.rotationMatrix expected V3 arguments"); + } +} + +template +static const Matrix44 & +setScaleSc44(Matrix44 &mat, const T &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix44 & +setScaleV44(Matrix44 &mat, const Vec3 &s) +{ + MATH_EXC_ON; + return mat.setScale(s); +} + +template +static const Matrix44 & +setScale44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + s.z = extract(t[2]); + + return mat.setScale(s); + } + else + throw std::domain_error ("m.translate needs tuple of length 3"); +} + + +template +static const Matrix44 & +setShearV44(Matrix44 &mat, const Vec3 &sVec) +{ + MATH_EXC_ON; + return mat.setShear(sVec); +} + +template +static const Matrix44 & +setShearS44(Matrix44 &mat, const Shear6 &s) +{ + MATH_EXC_ON; + return mat.setShear(s); +} + +template +static const Matrix44 & +setShear44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + s.z = extract(t[2]); + + return mat.setShear(s); + } + else if(t.attr("__len__")() == 6) + { + Shear6 shear; + for(int i = 0; i < 6; ++i) + { + shear[i] = extract(t[i]); + } + + return mat.setShear(shear); + } + else + throw std::domain_error ("m.setShear needs tuple of length 3 or 6"); +} + +template +static const Matrix44 & +setTranslation44(Matrix44 &mat, const Vec3 t) +{ + MATH_EXC_ON; + return mat.setTranslation(t); +} + +template +static const Matrix44 & +setTranslation44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 trans; + trans.x = extract(t[0]); + trans.y = extract(t[1]); + trans.z = extract(t[2]); + + return mat.setTranslation(trans); + } + else + throw std::domain_error ("m.translate needs tuple of length 3"); +} + +template +static const Matrix44 & +setTranslation44Obj(Matrix44 &mat, const object &o) +{ + MATH_EXC_ON; + Vec3 v; + if (PyImath::V3::convert (o.ptr(), &v)) + { + return mat.setTranslation(v); + } + else + { + throw std::invalid_argument ("m.setTranslation expected V3 argument"); + return mat; + } +} + +template +static void +setValue44(Matrix44 &mat, const Matrix44 &value) +{ + MATH_EXC_ON; + mat.setValue(value); +} + +template +static void +setEulerAngles44(Matrix44 &mat, const Vec3 &value) +{ + MATH_EXC_ON; + mat.setEulerAngles(value); +} + +template +static void +setAxisAngle44(Matrix44 &mat, const Vec3 &axis, T angle) +{ + MATH_EXC_ON; + 
mat.setAxisAngle(axis, angle); +} + +template +static const Matrix44 & +shearV44(Matrix44 &mat, const Vec3 &s) +{ + MATH_EXC_ON; + IMATH_NAMESPACE::Shear6 shear(s[0], s[1], s[2], T (0), T (0), T (0)); + return mat.shear(shear); +} + +template +static const Matrix44 & +shearS44(Matrix44 &mat, const Shear6 &s) +{ + MATH_EXC_ON; + return mat.shear(s); +} + +template +static const Matrix44 & +shear44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 s; + s.x = extract(t[0]); + s.y = extract(t[1]); + s.z = extract(t[2]); + Shear6 shear(s); + + return mat.shear(shear); + } + else if(t.attr("__len__")() == 6) + { + Shear6 shear; + for(int i = 0; i < 6; ++i) + { + shear[i] = extract(t[i]); + } + + return mat.shear(shear); + } + else + throw std::domain_error ("m.shear needs tuple of length 3 or 6"); +} + + +template +static const Matrix44 & +translate44(Matrix44 &mat, const object &t) +{ + MATH_EXC_ON; + Vec3 v; + if (PyImath::V3::convert (t.ptr(), &v)) + { + return mat.translate(v); + } + else + { + throw std::invalid_argument ("m.translate expected V3 argument"); + return mat; + } +} +template +static const Matrix44 & +translate44Tuple(Matrix44 &mat, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 trans; + trans.x = extract(t[0]); + trans.y = extract(t[1]); + trans.z = extract(t[2]); + + return mat.translate(trans); + } + else + throw std::domain_error ("m.translate needs tuple of length 3"); +} + +template +static Matrix44 +subtractTL44(Matrix44 &mat, T a) +{ + MATH_EXC_ON; + Matrix44 m(mat.x); + for(int i = 0; i < 4; ++i) + for(int j = 0; j < 4; ++j) + m.x[i][j] -= a; + + return m; +} + +template +static Matrix44 +subtractTR44(Matrix44 &mat, T a) +{ + MATH_EXC_ON; + Matrix44 m(mat.x); + for(int i = 0; i < 4; ++i) + for(int j = 0; j < 4; ++j) + m.x[i][j] = a - m.x[i][j]; + + return m; +} + + +template +static Matrix44 +add44T(Matrix44 &mat, T a) +{ + MATH_EXC_ON; + Matrix44 m(mat.x); + for(int i = 0; i < 4; ++i) + for(int j = 0; j < 4; ++j) + m.x[i][j] += a; + + return m; +} + +template +static Matrix44 +mul44(Matrix44 &mat1, Matrix44 &mat2) +{ + MATH_EXC_ON; + Matrix44 mat2T; + mat2T.setValue (mat2); + return mat1 * mat2T; +} + +template +static Matrix44 +rmul44(Matrix44 &mat2, Matrix44 &mat1) +{ + MATH_EXC_ON; + Matrix44 mat1T; + mat1T.setValue (mat1); + return mat1T * mat2; +} + +template +static const Matrix44 & +imul44(Matrix44 &mat1, Matrix44 &mat2) +{ + MATH_EXC_ON; + Matrix44 mat2T; + mat2T.setValue (mat2); + return mat1 *= mat2T; +} + +template +static bool +lessThan44(Matrix44 &mat1, const Matrix44 &mat2) +{ + for(int i = 0; i < 4; ++i){ + for(int j = 0; j < 4; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +lessThanEqual44(Matrix44 &mat1, const Matrix44 &mat2) +{ + for(int i = 0; i < 4; ++i){ + for(int j = 0; j < 4; ++j){ + if(mat1[i][j] > mat2[i][j]){ + return false; + } + } + } + + return true; +} + +template +static bool +greaterThan44(Matrix44 &mat1, const Matrix44 &mat2) +{ + for(int i = 0; i < 4; ++i){ + for(int j = 0; j < 4; ++j){ + if(mat1[i][j] < mat2[i][j]){ + return false; + } + } + } + + return (mat1 != mat2); +} + +template +static bool +greaterThanEqual44(Matrix44 &mat1, const Matrix44 &mat2) +{ + for(int i = 0; i < 4; ++i){ + for(int j = 0; j < 4; ++j){ + if(mat1[i][j] < mat2[i][j]){ + return false; + } + } + } + + return true; +} + +template +static tuple +singularValueDecomposition44(const Matrix44& m, bool 
forcePositiveDeterminant = false) +{ + IMATH_NAMESPACE::Matrix44 U, V; + IMATH_NAMESPACE::Vec4 S; + IMATH_NAMESPACE::jacobiSVD (m, U, S, V, std::numeric_limits::epsilon(), forcePositiveDeterminant); + return make_tuple (U, S, V); +} + +BOOST_PYTHON_FUNCTION_OVERLOADS(invert44_overloads, invert44, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(inverse44_overloads, inverse44, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(gjInvert44_overloads, gjInvert44, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(gjInverse44_overloads, gjInverse44, 1, 2); +BOOST_PYTHON_FUNCTION_OVERLOADS(extractAndRemoveScalingAndShear44_overloads, extractAndRemoveScalingAndShear44, 3, 4) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractSHRT44_overloads, extractSHRT44, 5, 6) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractScaling44_overloads, extractScaling44, 2, 3) +BOOST_PYTHON_FUNCTION_OVERLOADS(extractScalingAndShear44_overloads, extractScalingAndShear44, 3, 4) +BOOST_PYTHON_FUNCTION_OVERLOADS(removeScaling44_overloads, removeScaling44, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(removeScalingAndShear44_overloads, removeScalingAndShear44, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(sansScaling44_overloads, sansScaling44, 1, 2) +BOOST_PYTHON_FUNCTION_OVERLOADS(sansScalingAndShear44_overloads, sansScalingAndShear44, 1, 2) + +template +static Matrix44 * Matrix4_tuple_constructor(const tuple &t0, const tuple &t1, const tuple &t2, const tuple &t3) +{ + if(t0.attr("__len__")() == 4 && t1.attr("__len__")() == 4 && t2.attr("__len__")() == 4 && t3.attr("__len__")() == 4) + { + return new Matrix44(extract(t0[0]), extract(t0[1]), extract(t0[2]), extract(t0[3]), + extract(t1[0]), extract(t1[1]), extract(t1[2]), extract(t1[3]), + extract(t2[0]), extract(t2[1]), extract(t2[2]), extract(t2[3]), + extract(t3[0]), extract(t3[1]), extract(t3[2]), extract(t3[3])); + } + else + throw std::domain_error ("Matrix44 takes 4 tuples of length 4"); +} + +template +static Matrix44 *Matrix4_matrix_constructor(const Matrix44 &mat) +{ + Matrix44 *m = new Matrix44; + + for(int i = 0; i < 4; ++i) + for(int j = 0; j < 4; ++j) + m->x[i][j] = T (mat.x[i][j]); + + return m; +} + + +template +class_ > +register_Matrix44() +{ + typedef PyImath::StaticFixedArray,T,4,IndexAccessMatrixRow,T,4> > Matrix44_helper; + + MatrixRow::register_class(); + + class_ > matrix44_class(Matrix44Name::value, Matrix44Name::value,init >("copy construction")); + matrix44_class + .def(init<>("initialize to identity")) + .def(init("initialize all entries to a single value")) + .def("__init__", make_constructor(Matrix4_tuple_constructor),"tuple constructor1") + .def("__init__", make_constructor(Matrix4_matrix_constructor)) + .def("__init__", make_constructor(Matrix4_matrix_constructor)) + + .def(init("make from components")) + //.def_readwrite("x00", &Matrix44::x[0][0]) + //.def_readwrite("x01", &Matrix44::x[0][1]) + //.def_readwrite("x02", &Matrix44::x[0][2]) + //.def_readwrite("x03", &Matrix44::x[0][3]) + //.def_readwrite("x10", &Matrix44::x[1][0]) + //.def_readwrite("x11", &Matrix44::x[1][1]) + //.def_readwrite("x12", &Matrix44::x[1][2]) + //.def_readwrite("x13", &Matrix44::x[1][3]) + //.def_readwrite("x20", &Matrix44::x[2][0]) + //.def_readwrite("x21", &Matrix44::x[2][1]) + //.def_readwrite("x22", &Matrix44::x[2][2]) + //.def_readwrite("x23", &Matrix44::x[2][3]) + //.def_readwrite("x30", &Matrix44::x[3][0]) + //.def_readwrite("x31", &Matrix44::x[3][1]) + //.def_readwrite("x32", &Matrix44::x[3][2]) + //.def_readwrite("x33", &Matrix44::x[3][3]) + .def("baseTypeEpsilon", &Matrix44::baseTypeEpsilon,"baseTypeEpsilon() epsilon 
value of the base type of the vector") + .staticmethod("baseTypeEpsilon") + .def("baseTypeMax", &Matrix44::baseTypeMax,"baseTypeMax() max value of the base type of the vector") + .staticmethod("baseTypeMax") + .def("baseTypeLowest", &Matrix44::baseTypeLowest,"baseTypeLowest() largest negative value of the base type of the vector") + .staticmethod("baseTypeLowest") + .def("baseTypeSmallest", &Matrix44::baseTypeSmallest,"baseTypeSmallest() smallest value of the base type of the vector") + .staticmethod("baseTypeSmallest") + .def("equalWithAbsError", &Matrix44::equalWithAbsError, + "m1.equalWithAbsError(m2,e) true if the elements " + "of v1 and v2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e") + + .def("equalWithRelError", &Matrix44::equalWithRelError, + "m1.equalWithAbsError(m2,e) true if the elements " + "of m1 and m2 are the same with an absolute error of no more than e, " + "i.e., abs(m1[i] - m2[i]) <= e * abs(m1[i])") + + // need a different version for matrix data access + .def("__len__", Matrix44_helper::len) + .def("__getitem__", Matrix44_helper::getitem) + //.def("__setitem__", Matrix44_helper::setitem) + .def("makeIdentity",&Matrix44::makeIdentity,"makeIdentity() make this matrix the identity matrix") + .def("transpose",&Matrix44::transpose,return_internal_reference<>(),"transpose() transpose this matrix") + .def("transposed",&Matrix44::transposed,"transposed() return a transposed copy of this matrix") + .def("minorOf",&Matrix44::minorOf,"minorOf() return matrix minor of the (row,col) element of this matrix") + .def("fastMinor",&Matrix44::fastMinor,"fastMinor() return matrix minor using the specified rows and columns of this matrix") + .def("determinant",&Matrix44::determinant,"determinant() return the determinant of this matrix") + .def("invert",&invert44,invert44_overloads("invert() invert this matrix")[return_internal_reference<>()]) + .def("inverse",&inverse44,inverse44_overloads("inverse() return an inverted copy of this matrix")) + .def("gjInvert",&gjInvert44,gjInvert44_overloads("gjInvert() invert this matrix")[return_internal_reference<>()]) + .def("gjInverse",&gjInverse44,gjInverse44_overloads("gjInverse() return an inverted copy of this matrix")) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
+ .def("__iadd__", &iadd44,return_internal_reference<>()) + .def("__iadd__", &iadd44,return_internal_reference<>()) + .def("__iadd__", &iadd44T,return_internal_reference<>()) + .def("__add__", &add44) + .def("__isub__", &isub44,return_internal_reference<>()) + .def("__isub__", &isub44,return_internal_reference<>()) + .def("__isub__", &isub44T,return_internal_reference<>()) + .def("__sub__", &sub44) + .def("negate",&negate44,return_internal_reference<>(),"negate() negate all entries in this matrix") + .def("__neg__", &neg44) + .def("__imul__", &imul44T,return_internal_reference<>()) + .def("__mul__", &mul44T) + .def("__rmul__", &rmul44T) + .def("__idiv__", &idiv44T,return_internal_reference<>()) + .def("__itruediv__", &idiv44T,return_internal_reference<>()) + .def("__div__", &div44T) + .def("__truediv__", &div44T) + .def("__add__", &add44T) + .def("__radd__", &add44T) + .def("__sub__", &subtractTL44) + .def("__rsub__", &subtractTR44) + .def("__mul__", &mul44) + .def("__mul__", &mul44) + .def("__rmul__", &rmul44) + .def("__rmul__", &rmul44) + .def("__imul__", &imul44,return_internal_reference<>()) + .def("__imul__", &imul44,return_internal_reference<>()) + .def("__lt__", &lessThan44) + .def("__gt__", &greaterThan44) + .def("__le__", &lessThanEqual44) + .def("__ge__", &greaterThanEqual44) + //.def(self_ns::str(self)) + .def("__repr__",&Matrix44_repr) + + .def("extractAndRemoveScalingAndShear", &extractAndRemoveScalingAndShear44, + extractAndRemoveScalingAndShear44_overloads( + "M.extractAndRemoveScalingAndShear(scl, shr, " + "[exc]) -- extracts the scaling component of " + "M into scl and the shearing component of M " + "into shr. Also removes the scaling and " + "shearing components from M. " + "Returns 1 unless the scaling component is " + "nearly 0, in which case 0 is returned. " + "If optional arg. exc == 1, then if the " + "scaling component is nearly 0, then MathExc " + "is thrown.")) + + .def("extractEulerXYZ", &extractEulerXYZ, "extract Euler") + .def("extractEulerZYX", &extractEulerZYX, "extract Euler") + + .def("extractSHRT", &extractSHRT44, extractSHRT44_overloads( + "M.extractSHRT(Vs, Vh, Vr, Vt, [exc]) -- " + "extracts the scaling component of M into Vs, " + "the shearing component of M in Vh (as XY, " + "XZ, YZ shear factors), the rotation of M " + "into Vr (as Euler angles in the order XYZ), " + "and the translaation of M into Vt. " + "If optional arg. exc == 1, then if the " + "scaling component is nearly 0, then MathExc " + "is thrown. ")) + + .def("extractScaling", &extractScaling44, extractScaling44_overloads("extract scaling")) + .def("extractScalingAndShear", &extractScalingAndShear44, extractScalingAndShear44_overloads("extract scaling")) + .def("singularValueDecomposition", &singularValueDecomposition44, + "Decomposes the matrix using the singular value decomposition (SVD) into three\n" + "matrices U, S, and V which have the following properties: \n" + " 1. U and V are both orthonormal matrices, \n" + " 2. S is the diagonal matrix of singular values, \n" + " 3. U * S * V.transposed() gives back the original matrix.\n" + "The result is returned as a tuple [U, S, V]. Note that since S is diagonal we\n" + "don't need to return the entire matrix, so we return it as a three-vector. 
\n" + "\n" + "The 'forcePositiveDeterminant' argument can be used to force the U and V^T to\n" + "have positive determinant (that is, to be proper rotation matrices); if\n" + "forcePositiveDeterminant is False, then the singular values are guaranteed to\n" + "be nonnegative but the U and V matrices might contain negative scale along one\n" + "of the axes; if forcePositiveDeterminant is True, then U and V cannot contain\n" + "negative scale but S[3] might be negative. \n" + "\n" + "Our SVD implementation uses two-sided Jacobi rotations to iteratively\n" + "diagonalize the matrix, which should be quite robust and significantly faster\n" + "than the more general SVD solver in LAPACK. \n", + args("matrix", "forcePositiveDeterminant")) + .def("symmetricEigensolve", &PyImath::jacobiEigensolve >, + "Decomposes the matrix A using a symmetric eigensolver into matrices Q and S \n" + "which have the following properties: \n" + " 1. Q is the orthonormal matrix of eigenvectors, \n" + " 2. S is the diagonal matrix of eigenvalues, \n" + " 3. Q.transposed() * S * Q gives back the original matrix.\n" + "\n" + "IMPORTANT: It is vital that the passed-in matrix be symmetric, or the result \n" + "won't make any sense. This function will return an error if passed an \n" + "unsymmetric matrix.\n" + "\n" + "The result is returned as a tuple [Q, S]. Note that since S is diagonal \n" + "we don't need to return the entire matrix, so we return it as a three-vector. \n" + "\n" + "Our eigensolver implementation uses one-sided Jacobi rotations to iteratively \n" + "diagonalize the matrix, which should be quite robust and significantly faster \n" + "than the more general symmetric eigenvalue solver in LAPACK. \n") + .def("multDirMatrix", &multDirMatrix44, "mult matrix") + .def("multDirMatrix", &multDirMatrix44_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix44_array, "mult matrix") + .def("multDirMatrix", &multDirMatrix44, "mult matrix") + .def("multDirMatrix", &multDirMatrix44_return_value, "mult matrix") + .def("multDirMatrix", &multDirMatrix44_array, "mult matrix") + .def("multVecMatrix", &multVecMatrix44, "mult matrix") + .def("multVecMatrix", &multVecMatrix44_return_value, "mult matrix") + .def("multVecMatrix", &multVecMatrix44_array, "mult matrix") + .def("multVecMatrix", &multVecMatrix44, "mult matrix") + .def("multVecMatrix", &multVecMatrix44_return_value, "mult matrix") + .def("multVecMatrix", &multVecMatrix44_array, "mult matrix") + .def("removeScaling", &removeScaling44, removeScaling44_overloads("remove scaling")) + .def("removeScalingAndShear", &removeScalingAndShear44, removeScalingAndShear44_overloads("remove scaling")) + .def("sansScaling", &sansScaling44, sansScaling44_overloads("sans scaling")) + .def("sansScalingAndShear", &sansScalingAndShear44, sansScalingAndShear44_overloads("sans scaling and shear")) + .def("scale", &scaleSc44, return_internal_reference<>(), "scale matrix") + .def("scale", &scaleV44, return_internal_reference<>(), "scale matrix") + .def("scale", &scale44Tuple, return_internal_reference<>(), "scale matrix") + .def("rotate", &rotateV44, return_internal_reference<>(), "rotate matrix") + .def("rotationMatrix", &rotationMatrix44, return_internal_reference<>(), "rotationMatrix()") + .def("rotationMatrixWithUpDir", &rotationMatrixWithUp44, return_internal_reference<>(), "roationMatrixWithUp()") + .def("setScale", &setScaleSc44, return_internal_reference<>(),"setScale()") + .def("setScale", &setScaleV44, return_internal_reference<>(),"setScale()") + .def("setScale", 
&setScale44Tuple, return_internal_reference<>(),"setScale()") + + .def("setShear", &setShearV44, return_internal_reference<>(),"setShear()") + .def("setShear", &setShearS44, return_internal_reference<>(),"setShear()") + .def("setShear", &setShear44Tuple, return_internal_reference<>(),"setShear()") + .def("setTranslation", &setTranslation44, return_internal_reference<>(),"setTranslation()") + .def("setTranslation", &setTranslation44Tuple, return_internal_reference<>(),"setTranslation()") + .def("setTranslation", &setTranslation44Obj, return_internal_reference<>(),"setTranslation()") + .def("setValue", &setValue44, "setValue()") + .def("setEulerAngles", &setEulerAngles44, "setEulerAngles()") + .def("setAxisAngle", &setAxisAngle44, "setAxisAngle()") + .def("shear", &shearV44, return_internal_reference<>(),"shear()") + .def("shear", &shearS44, return_internal_reference<>(),"shear()") + .def("shear", &shear44Tuple, return_internal_reference<>(),"shear()") + .def("translate", &translate44, return_internal_reference<>(),"translate()") + .def("translate", &translate44Tuple, return_internal_reference<>(),"translate()") + .def("translation", &Matrix44::translation, "translation()") + + ; + + decoratecopy(matrix44_class); + + return matrix44_class; +/* + const Matrix44 & operator = (const Matrix44 &v); + const Matrix44 & operator = (T a); + T * getValue (); + const T * getValue () const; + template void getValue (Matrix44 &v) const; + template Matrix44 & setValue (const Matrix44 &v); + template Matrix44 & setTheMatrix (const Matrix44 &v); + template void multVecMatrix(const Vec2 &src, Vec2 &dst) const; + template void multDirMatrix(const Vec2 &src, Vec2 &dst) const; + template const Matrix44 & setRotation (S r); + template const Matrix44 & rotate (S r); + const Matrix44 & setScale (T s); + template const Matrix44 & setScale (const Vec2 &s); + template const Matrix44 & scale (const Vec2 &s); + template const Matrix44 & setTranslation (const Vec2 &t); + Vec2 translation () const; + template const Matrix44 & translate (const Vec2 &t); + template const Matrix44 & setShear (const S &h); + template const Matrix44 & setShear (const Vec2 &h); + template const Matrix44 & shear (const S &xy); + template const Matrix44 & shear (const Vec2 &h); +*/ +} + +template +struct Matrix44Array_Constructor : public Task +{ + const FixedArray &a; const FixedArray &b; const FixedArray &c; const FixedArray &d; + const FixedArray &e; const FixedArray &f; const FixedArray &g; const FixedArray &h; + const FixedArray &i; const FixedArray &j; const FixedArray &k; const FixedArray &l; + const FixedArray &m; const FixedArray &n; const FixedArray &o; const FixedArray &p; + FixedArray > &result; + + Matrix44Array_Constructor (const FixedArray &a, const FixedArray &b, const FixedArray &c, const FixedArray &d, + const FixedArray &e, const FixedArray &f, const FixedArray &g, const FixedArray &h, + const FixedArray &i, const FixedArray &j, const FixedArray &k, const FixedArray &l, + const FixedArray &m, const FixedArray &n, const FixedArray &o, const FixedArray &p, + FixedArray > &result) + : a (a), b (b), c (c), d (d), + e (e), f (f), g (g), h (h), + i (i), j (j), k (k), l (l), + m (m), n (n), o (o), p (p), result (result) {} + + void execute (size_t start, size_t end) + { + for (size_t index = start; index < end; ++index) + { + result[index] = IMATH_NAMESPACE::Matrix44(a[index], b[index], c[index], d[index], + e[index], f[index], g[index], h[index], + i[index], j[index], k[index], l[index], + m[index], n[index], o[index], p[index]); + 
} + } +}; + +template +static FixedArray > * +M44Array_constructor(const FixedArray &a, const FixedArray &b, const FixedArray &c, const FixedArray &d, + const FixedArray &e, const FixedArray &f, const FixedArray &g, const FixedArray &h, + const FixedArray &i, const FixedArray &j, const FixedArray &k, const FixedArray &l, + const FixedArray &m, const FixedArray &n, const FixedArray &o, const FixedArray &p) +{ + MATH_EXC_ON; + Py_ssize_t len = a.len(); + if (!( a.len() == len && b.len() == len && c.len() == len && d.len() == len && + e.len() == len && f.len() == len && g.len() == len && h.len() == len && + i.len() == len && j.len() == len && k.len() == len && l.len() == len && + m.len() == len && n.len() == len && o.len() == len && p.len() == len)) + throw std::invalid_argument ("Dimensions do not match" ); + + FixedArray >* result = + new FixedArray > (Py_ssize_t(len), UNINITIALIZED); + + Matrix44Array_Constructor task (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, *result); + dispatchTask (task, len); + return result; +} + +template +static void +setM44ArrayItem(FixedArray > &ma, + Py_ssize_t index, + const IMATH_NAMESPACE::Matrix44 &m) +{ + ma[ma.canonical_index(index)] = m; +} + +template +struct M44Array_Inverse : public Task +{ + const FixedArray > &mats; + FixedArray > &result; + + M44Array_Inverse (FixedArray > &result, + const FixedArray > &mats) + : mats (mats), result (result) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i] = mats[i].inverse(); + } +}; + +template +static FixedArray > +M44Array_inverse(const FixedArray > &ma) +{ + MATH_EXC_ON; + size_t len = ma.len(); + FixedArray > result (len); + + M44Array_Inverse task (result, ma); + dispatchTask (task, len); + + return result; +} + +template +struct M44Array_RmulVec4 : public Task +{ + const FixedArray > &a; + const Vec4 &v; + FixedArray > &r; + + M44Array_RmulVec4 (const FixedArray > &a, + const Vec4 &v, + FixedArray > &r) + : a (a), v (v), r (r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + r[i] = v * a[i]; + } + } +}; + +template +static FixedArray< Vec4 > +M44Array_rmulVec4 (const FixedArray< IMATH_NAMESPACE::Matrix44 > &a, const Vec4 &v) +{ + MATH_EXC_ON; + size_t len = a.len(); + FixedArray< Vec4 > r (Py_ssize_t(len), UNINITIALIZED); + + M44Array_RmulVec4 task (a, v, r); + dispatchTask (task, len); + return r; +} + +template +struct M44Array_RmulVec4Array : public Task +{ + const FixedArray > &a; + const FixedArray > &b; + FixedArray > &r; + + M44Array_RmulVec4Array (const FixedArray > &a, + const FixedArray > &b, + FixedArray > &r) + : a (a), b (b), r (r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + r[i] = b[i] * a[i]; + } + } +}; + +template +static FixedArray< Vec4 > +M44Array_rmulVec4Array (const FixedArray< IMATH_NAMESPACE::Matrix44 > &a, + const FixedArray< Vec4 > &b) +{ + MATH_EXC_ON; + size_t len = a.match_dimension(b); + FixedArray< Vec4 > r (Py_ssize_t(len), UNINITIALIZED); + + M44Array_RmulVec4Array task (a, b, r); + dispatchTask (task, len); + return r; +} + +template +struct M44Array_RmulVec3ArrayT : public Task +{ + const FixedArray > &a; + const FixedArray > &b; + FixedArray > &r; + + M44Array_RmulVec3ArrayT (const FixedArray > &a, + const FixedArray > &b, + FixedArray > &r) + : a(a), b(b), r(r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + r[i] = b[i] * a[i]; + } +}; + +template +FixedArray > 
+M44Array_rmulVec3ArrayT (const FixedArray > &a, + const FixedArray > &b) +{ + MATH_EXC_ON; + size_t len = a.match_dimension(b); + FixedArray< IMATH_NAMESPACE::Vec3 > result (Py_ssize_t(len), UNINITIALIZED); + + M44Array_RmulVec3ArrayT task (a, b, result); + dispatchTask (task, len); + + return result; +} + +template +struct M44Array_Invert : public Task +{ + FixedArray > &m; + + M44Array_Invert (FixedArray > &m) + : m(m) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + m[i].invert(); + } +}; + +template +void +M44Array_invert (FixedArray > &m) +{ + MATH_EXC_ON; + size_t len = m.len(); + + M44Array_Invert task (m); + dispatchTask (task, len); +} + +template +struct M44Array_Transpose : public Task +{ + FixedArray > &m; + + M44Array_Transpose (FixedArray > &m) + : m(m) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + m[i].transpose(); + } +}; + +template +void +M44Array_transpose (FixedArray > &m) +{ + MATH_EXC_ON; + size_t len = m.len(); + + M44Array_Transpose task (m); + dispatchTask (task, len); +} + +template +struct M44Array_MultDirMatrix : public Task +{ + const FixedArray > &m; + const FixedArray > &v; + FixedArray > &r; + + M44Array_MultDirMatrix (const FixedArray > &m, + const FixedArray > &v, + FixedArray > &r) + : m(m), v(v), r(r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + m[i].multDirMatrix (v[i], r[i]); + } +}; + +template +FixedArray > +M44Array_multDirMatrix (const FixedArray > &m, + const FixedArray > &v) +{ + MATH_EXC_ON; + size_t len = m.match_dimension(v); + FixedArray > result (Py_ssize_t(len), UNINITIALIZED); + + M44Array_MultDirMatrix task (m, v, result); + dispatchTask (task, len); + + return result; +} + +template +struct M44Array_MultVecMatrix : public Task +{ + const FixedArray > &m; + const FixedArray > &v; + FixedArray > &r; + + M44Array_MultVecMatrix (const FixedArray > &m, + const FixedArray > &v, + FixedArray > &r) + : m(m), v(v), r(r) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + m[i].multVecMatrix (v[i], r[i]); + } +}; + +template +FixedArray > +M44Array_multVecMatrix (const FixedArray > &m, + const FixedArray > &v) +{ + MATH_EXC_ON; + size_t len = m.match_dimension(v); + FixedArray > result (Py_ssize_t(len), UNINITIALIZED); + + M44Array_MultVecMatrix task (m, v, result); + dispatchTask (task, len); + + return result; +} + +template +class_ > > +register_M44Array() +{ + + class_ > > matrixArray_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Matrix44"); + matrixArray_class + .def("__init__", make_constructor(M44Array_constructor)) + .def("__setitem__", &setM44ArrayItem) + .def("inverse", &M44Array_inverse, + "Return M^-1 for each element M.", + (args("vector"))) + .def("invert", &M44Array_invert, + "Perform M^-1 in place for each element M.") + .def("transpose", &M44Array_transpose, + "Perform M^T in place for each element M.") + .def("multDirMatrix", &M44Array_multDirMatrix, + "Multiply an array of vectors element by element with the matrix array.", + (args("vector"))) + .def("multVecMatrix", &M44Array_multVecMatrix, + "Multiply an array of normals element by element with the matrix array.", + (args("vector"))) + .def("__rmul__", &M44Array_rmulVec4) + .def("__rmul__", &M44Array_rmulVec4Array) + .def("__rmul__", &M44Array_rmulVec3ArrayT) + ; + + add_comparison_functions(matrixArray_class); + + return matrixArray_class; +} + + +template PYIMATH_EXPORT class_ > 
register_Matrix44(); +template PYIMATH_EXPORT class_ > register_Matrix44(); + +template PYIMATH_EXPORT class_ > > register_M44Array(); +template PYIMATH_EXPORT class_ > > register_M44Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix44 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix44(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Matrix44 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Matrix44(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathPlane.cpp b/Sources/MetaPy/PyImath/PyImathPlane.cpp new file mode 100644 index 00000000..d9da45cc --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathPlane.cpp @@ -0,0 +1,580 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathVec.h" +#include "PyImathMathExc.h" +#include "PyImathPlane.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" + +namespace PyImath{ +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct PlaneName {static const char *value;}; +template <> const char *PlaneName::value = "Plane3f"; +template <> const char *PlaneName::value = "Plane3d"; + +template +static Plane3 *Plane3_construct_default() +{ + Vec3 normal(T (1), T (0), T (0)); + return new Plane3(normal, T (0)); +} + +template +static Plane3 *Plane3_plane_construct(const object &planeObj) +{ + MATH_EXC_ON; + extract < Plane3 > ef (planeObj); + extract < Plane3 > ed (planeObj); + + Plane3 *p = 0; + + if (ef.check()) + { + Plane3 efp = ef(); + p = new Plane3; + p->normal = efp.normal; + p->distance = efp.distance; + } + + else if (ed.check()) + { + Plane3 edp = ed(); + p = new Plane3; + p->normal = edp.normal; + p->distance = edp.distance; + } + + else + { + throw std::invalid_argument ("invalid parameter passed to Plane constructor"); + } + + return p; +} + +template +static Plane3 *Plane3_tuple_constructor1(const tuple &t, T distance) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 normal; + normal.x = extract(t[0]); + normal.y = extract(t[1]); + normal.z = extract(t[2]); + + return new Plane3(normal, distance); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static Plane3 *Plane3_tuple_constructor2(const tuple &t0, const tuple &t1) +{ + MATH_EXC_ON; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3) + { + Vec3 point, normal; + point.x = extract(t0[0]); + point.y = extract(t0[1]); + point.z = extract(t0[2]); + + normal.x = extract(t1[0]); + normal.y = extract(t1[1]); + normal.z = extract(t1[2]); + + return new Plane3(point, normal); + } + else + throw std::domain_error ("Plane3 expects tuples of length 3"); +} + +template +static Plane3 *Plane3_tuple_constructor3(const tuple &t0, const tuple &t1, const tuple &t2) +{ + MATH_EXC_ON; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3 && t2.attr("__len__")() == 3) + { + Vec3 point0, point1, point2; + point0.x = extract(t0[0]); + point0.y = extract(t0[1]); + point0.z = extract(t0[2]); + + point1.x = extract(t1[0]); + point1.y = extract(t1[1]); + point1.z = extract(t1[2]); + + point2.x = extract(t2[0]); + point2.y = extract(t2[1]); + point2.z = extract(t2[2]); + + return new Plane3(point0, point1, point2); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static Plane3 +mul (const Plane3 &plane, const Matrix44 &M) +{ + MATH_EXC_ON; + return 
plane * M; +} + +template +static void +set1 (Plane3 &plane, const Vec3 &v, T t) +{ + MATH_EXC_ON; + plane.set (v, t); +} + +template +static void +set2 (Plane3 &plane, const Vec3 &v1, const Vec3 &v2) +{ + MATH_EXC_ON; + plane.set (v1, v2); +} + +template +static void +set3 (Plane3 &plane, const Vec3 &v1, const Vec3 &v2, const Vec3 &v3) +{ + MATH_EXC_ON; + plane.set (v1, v2, v3); +} + +template +static std::string Plane3_str(const Plane3 &plane) +{ + std::stringstream stream; + + typename return_by_value::apply >::type converter; + handle<> normH (converter (plane.normal)); + handle<> normRepr (PYUTIL_OBJECT_REPR (normH.get())); + std::string normalReprStr = extract (normRepr.get()); + + stream << PlaneName::value << "(" << normalReprStr << ", " + << plane.distance << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Plane3_repr(const Plane3 &plane) +{ + return Plane3_str(plane); +} + +// Specialization for float to full precision +template <> +std::string Plane3_repr(const Plane3 &plane) +{ + typename return_by_value::apply >::type converter; + + handle<> normH (converter (plane.normal)); + handle<> normRepr (PYUTIL_OBJECT_REPR (normH.get())); + std::string normalReprStr = extract (normRepr.get()); + + return (boost::format("%s(%s, %.9g)") + % PlaneName::value + % normalReprStr.c_str() + % plane.distance).str(); +} + +// Specialization for double to full precision +template <> +std::string Plane3_repr(const Plane3 &plane) +{ + typename return_by_value::apply >::type converter; + + handle<> normH (converter (plane.normal)); + handle<> normRepr (PYUTIL_OBJECT_REPR (normH.get())); + std::string normalReprStr = extract (normRepr.get()); + + return (boost::format("%s(%s, %.17g)") + % PlaneName::value + % normalReprStr.c_str() + % plane.distance).str(); +} + + +template +static T +distance(Plane3 &plane) +{ + return plane.distance; +} + +template +static Vec3 +normal(Plane3 &plane) +{ + return plane.normal; +} + +template +static void +setNormal(Plane3 &plane, const Vec3 &normal) +{ + MATH_EXC_ON; + plane.normal = normal.normalized(); +} + +template +static void +setDistance(Plane3 &plane, const T &distance) +{ + plane.distance = distance; +} + +template +static object intersectT(const Plane3 &plane, const Line3 &line) +{ + MATH_EXC_ON; + T param; + Line3 l; + l.pos = line.pos; + l.dir = line.dir; + + if(plane.intersectT(l, param)) + return object(param); + + return object(); +} + +template +static bool +intersect2(const Plane3 &plane, const Line3 &line, Vec3 &intersection) +{ + MATH_EXC_ON; + return plane.intersect(line, intersection); +} + +template +static object +intersect1(const Plane3 &plane, const Line3 &line) +{ + MATH_EXC_ON; + Vec3 intersection; + Line3 l; + l.pos = line.pos; + l.dir = line.dir; + if(plane.intersect(l, intersection)) + return object(intersection); + + return object(); + +} + +template +static void +setTuple1(Plane3 &plane, const tuple &t, T distance) +{ + MATH_EXC_ON; + if(t.attr("__len__")() == 3) + { + Vec3 normal; + normal.x = extract(t[0]); + normal.y = extract(t[1]); + normal.z = extract(t[2]); + + plane.set(normal, distance); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static void +setTuple2(Plane3 &plane, const tuple &t0, const tuple &t1) +{ + MATH_EXC_ON; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3) + { + Vec3 point, normal; + point.x = extract(t0[0]); + point.y = extract(t0[1]); + point.z = extract(t0[2]); + + normal.x = extract(t1[0]); + 
normal.y = extract(t1[1]); + normal.z = extract(t1[2]); + + plane.set(point, normal); + } + else + throw std::domain_error ("Plane3 expects tuples of length 3"); +} + +template +static void +setTuple3(Plane3 &plane, const tuple &t0, const tuple &t1, const tuple &t2) +{ + MATH_EXC_ON; + if(t0.attr("__len__")() == 3 && t1.attr("__len__")() == 3 && t2.attr("__len__")() == 3) + { + Vec3 point0, point1, point2; + point0.x = extract(t0[0]); + point0.y = extract(t0[1]); + point0.z = extract(t0[2]); + + point1.x = extract(t1[0]); + point1.y = extract(t1[1]); + point1.z = extract(t1[2]); + + point2.x = extract(t2[0]); + point2.y = extract(t2[1]); + point2.z = extract(t2[2]); + + plane.set(point0, point1, point2); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static Vec3 +reflectPoint(Plane3 &plane, const Vec3 &p) +{ + MATH_EXC_ON; + return plane.reflectPoint(p); +} + +template +static Vec3 +reflectPointTuple(Plane3 &plane, const tuple &t) +{ + MATH_EXC_ON; + Vec3 point; + if(t.attr("__len__")() == 3) + { + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + + return plane.reflectPoint(point); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static T +distanceTo(Plane3 &plane, const Vec3 &v) +{ + MATH_EXC_ON; + return plane.distanceTo(v); +} + +template +static T +distanceToTuple(Plane3 &plane, const tuple &t) +{ + MATH_EXC_ON; + Vec3 point; + if(t.attr("__len__")() == 3) + { + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + + return plane.distanceTo(point); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static Vec3 +reflectVector(Plane3 &plane, const Vec3 &v) +{ + MATH_EXC_ON; + return plane.reflectVector(v); +} + +template +static Vec3 +reflectVectorTuple(Plane3 &plane, const tuple &t) +{ + MATH_EXC_ON; + Vec3 point; + if(t.attr("__len__")() == 3) + { + point.x = extract(t[0]); + point.y = extract(t[1]); + point.z = extract(t[2]); + + return plane.reflectVector(point); + } + else + throw std::domain_error ("Plane3 expects tuple of length 3"); +} + +template +static bool +equal(const Plane3 &p1, const Plane3 &p2) +{ + if(p1.normal == p2.normal && p1.distance == p2.distance) + return true; + else + return false; +} + +template +static bool +notequal(const Plane3 &p1, const Plane3 &p2) +{ + if(p1.normal != p2.normal || p1.distance != p2.distance) + return true; + else + return false; +} + +template +static Plane3 +negate(const Plane3 &plane) +{ + MATH_EXC_ON; + Plane3 p; + p.set(-plane.normal, -plane.distance); + + return p; +} + + + +template +class_ > +register_Plane() +{ + const char *name = PlaneName::value; + + class_< Plane3 > plane_class(name); + plane_class + .def("__init__",make_constructor(Plane3_construct_default),"initialize normal to (1,0,0), distance to 0") + .def("__init__",make_constructor(Plane3_tuple_constructor1)) + .def("__init__",make_constructor(Plane3_tuple_constructor2)) + .def("__init__",make_constructor(Plane3_tuple_constructor3)) + .def("__init__",make_constructor(Plane3_plane_construct)) + .def(init &, T>("Plane3(normal, distance) construction")) + .def(init &, const Vec3 &>("Plane3(point, normal) construction")) + .def(init &, const Vec3 &, const Vec3 &>("Plane3(point1, point2, point3) construction")) + .def("__eq__", &equal) + .def("__ne__", ¬equal) + .def("__mul__", &mul) + .def("__neg__", &negate) + .def("__str__", &Plane3_str) + .def("__repr__", &Plane3_repr) + + 
.def_readwrite("normal", &Plane3::normal) + .def_readwrite("distance", &Plane3::distance) + + .def("normal", &normal, "normal()", + "pl.normal() -- returns the normal of plane pl") + + .def("distance", &distance, "distance()", + "pl.distance() -- returns the signed distance\n" + "of plane pl from the coordinate origin") + + .def("setNormal", &setNormal, "setNormal()", + "pl.setNormal(n) -- sets the normal of plane\n" + "pl to n.normalized()") + + .def("setDistance", &setDistance, "setDistance()", + "pl.setDistance(d) -- sets the signed distance\n" + "of plane pl from the coordinate origin to d") + + .def("set", &set1, "set()", + "pl.set(n,d) -- sets the normal and the signed\n" + " distance of plane pl to n and d\n" + "\n" + "pl.set(p,n) -- sets the normal of plane pl to\n" + " n.normalized() and adjusts the distance of\n" + " pl from the coordinate origin so that pl\n" + " passes through point p\n" + "\n" + "pl.set(p1,p2,p3) -- sets the normal of plane pl\n" + " to (p2-p1)%(p3-p1)).normalized(), and adjusts\n" + " the distance of pl from the coordinate origin\n" + " so that pl passes through points p1, p2 and p3") + + .def("set", &set2, "set()") + .def("set", &set3, "set()") + + .def("set", &setTuple1, "set()") + .def("set", &setTuple2, "set()") + .def("set", &setTuple3, "set()") + + .def("intersect", &intersect2, + "pl.intersect(ln, pt) -- returns true if the line intersects\n" + "the plane, false if it doesn't. The point where plane\n" + "pl and line ln intersect is stored in pt") + + .def("intersect", &intersect1, + "pl.intersect(ln) -- returns the point where plane\n" + "pl and line ln intersect, or None if pl and ln do\n" + "not intersect") + .def("intersect", &intersect1, + "pl.intersect(ln) -- returns the point where plane\n" + "pl and line ln intersect, or None if pl and ln do\n" + "not intersect") + + .def("intersectT", &intersectT, + "pl.intersectT(ln) -- computes the intersection,\n" + "i, of plane pl and line ln, and returns t, so that\n" + "ln.pos() + t * ln.dir() == i.\n" + "If pl and ln do not intersect, pl.intersectT(ln)\n" + "returns None.\n") + + .def("intersectT", &intersectT) + + .def("distanceTo", &distanceTo, "distanceTo()", + "pl.distanceTo(p) -- returns the signed distance\n" + "between plane pl and point p (positive if p is\n" + "on the side of pl where the pl's normal points)\n") + + .def("distanceTo", &distanceToTuple) + + .def("reflectPoint", &reflectPoint, "reflectPoint()", + "pl.reflectPoint(p) -- returns the image,\n" + "q, of point p after reflection on plane pl:\n" + "the distance between p and q is twice the\n" + "distance between p and pl, and the line from\n" + "p to q is parallel to pl's normal.") + + .def("reflectPoint", &reflectPointTuple) + + .def("reflectVector", &reflectVector, "reflectVector()", + "pl.reflectVector(v) -- returns the direction\n" + "of a ray with direction v after reflection on\n" + "plane pl") + .def("reflectVector", &reflectVectorTuple) + + ; + + decoratecopy(plane_class); + + return plane_class; +} + +template PYIMATH_EXPORT class_ > register_Plane(); +template PYIMATH_EXPORT class_ > register_Plane(); + +} //namespace PyIMath diff --git a/Sources/MetaPy/PyImath/PyImathQuat.cpp b/Sources/MetaPy/PyImath/PyImathQuat.cpp new file mode 100644 index 00000000..b0b2937e --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathQuat.cpp @@ -0,0 +1,1132 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include "PyImathQuat.h" +#include "PyImathExport.h" +#include "PyImathDecorators.h" +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathOperators.h" +#include "PyImathQuatOperators.h" + +// XXX incomplete array wrapping, docstrings missing + +namespace PyImath { +template <> const char *PyImath::QuatfArray::name() { return "QuatfArray"; } +template <> const char *PyImath::QuatdArray::name() { return "QuatdArray"; } +} + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct QuatName { static const char *value; }; +template<> const char *QuatName::value = "Quatf"; +template<> const char *QuatName::value = "Quatd"; + +template +static std::string Quat_str(const Quat &v) +{ + std::stringstream stream; + stream << QuatName::value << "(" << v[0] << ", " << v[1] << ", " + << v[2] << ", " << v[3] << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Quat_repr(const Quat &v) +{ + return Quat_str(v); +} + +// Specialization for float to full precision +template <> +std::string Quat_repr(const Quat &v) +{ + return (boost::format("%s(%.9g, %.9g, %.9g, %.9g)") + % QuatName::value + % v[0] % v[1] % v[2] % v[3]).str(); +} + +// Specialization for double to full precision +template <> +std::string Quat_repr(const Quat &v) +{ + return (boost::format("%s(%.17g, %.17g, %.17g, %.17g)") + % QuatName::value + % v[0] % v[1] % v[2] % v[3]).str(); +} + + +template +static Quat & +invert(Quat &quat) +{ + MATH_EXC_ON; + return quat.invert(); +} + +template +static Quat +identity(Quat &quat) +{ + MATH_EXC_ON; + return Quat(); +} + +template +static Quat +inverse(Quat &quat) +{ + MATH_EXC_ON; + return quat.inverse(); +} + +template +static Quat & +normalize(Quat &quat) +{ + MATH_EXC_ON; + return quat.normalize(); +} + +template +static Quat +normalized(Quat &quat) +{ + MATH_EXC_ON; + return quat.normalized(); +} + +template +static T +length (Quat &quat) +{ + MATH_EXC_ON; + return quat.length(); +} + +template +static Vec3 +rotateVector(const Quat &quat, const Vec3 &original) +{ + MATH_EXC_ON; + return quat.rotateVector(original); +} + +template +static Quat & +setAxisAngle(Quat &quat, const Vec3 &axis, T radians) +{ + MATH_EXC_ON; + return quat.setAxisAngle(axis, radians); +} + +template +static Quat & +setRotation(Quat &quat, const Vec3 &from, const Vec3 &to) +{ + MATH_EXC_ON; + return quat.setRotation(from, to); +} + +template +static T +angle (Quat &quat) +{ + MATH_EXC_ON; + return quat.angle(); +} + +template +static Vec3 +axis (Quat &quat) +{ + MATH_EXC_ON; + return quat.axis(); +} + +template +static Matrix33 +toMatrix33 (Quat &quat) +{ + MATH_EXC_ON; + return quat.toMatrix33(); +} + +template +static Matrix44 +toMatrix44 (Quat &quat) +{ + MATH_EXC_ON; + return quat.toMatrix44(); +} + +template +static Quat +log(Quat &quat) +{ + MATH_EXC_ON; + return quat.log(); +} + +template +static Quat +exp(Quat &quat) +{ + MATH_EXC_ON; + return quat.exp(); +} + +template +static void +setR(Quat &quat, const double &r) +{ + quat.r = r; +} + +template +static void +setV(Quat &quat, const Vec3 &v) +{ + quat.v = v; +} + +template +static void +extract(Quat &quat, const Matrix44 &mat) +{ + MATH_EXC_ON; + Quat q = IMATH_NAMESPACE::extractQuat(mat); + quat.r = q.r; + quat.v = q.v; +} + +template +static T scalar(Quat &quat) +{ + return quat.r; +} + +template +static Vec3 
vector(Quat &quat) +{ + return quat.v; +} + +template +static Quat +slerp(const Quat &quat, const Quat &other, T t) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::slerp (quat, other, t); +} + +template +static Quat +slerpShortestArc(const Quat& quat, const Quat& other, T t) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::slerpShortestArc (quat, other, t); +} + +template +static const Quat & +imul (Quat &quat, const Quat &other) +{ + MATH_EXC_ON; + return quat *= other; +} + +template +static const Quat & +imulT (Quat &quat, T t) +{ + MATH_EXC_ON; + return quat *= t; +} + +template +static const Quat & +idiv (Quat &quat, const Quat &other) +{ + MATH_EXC_ON; + return quat /= other; +} + +template +static const Quat & +idivT (Quat &quat, T t) +{ + MATH_EXC_ON; + return quat /= t; +} + +template +static const Quat & +iadd (Quat &quat, const Quat &other) +{ + MATH_EXC_ON; + return quat += other; +} + +template +static const Quat & +isub (Quat &quat, const Quat &other) +{ + MATH_EXC_ON; + return quat -= other; +} + +template +static Matrix33 +rmulM33(Quat &quat, Matrix33 &m) +{ + MATH_EXC_ON; + return m * quat; +} + +template +static Matrix33 +mulM33(Quat &quat, Matrix33 &m) +{ + MATH_EXC_ON; + return quat * m; +} + +template +static Quat +mul(Quat &quat, Quat &other) +{ + MATH_EXC_ON; + return quat * other; +} + +template +static Quat +div(Quat &quat, Quat &other) +{ + MATH_EXC_ON; + return quat / other; +} + +template +static Quat +divT(Quat &quat, T t) +{ + MATH_EXC_ON; + return quat / t; +} + +template +static Quat +mulT(Quat &quat, T t) +{ + MATH_EXC_ON; + return quat * t; +} + +template +static Quat +add(Quat &quat, Quat &other) +{ + MATH_EXC_ON; + return quat + other; +} + +template +static Quat +sub(Quat &quat, Quat &other) +{ + MATH_EXC_ON; + return quat - other; +} + +template +static Quat +neg(Quat &quat) +{ + MATH_EXC_ON; + return -quat; +} + +template +static Quat +conj(Quat &quat) +{ + MATH_EXC_ON; + return ~quat; +} + +template +static T +dot(Quat &quat, Quat &other) +{ + MATH_EXC_ON; + return quat ^ other; +} + +template +static Vec3 +rmulVec3(Quat &quat, const Vec3 &v) +{ + MATH_EXC_ON; + return v * quat.toMatrix44(); +} + +template +static FixedArray< Vec3 > +rmulVec3Array(Quat &quat, const FixedArray< Vec3 > &a) +{ + MATH_EXC_ON; + Matrix44 m = quat.toMatrix44(); + size_t len = a.len(); + FixedArray< Vec3 > r(len); + for (size_t i = 0; i < len; i++) + r[i] = a[i] * m; + return r; +} + +template +static Quat * +quatConstructor1(const Euler &euler) +{ + MATH_EXC_ON; + return new Quat(euler.toQuat()); +} + +template +static Quat * +quatConstructor2(const Matrix33 &mat) +{ + MATH_EXC_ON; + return new Quat(Euler(mat).toQuat()); +} + +template +static Quat * +quatConstructor3(const Matrix44 &mat) +{ + MATH_EXC_ON; + return new Quat(Euler(mat).toQuat()); +} + +template +class_ > +register_Quat() +{ + class_ > quat_class(QuatName::value, QuatName::value,init >("copy construction")); + quat_class + .def(init<>("imath Quat initialization") ) + .def(init >("imath Quat copy initialization") ) + .def(init >("imath Quat copy initialization") ) + .def(init("make Quat from components") ) + .def(init >("make Quat from components") ) + .def("__init__", make_constructor(quatConstructor1)) + .def("__init__", make_constructor(quatConstructor2)) + .def("__init__", make_constructor(quatConstructor3)) + .def("identity",&identity, "q.identity() -- return an identity quaternion\n") + .def("invert",&invert,return_internal_reference<>(), + "q.invert() -- inverts quaternion q\n" + "(modifying q); returns q") + + 
.def("inverse",&inverse, + "q.inverse() -- returns the inverse of\n" + "quaternion q; q is not modified\n") + + .def("normalize",&normalize,return_internal_reference<>(), + "q.normalize() -- normalizes quaternion q\n" + "(modifying q); returns q") + + .def("normalized",&normalized, + "q.normalized() -- returns a normalized version\n" + "of quaternion q; q is not modified\n") + + .def("length",&length) + + .def("rotateVector",&rotateVector, + "q.rotateVector(orig) -- Given a vector orig,\n" + " calculate orig' = q x orig x q*\n\n" + " Assumes unit quaternions") + + .def("setAxisAngle",&setAxisAngle,return_internal_reference<>(), + "q.setAxisAngle(x,r) -- sets the value of\n" + "quaternion q so that q represents a rotation\n" + "of r radians around axis x") + + .def("setRotation",&setRotation,return_internal_reference<>(), + "q.setRotation(v,w) -- sets the value of\n" + "quaternion q so that rotating vector v by\n" + "q produces vector w") + + .def("angle",&angle, + "q.angle() -- returns the rotation angle\n" + "(in radians) represented by quaternion q") + + .def("axis",&axis, + "q.axis() -- returns the rotation axis\n" + "represented by quaternion q") + + .def("toMatrix33",&toMatrix33, + "q.toMatrix33() -- returns a 3x3 matrix that\n" + "represents the same rotation as quaternion q") + + .def("toMatrix44",&toMatrix44, + "q.toMatrix44() -- returns a 4x4 matrix that\n" + "represents the same rotation as quaternion q") + + .def("log",&log) + .def("exp",&exp) + .def_readwrite("v",&Quat::v) + .def_readwrite("r",&Quat::r) + .def("v", &vector, + "q.v() -- returns the v (vector) component\n" + "of quaternion q") + + .def("r", &scalar, + "q.r() -- returns the r (scalar) component\n" + "of quaternion q") + + .def("setR", &setR, + "q.setR(s) -- sets the r (scalar) component\n" + "of quaternion q to s") + + .def("setV", &setV, + "q.setV(w) -- sets the v (vector) component\n" + "of quaternion q to w") + + .def("extract", &extract, + "q.extract(m) -- extracts the rotation component\n" + "from 4x4 matrix m and stores the result in q") + + .def("slerp", &slerp, + "q.slerp(p,t) -- performs sperical linear\n" + "interpolation between quaternions q and p:\n" + "q.slerp(p,0) returns q; q.slerp(p,1) returns p.\n" + "q and p must be normalized\n") + + .def("slerpShortestArc", &slerpShortestArc, + "q.slerpShortestArc(p,t) -- performs spherical linear\n" + "interpolation along the shortest arc between\n" + "quaternions q and either p or -p, whichever is\n" + "closer. q and p must be normalized\n") + + .def("__str__",Quat_str) + .def("__repr__",Quat_repr) + .def ("__imul__", &imul, return_internal_reference<>()) + .def ("__imul__", &imulT, return_internal_reference<>()) + .def ("__idiv__", idiv, return_internal_reference<>()) + .def ("__idiv__", &idivT, return_internal_reference<>()) + .def ("__itruediv__", idiv, return_internal_reference<>()) + .def ("__itruediv__", &idivT, return_internal_reference<>()) + .def ("__iadd__", &iadd, return_internal_reference<>()) + .def ("__isub__", &isub, return_internal_reference<>()) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
+ .def ("__rmul__", &rmulM33) + .def ("__mul__", &mulM33) + .def ("__mul__", &mul) + .def ("__div__", &div) + .def ("__div__", &divT) + .def ("__truediv__", &div) + .def ("__truediv__", &divT) + .def ("__mul__", &mulT) + .def ("__rmul__", &mulT) + .def ("__add__", &add) + .def ("__sub__", &sub) + .def ("__neg__", &neg) + .def ("__invert__", &conj) + .def ("__xor__", &dot) + .def ("__rmul__", &rmulVec3) + .def ("__rmul__", &rmulVec3Array) + ; + + decoratecopy(quat_class); + + return quat_class; +} + +// XXX fixme - template this +// really this should get generated automatically... + +template +static FixedArray +QuatArray_get(FixedArray > &qa) +{ + return FixedArray(&(qa.unchecked_index(0).r) + index, + qa.len(), 4*qa.stride(), qa.handle(), qa.writable()); +} + +template +struct QuatArray_SetRotationTask : public Task +{ + const FixedArray > &from; + const FixedArray > &to; + FixedArray > &result; + + QuatArray_SetRotationTask (const FixedArray > &fromIn, + const FixedArray > &toIn, + FixedArray > &resultIn) + : from (fromIn), to (toIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i].setRotation (from[i], to[i]); + } +}; + +template static void +QuatArray_setRotation (FixedArray > &va, + const FixedArray > &from, + const FixedArray > &to) +{ + MATH_EXC_ON; + size_t len = va.match_dimension(from); + va.match_dimension(to); + + // Validate that 'va' is writable before entering the thread-task. + if (!va.writable()) + throw std::invalid_argument ("Input fixed array is read-only."); + + QuatArray_SetRotationTask task (from, to, va); + dispatchTask (task, len); +} + +template +struct QuatArray_OrientToVectors : public Task +{ + const FixedArray > &forward; + const FixedArray > &up; + FixedArray > &result; + bool alignForward; + + QuatArray_OrientToVectors (const FixedArray > &forwardIn, + const FixedArray > &upIn, + FixedArray > &resultIn, + bool alignForwardIn) + : forward (forwardIn), up (upIn), result (resultIn), + alignForward (alignForwardIn) {} + + void execute (size_t start, size_t end) + { + Vec3 f(0), u(0); + Euler eu(0,0,0); + const Vec3 fRef(1,0,0); + + for (size_t i = start; i < end; ++i) + { + if (alignForward) + { + f = forward[i].normalized(); + u = up[i] - f.dot(up[i])*f; + u.normalize(); + } + else + { + u = up[i].normalized(); + f = forward[i] - u.dot(forward[i])*u; + f.normalize(); + } + + extractEulerXYZ (rotationMatrixWithUpDir (fRef, f, u), eu); + result[i] = eu.toQuat(); + } + } +}; + +template static void +QuatArray_orientToVectors (FixedArray > &va, + const FixedArray > &forward, + const FixedArray > &up, + bool alignForward) +{ + MATH_EXC_ON; + size_t len = va.match_dimension(forward); + va.match_dimension(up); + + // Validate that 'va' is writable before entering the thread-task. 
+ if (!va.writable()) + throw std::invalid_argument ("Input fixed array is read-only."); + + QuatArray_OrientToVectors task (forward, up, va, alignForward); + dispatchTask (task, len); +} + +template +struct QuatArray_Axis : public Task +{ + const FixedArray > &va; + FixedArray > &result; + + QuatArray_Axis (const FixedArray > &vaIn, + FixedArray > &resultIn) + : va (vaIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i] = va[i].axis(); + } +}; + +template static FixedArray > +QuatArray_axis(const FixedArray > &va) +{ + MATH_EXC_ON; + size_t len = va.len(); + FixedArray > retval (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_Axis task (va, retval); + dispatchTask (task, len); + return retval; +} + +template +struct QuatArray_Angle : public Task +{ + const FixedArray > &va; + FixedArray &result; + + QuatArray_Angle (const FixedArray > &vaIn, + FixedArray &resultIn) + : va (vaIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i] = va[i].angle(); + } +}; + + +template static FixedArray +QuatArray_angle(const FixedArray > &va) +{ + MATH_EXC_ON; + size_t len = va.len(); + FixedArray retval (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_Angle task (va, retval); + dispatchTask (task, len); + return retval; +} + +template +struct QuatArray_RmulVec3 : public Task +{ + const FixedArray > &a; + const Vec3 &v; + FixedArray > &r; + + QuatArray_RmulVec3 (const FixedArray > &aIn, + const Vec3 &vIn, FixedArray > &rIn) + : a (aIn), v (vIn), r (rIn) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Matrix44 m = a[i].toMatrix44(); + r[i] = v * m; + } + } +}; + +template +static FixedArray< Vec3 > +QuatArray_rmulVec3 (const FixedArray< IMATH_NAMESPACE::Quat > &a, const Vec3 &v) +{ + MATH_EXC_ON; + size_t len = a.len(); + FixedArray< Vec3 > r (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_RmulVec3 task (a, v, r); + dispatchTask (task, len); + return r; +} + +template +struct QuatArray_RmulVec3Array : public Task +{ + const FixedArray > &a; + const FixedArray > &b; + FixedArray > &r; + + QuatArray_RmulVec3Array (const FixedArray > &aIn, + const FixedArray > &bIn, + FixedArray > &rIn) + : a (aIn), b (bIn), r (rIn) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Matrix44 m = a[i].toMatrix44(); + r[i] = b[i] * m; + } + } +}; + +template +static FixedArray< Vec3 > +QuatArray_rmulVec3Array (const FixedArray< IMATH_NAMESPACE::Quat > &a, + const FixedArray< Vec3 > &b) +{ + MATH_EXC_ON; + size_t len = a.match_dimension(b); + FixedArray< Vec3 > r (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_RmulVec3Array task (a, b, r); + dispatchTask (task, len); + return r; +} + +template +struct QuatArray_SetAxisAngle : public Task +{ + const FixedArray > &axis; + const FixedArray &angles; + FixedArray > &quats; + + QuatArray_SetAxisAngle (const FixedArray > &axisIn, + const FixedArray &anglesIn, + FixedArray > &quatsIn) + : axis (axisIn), angles (anglesIn), quats (quatsIn) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + quats[i].setAxisAngle (axis[i], angles[i]); + } + } +}; + +template +static FixedArray< IMATH_NAMESPACE::Quat > & +QuatArray_setAxisAngle (FixedArray< IMATH_NAMESPACE::Quat > &quats, + const FixedArray< IMATH_NAMESPACE::Vec3 > &axis, + const FixedArray &angles) +{ + MATH_EXC_ON; + size_t len = quats.match_dimension(axis); + 
quats.match_dimension(angles); + + // Validate that 'va' is writable before entering the thread-task. + if (!quats.writable()) + throw std::invalid_argument ("Input fixed array is read-only."); + + QuatArray_SetAxisAngle task (axis, angles, quats); + dispatchTask (task, len); + return quats; +} + +template +struct QuatArray_RotateVector : public Task +{ + FixedArray > &result; + const FixedArray > &vectors; + const FixedArray > &quats; + + QuatArray_RotateVector (FixedArray > &resultIn, + const FixedArray > &vectorsIn, + const FixedArray > &quatsIn) + : result (resultIn), vectors (vectorsIn), quats (quatsIn) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + result[i] = quats[i].rotateVector (vectors[i]); + } + } +}; + +template +static FixedArray > +QuatArray_rotateVector (const FixedArray< IMATH_NAMESPACE::Quat > &quats, + const FixedArray< IMATH_NAMESPACE::Vec3 > &vectors) +{ + MATH_EXC_ON; + size_t len = quats.match_dimension(vectors); + FixedArray< IMATH_NAMESPACE::Vec3 > result (len); + + QuatArray_RotateVector task (result, vectors, quats); + dispatchTask (task, len); + return result; +} + +template +struct QuatArray_Inverse : public Task +{ + const FixedArray > &quats; + FixedArray > &result; + + QuatArray_Inverse (FixedArray > &resultIn, + const FixedArray > &quatsIn) + : quats (quatsIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + result[i] = quats[i].inverse(); + } +}; + +template +static FixedArray > +QuatArray_inverse(const FixedArray > &quats) +{ + MATH_EXC_ON; + size_t len = quats.len(); + FixedArray > result (len); + + QuatArray_Inverse task (result, quats); + dispatchTask (task, len); + + return result; +} + +template +struct QuatArray_SetEulerXYZ : public Task +{ + const FixedArray > &rot; + FixedArray > &quats; + + QuatArray_SetEulerXYZ (const FixedArray > &rotIn, + FixedArray > &quatsIn) + : rot (rotIn), quats (quatsIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Eulerf e(rot[i]); + quats[i] = e.toQuat(); + } + } +}; + +template +static void +QuatArray_setEulerXYZ (FixedArray< IMATH_NAMESPACE::Quat > &quats, + const FixedArray< IMATH_NAMESPACE::Vec3 > &rot) +{ + MATH_EXC_ON; + size_t len = quats.match_dimension(rot); + + // Validate that 'va' is writable before entering the thread-task. 
+ if (!quats.writable()) + throw std::invalid_argument ("Input fixed array is read-only."); + + QuatArray_SetEulerXYZ task (rot, quats); + dispatchTask (task, len); +} + +template +struct QuatArray_Mul : public Task +{ + const FixedArray > &q1; + const FixedArray > &q2; + FixedArray > &result; + + QuatArray_Mul (const FixedArray > &q1In, + const FixedArray > &q2In, + FixedArray > &resultIn) + : q1 (q1In), q2 (q2In), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + result[i] = q1[i] * q2[i]; + } + } +}; + +template +static FixedArray< IMATH_NAMESPACE::Quat > +QuatArray_mul(const FixedArray< IMATH_NAMESPACE::Quat > &q1, + const FixedArray< IMATH_NAMESPACE::Quat > &q2) +{ + MATH_EXC_ON; + size_t len = q1.match_dimension(q2); + FixedArray< IMATH_NAMESPACE::Quat > result (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_Mul task (q1, q2, result); + dispatchTask (task, len); + return result; +} + +template +struct QuatArray_QuatConstructor1 : public Task +{ + const FixedArray > &euler; + FixedArray > &result; + + QuatArray_QuatConstructor1 (const FixedArray > &eulerIn, + FixedArray > &resultIn) + : euler (eulerIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + result[i] = euler[i].toQuat(); + } + } +}; + +template +static FixedArray > * +QuatArray_quatConstructor1(const FixedArray > &e) +{ + MATH_EXC_ON; + size_t len = e.len(); + FixedArray >* result = + new FixedArray > (Py_ssize_t(len), UNINITIALIZED); + + QuatArray_QuatConstructor1 task (e, *result); + dispatchTask (task, len); + return result; +} + +template +struct QuatArray_ExtractTask : public Task +{ + const FixedArray > &m; + FixedArray > &result; + + QuatArray_ExtractTask (const FixedArray > &mIn, + FixedArray > &resultIn) + : m (mIn), result (resultIn) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + result[i] = IMATH_NAMESPACE::extractQuat (m[i]); + } + } +}; + +template +static void +QuatArray_extract(FixedArray > &q, + const FixedArray > &m) +{ + MATH_EXC_ON; + const size_t len = q.match_dimension(m); + + QuatArray_ExtractTask task (m, q); + dispatchTask (task, len); +} + +template +class_ > > +register_QuatArray() +{ + using boost::mpl::true_; + using boost::mpl::false_; + + typedef IMATH_NAMESPACE::Quat QuatT; + + class_ > quatArray_class = FixedArray::register_("Fixed length array of IMATH_NAMESPACE::Quat"); + quatArray_class + .add_property("r",&QuatArray_get) + .add_property("x",&QuatArray_get) + .add_property("y",&QuatArray_get) + .add_property("z",&QuatArray_get) + .def("setRotation", &QuatArray_setRotation, + "set rotation angles for each quat", + (args("from", "to"))) + .def("orientToVectors", &QuatArray_orientToVectors, + "Sets the orientations to match the given forward and up vectors, " + "matching the forward vector exactly if 'alignForward' is True, matching " + "the up vector exactly if 'alignForward' is False. 
If the vectors are " + "already orthogonal, both vectors will be matched exactly.", + (args("forward", "up", "alignForward"))) + .def("extract", &QuatArray_extract, + "Extract the rotation component of an M44d and return it as a quaternion.", + (args("lxform"))) + .def("axis", &QuatArray_axis, + "get rotation axis for each quat") + .def("angle", &QuatArray_angle, + "get rotation angle about the axis returned by axis() for each quat") + .def("setAxisAngle", &QuatArray_setAxisAngle, + "set the quaternion arrays from a given axis and angle", + (args("axis", "angle")), return_value_policy()) + .def("setEulerXYZ", &QuatArray_setEulerXYZ, + "set the quaternion arrays from a given euler XYZ angle vector", + (args("euler"))) + .def("rotateVector", &QuatArray_rotateVector, + "Rotate the supplied vectors by the quaternions. Assumes quaternions are normalized.", + (args("vector"))) + .def("inverse", &QuatArray_inverse, + "Return 1/Q for each quaternion.", + (args("QuatArray"))) + .def("__rmul__", &QuatArray_rmulVec3) + .def("__rmul__", &QuatArray_rmulVec3Array) + .def("__init__", make_constructor(QuatArray_quatConstructor1)) + ; + + generate_member_bindings, true_> + (quatArray_class, "dot", + "Return the element-by-element Euclidean inner product", + args("qB")); + generate_member_bindings, true_> + (quatArray_class, "euclideanInnerProduct", + "Return the element-by-element Euclidean inner product", + args("qB")); + generate_member_bindings > + (quatArray_class, "normalize", + "Normalize each quaternion in the array"); + generate_member_bindings > + (quatArray_class, "normalized", + "Return a new quaternion array with unit quaternions."); + + generate_member_bindings > + (quatArray_class, "__neg__" , "-self"); + generate_member_bindings, true_ > + (quatArray_class, "__mul__", "self * qB", args("qB")); + generate_member_bindings, false_ > + (quatArray_class, "__mul__", "self * x", args("x")); + generate_member_bindings, false_ > + (quatArray_class, "__rmul__", "self * x", args("x")); + generate_member_bindings, true_ > + (quatArray_class, "__xor__", "self.dot(qB)", args("qB")); + + generate_member_bindings, true_, false_ > + (quatArray_class, + "slerp", + "Return the element-by-element shortest arc spherical linear interpolation between self and B.", + args("qB", "t")); + + add_comparison_functions(quatArray_class); + decoratecopy(quatArray_class); + + return quatArray_class; +} + +template PYIMATH_EXPORT class_ > register_Quat(); +template PYIMATH_EXPORT class_ > register_Quat(); + +template PYIMATH_EXPORT class_ > > register_QuatArray(); +template PYIMATH_EXPORT class_ > > register_QuatArray(); +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Quat FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Quat(); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Quat FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Quat(); } +} diff --git a/Sources/MetaPy/PyImath/PyImathRandom.cpp b/Sources/MetaPy/PyImath/PyImathRandom.cpp new file mode 100644 index 00000000..3ff1b0d9 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathRandom.cpp @@ -0,0 +1,327 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
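// A minimal usage sketch of the Imath calls the Quat wrappers above forward
// to (assuming <ImathQuat.h> and <ImathVec.h>; quatSketchUsage and halfPi are
// illustrative names). setAxisAngle() builds a rotation from an axis and an
// angle in radians, rotateVector() applies it to a vector, and the free
// function IMATH_NAMESPACE::slerp() interpolates between two normalized
// quaternions, as the slerp binding above does.
#include <ImathQuat.h>
#include <ImathVec.h>

static void
quatSketchUsage ()
{
    const float halfPi = 1.5707963f;

    IMATH_NAMESPACE::Quatf q;                                   // identity
    q.setAxisAngle (IMATH_NAMESPACE::V3f (0, 0, 1), halfPi);    // 90 degrees about +Z

    IMATH_NAMESPACE::V3f v =
        q.rotateVector (IMATH_NAMESPACE::V3f (1, 0, 0));        // ~(0, 1, 0)

    IMATH_NAMESPACE::Quatf mid =
        IMATH_NAMESPACE::slerp (IMATH_NAMESPACE::Quatf (), q, 0.5f); // identity -> q at t = 0.5
    (void) v; (void) mid;
}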
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathFixedArray.h" +#include "PyImathRandom.h" +#include "PyImathDecorators.h" + +namespace PyImath{ +using namespace boost::python; + +template +static T +nextf2 (Rand &rand, T min, T max) +{ + MATH_EXC_ON; + return rand.nextf(min, max); +} + +template +static float +nextGauss (Rand &rand) +{ + MATH_EXC_ON; + return gaussRand(rand); +} + +template +static IMATH_NAMESPACE::Vec3 nextGaussSphere(Rand &rand, const IMATH_NAMESPACE::Vec3 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::gaussSphereRand,Rand>(rand); +} +template +static IMATH_NAMESPACE::Vec2 nextGaussSphere(Rand &rand, const IMATH_NAMESPACE::Vec2 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::gaussSphereRand,Rand>(rand); +} + +template +static IMATH_NAMESPACE::Vec3 nextHollowSphere(Rand &rand, const IMATH_NAMESPACE::Vec3 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::hollowSphereRand,Rand>(rand); +} + +template +static IMATH_NAMESPACE::Vec2 nextHollowSphere(Rand &rand, const IMATH_NAMESPACE::Vec2 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::hollowSphereRand,Rand>(rand); +} + +template +static IMATH_NAMESPACE::Vec3 nextSolidSphere(Rand &rand, const IMATH_NAMESPACE::Vec3 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::solidSphereRand,Rand>(rand); +} + +template +static IMATH_NAMESPACE::Vec2 nextSolidSphere(Rand &rand, const IMATH_NAMESPACE::Vec2 &v) +{ + MATH_EXC_ON; + return IMATH_NAMESPACE::solidSphereRand,Rand>(rand); +} + +template +static Rand *Rand_constructor1(unsigned long int seed) +{ + return new Rand(seed); +} + +template +static Rand *Rand_constructor2(Rand rand) +{ + Rand *r = new Rand(); + *r = rand; + + return r; +} + +template +static PyImath::FixedArray > +hollowSphereRand(Rand &rand, int num) +{ + MATH_EXC_ON; + PyImath::FixedArray > retval(num); + for (int i=0; i,Rand>(rand); + } + return retval; +} + +template +static PyImath::FixedArray > +solidSphereRand(Rand &rand, int num) +{ + MATH_EXC_ON; + PyImath::FixedArray > retval(num); + for (int i=0; i,Rand>(rand); + } + return retval; +} + +PYIMATH_EXPORT +class_ +register_Rand32() +{ + float (IMATH_NAMESPACE::Rand32::*nextf1)(void) = &IMATH_NAMESPACE::Rand32::nextf; + + IMATH_NAMESPACE::Vec3 (*nextGaussSphere1)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec3 (*nextGaussSphere2)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec2 (*nextGaussSphere3)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec2 (*nextGaussSphere4)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextGaussSphere; + + IMATH_NAMESPACE::Vec3 (*nextHollowSphere1)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec3 (*nextHollowSphere2)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec2 (*nextHollowSphere3)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec2 (*nextHollowSphere4)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextHollowSphere; + + IMATH_NAMESPACE::Vec3 (*nextSolidSphere1)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextSolidSphere; + IMATH_NAMESPACE::Vec3 (*nextSolidSphere2)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec3 &v) = &nextSolidSphere; + 
IMATH_NAMESPACE::Vec2 (*nextSolidSphere3)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextSolidSphere; + IMATH_NAMESPACE::Vec2 (*nextSolidSphere4)(IMATH_NAMESPACE::Rand32 &, const IMATH_NAMESPACE::Vec2 &v) = &nextSolidSphere; + + class_< IMATH_NAMESPACE::Rand32 > rand32_class("Rand32"); + rand32_class + .def(init<>("default construction")) + .def("__init__", make_constructor(Rand_constructor1)) + .def("__init__", make_constructor(Rand_constructor2)) + .def("init", &IMATH_NAMESPACE::Rand32::init, + "r.init(i) -- initialize with integer " + "seed i") + + .def("nexti", &IMATH_NAMESPACE::Rand32::nexti, + "r.nexti() -- return the next integer " + "value in the uniformly-distributed " + "sequence") + .def("nextf", nextf1, + "r.nextf() -- return the next floating-point " + "value in the uniformly-distributed " + "sequence\n" + + "r.nextf(float, float) -- return the next floating-point " + "value in the uniformly-distributed " + "sequence") + .def("nextf", &nextf2 ) + + .def("nextb", &IMATH_NAMESPACE::Rand32::nextb, + "r.nextb() -- return the next boolean " + "value in the uniformly-distributed " + "sequence") + + .def("nextGauss", &nextGauss, + "r.nextGauss() -- returns the next " + "floating-point value in the normally " + "(Gaussian) distributed sequence") + + .def("nextGaussSphere", nextGaussSphere1, + "r.nextGaussSphere(v) -- returns the next " + "point whose distance from the origin " + "has a normal (Gaussian) distribution with " + "mean 0 and variance 1. The vector " + "argument, v, specifies the dimension " + "and number type.") + .def("nextGaussSphere", nextGaussSphere2) + .def("nextGaussSphere", nextGaussSphere3) + .def("nextGaussSphere", nextGaussSphere4) + + .def("nextHollowSphere", nextHollowSphere1, + "r.nextHollowSphere(v) -- return the next " + "point uniformly distributed on the surface " + "of a sphere of radius 1 centered at the " + "origin. The vector argument, v, specifies " + "the dimension and number type.") + .def("nextHollowSphere", nextHollowSphere2) + .def("nextHollowSphere", nextHollowSphere3) + .def("nextHollowSphere", nextHollowSphere4) + + .def("nextSolidSphere", nextSolidSphere1, + "r.nextSolidSphere(v) -- return the next " + "point uniformly distributed in a sphere " + "of radius 1 centered at the origin. 
The " + "vector argument, v, specifies the " + "dimension and number type.") + .def("nextSolidSphere", nextSolidSphere2) + .def("nextSolidSphere", nextSolidSphere3) + .def("nextSolidSphere", nextSolidSphere4) + ; + + def("hollowSphereRand",&hollowSphereRand,"hollowSphereRand(randObj,num) return XYZ vectors uniformly " + "distributed across the surface of a sphere generated from the given Rand32 object", + args("randObj","num")); + + def("solidSphereRand",&solidSphereRand,"solidSphereRand(randObj,num) return XYZ vectors uniformly " + "distributed through the volume of a sphere generated from the given Rand32 object", + args("randObj","num")); + + decoratecopy(rand32_class); + + return rand32_class; +} + +PYIMATH_EXPORT +class_ +register_Rand48() +{ + double (IMATH_NAMESPACE::Rand48::*nextf1)(void) = &IMATH_NAMESPACE::Rand48::nextf; + + IMATH_NAMESPACE::Vec3 (*nextGaussSphere1)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec3 (*nextGaussSphere2)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec2 (*nextGaussSphere3)(IMATH_NAMESPACE::Rand48&, const IMATH_NAMESPACE::Vec2 &v) = &nextGaussSphere; + IMATH_NAMESPACE::Vec2 (*nextGaussSphere4)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec2 &v) = &nextGaussSphere; + + IMATH_NAMESPACE::Vec3 (*nextHollowSphere1)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec3 (*nextHollowSphere2)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec2 (*nextHollowSphere3)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec2 &v) = &nextHollowSphere; + IMATH_NAMESPACE::Vec2 (*nextHollowSphere4)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec2 &v) = &nextHollowSphere; + + IMATH_NAMESPACE::Vec3 (*nextSolidSphere1)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextSolidSphere; + IMATH_NAMESPACE::Vec3 (*nextSolidSphere2)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec3 &v) = &nextSolidSphere; + IMATH_NAMESPACE::Vec2 (*nextSolidSphere3)(IMATH_NAMESPACE::Rand48&, const IMATH_NAMESPACE::Vec2 &v) = &nextSolidSphere; + IMATH_NAMESPACE::Vec2 (*nextSolidSphere4)(IMATH_NAMESPACE::Rand48 &, const IMATH_NAMESPACE::Vec2 &v) = &nextSolidSphere; + + class_< IMATH_NAMESPACE::Rand48 > rand48_class("Rand48"); + rand48_class + .def(init<>("default construction")) + .def("__init__", make_constructor(Rand_constructor1)) + .def("__init__", make_constructor(Rand_constructor2)) + .def("init", &IMATH_NAMESPACE::Rand48::init, + "r.init(i) -- initialize with integer " + "seed i") + + .def("nexti", &IMATH_NAMESPACE::Rand48::nexti, + "r.nexti() -- return the next integer " + "value in the uniformly-distributed " + "sequence") + + .def("nextf", nextf1, + "r.nextf() -- return the next double " + "value in the uniformly-distributed " + "sequence\n" + + "r.nextf(double,double) -- return the next double " + "value in the uniformly-distributed " + "sequence") + .def("nextf", &nextf2 ) + + .def("nextb", &IMATH_NAMESPACE::Rand48::nextb, + "r.nextb() -- return the next boolean " + "value in the uniformly-distributed " + "sequence") + + .def("nextGauss", &nextGauss, + "r.nextGauss() -- returns the next " + "floating-point value in the normally " + "(Gaussian) distributed sequence") + + .def("nextGaussSphere", nextGaussSphere1, + "r.nextGaussSphere(v) -- returns the next " + "point whose distance from the origin " + "has a normal (Gaussian) distribution with " + "mean 0 
and variance 1. The vector " + "argument, v, specifies the dimension " + "and number type.") + .def("nextGaussSphere", nextGaussSphere2) + .def("nextGaussSphere", nextGaussSphere3) + .def("nextGaussSphere", nextGaussSphere4) + + .def("nextHollowSphere", nextHollowSphere1, + "r.nextHollowSphere(v) -- return the next " + "point uniformly distributed on the surface " + "of a sphere of radius 1 centered at the " + "origin. The vector argument, v, specifies " + "the dimension and number type.") + .def("nextHollowSphere", nextHollowSphere2) + .def("nextHollowSphere", nextHollowSphere3) + .def("nextHollowSphere", nextHollowSphere4) + + .def("nextSolidSphere", nextSolidSphere1, + "r.nextSolidSphere(v) -- return the next " + "point uniformly distributed in a sphere " + "of radius 1 centered at the origin. The " + "vector argument, v, specifies the " + "dimension and number type.") + .def("nextSolidSphere", nextSolidSphere2) + .def("nextSolidSphere", nextSolidSphere3) + .def("nextSolidSphere", nextSolidSphere4) + ; + + decoratecopy(rand48_class); + + return rand48_class; +} + +// + +PyObject * +Rand32::wrap (const IMATH_NAMESPACE::Rand32 &r) +{ + boost::python::return_by_value::apply ::type converter; + PyObject *p = converter (r); + return p; +} + +PyObject * +Rand48::wrap (const IMATH_NAMESPACE::Rand48 &r) +{ + boost::python::return_by_value::apply ::type converter; + PyObject *p = converter (r); + return p; +} + +} //namespace PyIMath diff --git a/Sources/MetaPy/PyImath/PyImathShear.cpp b/Sources/MetaPy/PyImath/PyImathShear.cpp new file mode 100644 index 00000000..0092cd1e --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathShear.cpp @@ -0,0 +1,559 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathShear.h" +#include "PyImathPlane.h" +#include "PyImathDecorators.h" +#include "PyImathExport.h" + + +namespace PyImath{ +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct ShearName {static const char *value;}; +template <> const char *ShearName::value = "Shear6f"; +template <> const char *ShearName::value = "Shear6d"; + +template +static std::string Shear_str(const Shear6 &v) +{ + std::stringstream stream; + stream << ShearName::value << "(" + << v[0] << ", " << v[1] << ", " << v[2] << ", " + << v[3] << ", " << v[4] << ", " << v[5] << ")"; + return stream.str(); +} + +// Non-specialized repr is same as str +template +static std::string Shear_repr(const Shear6 &v) +{ + return Shear_str(v); +} + +// Specialization for float to full precision +template <> +std::string Shear_repr(const Shear6 &v) +{ + return (boost::format("%s(%.9g, %.9g, %.9g, %.9g, %.9g, %.9g)") + % ShearName::value + % v[0] % v[1] % v[2] + % v[3] % v[4] % v[5]).str(); +} + +// Specialization for double to full precision +template <> +std::string Shear_repr(const Shear6 &v) +{ + return (boost::format("%s(%.17g, %.17g, %.17g, %.17g, %.17g, %.17g)") + % ShearName::value + % v[0] % v[1] % v[2] + % v[3] % v[4] % v[5]).str(); +} + +template +static Shear6 * shearTupleConstructor(tuple t) +{ + if(t.attr("__len__")() == 3){ + return new Shear6(extract(t[0]), extract(t[1]), extract(t[2]), + T(0), T(0), T(0)); + } + else if(t.attr("__len__")() == 6){ + return new Shear6(extract(t[0]), extract(t[1]), extract(t[2]), + extract(t[3]), extract(t[4]), extract(t[5])); + } + else + throw 
std::invalid_argument ("Shear6 expects tuple of length 3 or 6"); +} + +template +static Shear6 * shearConstructor1(T a) +{ + return new Shear6(a, a, a, a, a, a); +} + +template +static Shear6 * shearConversionConstructor(const Shear6 &shear) +{ + Shear6 *s = new Shear6; + *s = shear; + return s; +} + +template +static const Shear6 & +iadd(Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear += other; +} + +template +static Shear6 +add(const Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear + other; +} + +template +static const Shear6 & +isub(Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear -= other; +} + +template +static Shear6 +sub(const Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear - other; +} + +template +static Shear6 +neg(const Shear6 &shear) +{ + MATH_EXC_ON; + return -shear; +} + +template +static const Shear6 & +imul(Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear *= other; +} + +template +static const Shear6 & +imulT(Shear6 &shear, T t) +{ + MATH_EXC_ON; + return shear *= t; +} + +template +static Shear6 +mul(const Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear * other; +} + +template +static Shear6 +mulT(const Shear6 &shear, T t) +{ + MATH_EXC_ON; + return shear * t; +} + +template +static const Shear6 & +idiv(Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear /= other; +} + +template +static const Shear6 & +idivT(Shear6 &shear, T t) +{ + MATH_EXC_ON; + return shear /= t; +} + +template +static Shear6 +div(const Shear6 &shear, const Shear6 &other) +{ + MATH_EXC_ON; + return shear / other; +} + +template +static Shear6 +divT(const Shear6 &shear, T t) +{ + MATH_EXC_ON; + return shear / t; +} + +template +static Shear6 +subtract1(Shear6 &v, tuple t) +{ + MATH_EXC_ON; + Shear6 w; + + if(t.attr("__len__")() == 6){ + w[0] = v[0] - extract(t[0]); + w[1] = v[1] - extract(t[1]); + w[2] = v[2] - extract(t[2]); + w[3] = v[3] - extract(t[3]); + w[4] = v[4] - extract(t[4]); + w[5] = v[5] - extract(t[5]); + } + else + throw std::domain_error ("tuple must have length of 6"); + + return w; +} + +// obsolete? 
duplicate with subtract1 +template +static Shear6 +subtract2(Shear6 &v, tuple t) +{ + MATH_EXC_ON; + Shear6 w; + + if(t.attr("__len__")() == 6){ + w[0] = extract(t[0]) - v[0]; + w[1] = extract(t[1]) - v[1]; + w[2] = extract(t[2]) - v[2]; + w[3] = extract(t[3]) - v[3]; + w[4] = extract(t[4]) - v[4]; + w[5] = extract(t[5]) - v[5]; + } + else + throw std::domain_error ("tuple must have length of 6"); + + return w; +} + +template +static Shear6 +subtractT1(Shear6 &v, T a) +{ + MATH_EXC_ON; + Shear6 w; + + w[0] = v[0] - a; + w[1] = v[1] - a; + w[2] = v[2] - a; + w[3] = v[3] - a; + w[4] = v[4] - a; + w[5] = v[5] - a; + + return w; +} + +template +static Shear6 +subtractT2(Shear6 &v, T a) +{ + MATH_EXC_ON; + Shear6 w; + + w[0] = a - v[0]; + w[1] = a - v[1]; + w[2] = a - v[2]; + w[3] = a - v[3]; + w[4] = a - v[4]; + w[5] = a - v[5]; + + return w; +} + + +template +static Shear6 +addTuple(Shear6 &v, tuple t) +{ + MATH_EXC_ON; + Shear6 w; + + if(t.attr("__len__")() == 6){ + w[0] = v[0] + extract(t[0]); + w[1] = v[1] + extract(t[1]); + w[2] = v[2] + extract(t[2]); + w[3] = v[3] + extract(t[3]); + w[4] = v[4] + extract(t[4]); + w[5] = v[5] + extract(t[5]); + } + else + throw std::domain_error ("tuple must have length of 6"); + + return w; +} + +template +static Shear6 +addT(Shear6 &v, T a) +{ + MATH_EXC_ON; + Shear6 w; + + w[0] = v[0] + a; + w[1] = v[1] + a; + w[2] = v[2] + a; + w[3] = v[3] + a; + w[4] = v[4] + a; + w[5] = v[5] + a; + + return w; +} + +template +static Shear6 +multTuple(Shear6 &v, tuple t) +{ + MATH_EXC_ON; + Shear6 w; + + if(t.attr("__len__")() == 6){ + w[0] = v[0] * extract(t[0]); + w[1] = v[1] * extract(t[1]); + w[2] = v[2] * extract(t[2]); + w[3] = v[3] * extract(t[3]); + w[4] = v[4] * extract(t[4]); + w[5] = v[5] * extract(t[5]); + } + else + throw std::domain_error ("tuple must have length of 6"); + + return w; +} + +template +static Shear6 +rdiv(Shear6 &v, T a) +{ + MATH_EXC_ON; + Shear6 w; + + if(v != Shear6()){ + w[0] = a/v[0]; + w[1] = a/v[1]; + w[2] = a/v[2]; + w[3] = a/v[3]; + w[4] = a/v[4]; + w[5] = a/v[5]; + } + else + throw std::domain_error ("Division by Zero"); + + return w; +} + +template +static Shear6 +divTuple(Shear6 &v, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() != 6) + throw std::domain_error ("Shear6 expects tuple of length 6"); + + Shear6 w; + for(int i = 0; i < 6; ++i) + { + T a = extract(t[i]); + if(a != T (0)) + w[i] = v[i] / a; + else + throw std::domain_error ("Division by Zero"); + } + + return w; +} + +template +static Shear6 +rdivTuple(Shear6 &v, const tuple &t) +{ + MATH_EXC_ON; + if(t.attr("__len__")() != 6) + throw std::domain_error ("Shear6 expects tuple of length 6"); + + Shear6 w; + for(int i = 0; i < 6; ++i) + { + T a = extract(t[i]); + if(v[i] != T (0)) + w[i] = a / v[i]; + else + throw std::domain_error ("Division by Zero"); + } + + return w; +} + +template +static bool +lessThan(Shear6 &v, const Shear6 &w) +{ + bool isLessThan = (v[0] <= w[0] && v[1] <= w[1] && v[2] <= w[2] + && v[3] <= w[3] && v[4] <= w[4] && v[5] <= w[5]) + && v != w; + + return isLessThan; +} + +template +static bool +greaterThan(Shear6 &v, const Shear6 &w) +{ + bool isGreaterThan = (v[0] >= w[0] && v[1] >= w[1] && v[2] >= w[2] + && v[3] >= w[3] && v[4] >= w[4] && v[5] >= w[5]) + && v != w; + + return isGreaterThan; +} + +template +static bool +lessThanEqual(Shear6 &v, const Shear6 &w) +{ + bool isLessThanEqual = (v[0] <= w[0] && v[1] <= w[1] && v[2] <= w[2] + && v[3] <= w[3] && v[4] <= w[4] && v[5] <= w[5]); + + return isLessThanEqual; +} + +template 
+static bool +greaterThanEqual(Shear6 &v, const Shear6 &w) +{ + bool isGreaterThanEqual = (v[0] >= w[0] && v[1] >= w[1] && v[2] >= w[2] + && v[3] >= w[3] && v[4] >= w[4] && v[5] >= w[5]); + + return isGreaterThanEqual; +} + +template +static T +getitem(Shear6 &shear, int i) +{ + return shear[i]; +} + +template +static void +setitem(Shear6 &shear, int i, T a) +{ + if(i < 0 || i > 5) + throw std::domain_error ("Index out of range"); + + shear[i] = a; +} + +template +static int +len(Shear6 &shear) +{ + return 6; +} + + + +template +class_ > +register_Shear() +{ + const char *name = ShearName::value; + + void (IMATH_NAMESPACE::Shear6::*setValue1)(T,T,T,T,T,T) = &IMATH_NAMESPACE::Shear6::setValue; + void (IMATH_NAMESPACE::Shear6::*setValue2)(const Shear6 &) = &IMATH_NAMESPACE::Shear6::setValue; + void (IMATH_NAMESPACE::Shear6::*getValue1)(Shear6 &) const = &IMATH_NAMESPACE::Shear6::getValue; + + class_ > shear_class(name, name, init >("copy construction")); + shear_class + .def(init<>("default construction: (0 0 0 0 0 0)")) + .def(init("Shear(XY,XZ,YZ) construction: (XY XZ YZ 0 0 0)")) + .def(init &>("Shear(v) construction: (v.x v.y v.z 0 0 0)")) + .def(init &>("Shear(v) construction: (v.x v.y v.z 0 0 0)")) + .def(init &>("Shear(v) construction: (v.x v.y v.z 0 0 0)")) + .def(init("Shear(XY, XZ, YZ, YX, ZX, ZY) construction")) + .def("__init__", make_constructor(shearConstructor1)) + .def("__init__", make_constructor(shearTupleConstructor),"Construction from tuple") + .def("__init__", make_constructor(shearConversionConstructor)) + .def("__init__", make_constructor(shearConversionConstructor)) + .def("__init__", make_constructor(shearConversionConstructor)) + .def("__iadd__",&iadd,return_internal_reference<>()) + .def("__add__",&add) + .def("__isub__",&isub,return_internal_reference<>()) + .def("__sub__",&sub) + .def("__neg__",&neg) + .def("__imul__",&imul,return_internal_reference<>()) + .def("__imul__",&imulT,return_internal_reference<>()) + .def("__mul__",&mul) + .def("__mul__",&mulT) + .def("__rmul__",&mulT) + .def("__idiv__",&idiv,return_internal_reference<>()) + .def("__idiv__",&idivT,return_internal_reference<>()) + .def("__itruediv__",&idiv,return_internal_reference<>()) + .def("__itruediv__",&idivT,return_internal_reference<>()) + .def("__div__",&div) + .def("__div__",&divT) + .def("__truediv__",&div) + .def("__truediv__",&divT) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self != self) // NOSONAR - suppress SonarCloud bug report. 
+ .def("__str__",&Shear_str) + .def("__repr__",&Shear_repr) + .def("setValue", setValue1) + .def("setValue", setValue2) + .def("getValue", getValue1) + .def("negate", &Shear6::negate, return_internal_reference<>()) + .def("baseTypeLowest", &Shear6::baseTypeLowest) + .staticmethod("baseTypeLowest") + .def("baseTypeMax", &Shear6::baseTypeMax) + .staticmethod("baseTypeMax") + .def("baseTypeSmallest", &Shear6::baseTypeSmallest) + .staticmethod("baseTypeSmallest") + .def("baseTypeEpsilon", &Shear6::baseTypeEpsilon) + .staticmethod("baseTypeEpsilon") + .def("equalWithAbsError", &Shear6::equalWithAbsError) + .def("equalWithRelError", &Shear6::equalWithRelError) + .def("__sub__", &subtract1) + .def("__sub__", &subtractT1) + .def("__rsub__", &subtract2) + .def("__rsub__", &subtractT2) + .def("__add__", &addTuple) + .def("__add__", &addT) + .def("__radd__", &addTuple) + .def("__radd__", &addT) + .def("__mul__", &multTuple) + .def("__rmul__", &multTuple) + .def("__div__", &divTuple) + .def("__truediv__", &divTuple) + .def("__rdiv__", &rdiv) + .def("__rdiv__", &rdivTuple) + .def("__rtruediv__", &rdiv) + .def("__rtruediv__", &rdivTuple) + .def("__lt__", &lessThan) + .def("__gt__", &greaterThan) + .def("__le__", &lessThanEqual) + .def("__ge__", &greaterThanEqual) + .def("__getitem__", &getitem) + .def("__setitem__", &setitem) + .def("__len__", &len) + ; + + decoratecopy(shear_class); + + return shear_class; +} + +template PYIMATH_EXPORT class_ > register_Shear(); +template PYIMATH_EXPORT class_ > register_Shear(); + +}//namespace PyIMath diff --git a/Sources/MetaPy/PyImath/PyImathStringArray.cpp b/Sources/MetaPy/PyImath/PyImathStringArray.cpp new file mode 100644 index 00000000..a049ff7d --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathStringArray.cpp @@ -0,0 +1,358 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathStringArrayRegister.h" +#include "PyImathStringArray.h" +#include "PyImathExport.h" + +namespace PyImath { + +using namespace boost::python; + +template +StringArrayT* StringArrayT::createDefaultArray(size_t length) +{ + return StringArrayT::createUniformArray(T(), length); +} + +template +StringArrayT* StringArrayT::createUniformArray(const T& initialValue, size_t length) +{ + typedef boost::shared_array StringTableIndexArrayPtr; + typedef boost::shared_ptr > StringTablePtr; + + BOOST_STATIC_ASSERT(boost::is_pod::value); + + StringTableIndexArrayPtr indexArray(reinterpret_cast(new char[sizeof(StringTableIndex)*length])); + StringTablePtr table(new StringTableT); + + const StringTableIndex index = table->intern(initialValue); + + for(size_t i=0; i(*table, indexArray.get(), length, 1, indexArray, boost::any(table)); +} + +template +StringArrayT* StringArrayT::createFromRawArray(const T* rawArray, size_t length, bool writable) +{ + typedef boost::shared_array StringTableIndexArrayPtr; + typedef boost::shared_ptr > StringTablePtr; + + BOOST_STATIC_ASSERT(boost::is_pod::value); + + StringTableIndexArrayPtr indexArray(reinterpret_cast(new char[sizeof(StringTableIndex)*length])); + StringTablePtr table(new StringTableT); + + for(size_t i=0; iintern(rawArray[i]); + + return new StringArrayT(*table, indexArray.get(), length, 1, indexArray, table, writable); +} + +template +StringArrayT::StringArrayT(StringTableT &table, StringTableIndex *ptr, size_t length, + size_t stride, boost::any tableHandle, bool writable) + : super(ptr,length,stride,writable), _table(table), _tableHandle(tableHandle) +{ + // nothing +} + +template +StringArrayT::StringArrayT(StringTableT &table, StringTableIndex *ptr, size_t length, + size_t stride, boost::any handle, boost::any tableHandle, bool writable) + : super(ptr,length,stride,handle,writable), _table(table), _tableHandle(tableHandle) +{ + // nothing +} + +template +StringArrayT::StringArrayT(const StringTableT &table, const StringTableIndex *ptr, + size_t length, size_t stride, boost::any tableHandle) + : super(ptr,length,stride), _table(const_cast &>(table)), + _tableHandle(tableHandle) +{ + // nothing +} + +template +StringArrayT::StringArrayT(const StringTableT &table, const StringTableIndex *ptr, + size_t length, size_t stride, boost::any handle, boost::any tableHandle) + : super(ptr,length,stride,handle), _table(const_cast &>(table)), + _tableHandle(tableHandle) +{ + // nothing +} + +template +StringArrayT::StringArrayT(StringArrayT& s, const FixedArray& mask) + : super(s, mask), + _table(s._table), + _tableHandle(s._tableHandle) +{ +} + +template +StringArrayT* +StringArrayT::getslice_string(PyObject *index) const +{ + typedef boost::shared_array StringTableIndexArrayPtr; + typedef boost::shared_ptr > StringTablePtr; + + BOOST_STATIC_ASSERT(boost::is_pod::value); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + + StringTableIndexArrayPtr indexArray(reinterpret_cast(new char[sizeof(StringTableIndex)*slicelength])); + StringTablePtr table(new StringTableT); + + for(size_t i=0; iintern(getitem_string(start+i*step)); + + return new StringArrayT(*table, indexArray.get(), slicelength, 1, indexArray, boost::any(table)); +} + +template +StringArrayT* +StringArrayT::getslice_mask_string(const FixedArray& mask) +{ + return new StringArrayT(*this, mask); +} + +template +void +StringArrayT::setitem_string_scalar(PyObject *index, const T &data) +{ + if 
(!writable()) + throw std::invalid_argument("Fixed string-array is read-only."); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + StringTableIndex di = _table.intern(data); + for (size_t i=0; i +void +StringArrayT::setitem_string_scalar_mask(const FixedArray &mask, const T &data) +{ + if (!writable()) + throw std::invalid_argument("Fixed string-array is read-only."); + + size_t len = match_dimension(mask); + StringTableIndex di = _table.intern(data); + for (size_t i=0; i +void +StringArrayT::setitem_string_vector(PyObject *index, const StringArrayT &data) +{ + if (!writable()) + throw std::invalid_argument("Fixed string-array is read-only."); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + + // we have a valid range of indices + if ((size_t) data.len() != slicelength) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); + throw_error_already_set(); + } + for (size_t i=0; i +void +StringArrayT::setitem_string_vector_mask(const FixedArray &mask, const StringArrayT &data) +{ + if (!writable()) + throw std::invalid_argument("Fixed string-array is read-only."); + + size_t len = match_dimension(mask); + if ((size_t) data.len() == len) { + for (size_t i=0; i +FixedArray operator == (const StringArrayT &a0, const StringArrayT &a1) { + size_t len = a0.match_dimension(a1); + FixedArray f(len); + const StringTableT &t0 = a0.stringTable(); + const StringTableT &t1 = a1.stringTable(); + for (size_t i=0;i +FixedArray operator == (const StringArrayT &a0, const T &v1) { + size_t len = a0.len(); + FixedArray f(len); + const StringTableT &t0 = a0.stringTable(); + if (t0.hasString(v1)) { + StringTableIndex v1i = t0.lookup(v1); + for (size_t i=0;i +FixedArray operator == (const T &v1,const StringArrayT &a0) { + return a0 == v1; +} + +template +FixedArray operator != (const StringArrayT &a0, const StringArrayT &a1) { + size_t len = a0.match_dimension(a1); + FixedArray f(len); + const StringTableT &t0 = a0.stringTable(); + const StringTableT &t1 = a1.stringTable(); + for (size_t i=0;i +FixedArray operator != (const StringArrayT &a0, const T &v1) { + size_t len = a0.len(); + FixedArray f(len); + const StringTableT &t0 = a0.stringTable(); + if (t0.hasString(v1)) { + StringTableIndex v1i = t0.lookup(v1); + for (size_t i=0;i +FixedArray operator != (const T &v1,const StringArrayT &a0) { + return a0 != v1; +} + +template<> PYIMATH_EXPORT StringTableIndex FixedArrayDefaultValue::value() { return StringTableIndex(0); } +template<> PYIMATH_EXPORT const char* FixedArray::name() { return "StringTableArray"; } + +template class PYIMATH_EXPORT StringArrayT; +template class PYIMATH_EXPORT StringArrayT; + +template FixedArray operator == (const StringArray& a0, const StringArray& a1); +template FixedArray operator == (const StringArray& a0, const std::string& v1); +template FixedArray operator == (const std::string& a0, const StringArray& v1); +template FixedArray operator != (const StringArray& a0, const StringArray& a1); +template FixedArray operator != (const StringArray& a0, const std::string& v1); +template FixedArray operator != (const std::string& a0, const StringArray& v1); + +template FixedArray operator == (const WstringArray& a0, const WstringArray& a1); +template FixedArray operator == (const WstringArray& a0, const std::wstring& v1); +template FixedArray operator == (const std::wstring& a0, const WstringArray& v1); +template 
FixedArray operator != (const WstringArray& a0, const WstringArray& a1); +template FixedArray operator != (const WstringArray& a0, const std::wstring& v1); +template FixedArray operator != (const std::wstring& a0, const WstringArray& v1); + +void register_StringArrays() +{ + typedef StringArrayT StringArray; + typedef StringArrayT WstringArray; + + class_ string_array_class = + class_("StringArray",no_init); + string_array_class + .def("__init__", make_constructor(StringArray::createDefaultArray)) + .def("__init__", make_constructor(StringArray::createUniformArray)) + .def("__getitem__", &StringArray::getslice_string, return_value_policy()) + .def("__getitem__", &StringArray::getitem_string) + .def("__getitem__", &StringArray::getslice_mask_string, return_value_policy()) + .def("__setitem__", &StringArray::setitem_string_scalar) + .def("__setitem__", &StringArray::setitem_string_scalar_mask) + .def("__setitem__", &StringArray::setitem_string_vector) + .def("__setitem__", &StringArray::setitem_string_vector_mask) + .def("__len__", &StringArray::len) + .def("writable", &StringArray::writable) + .def("makeReadOnly",&StringArray::makeReadOnly) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self == other()) + .def(other() == self) + .def(self != self) // NOSONAR - suppress SonarCloud bug report. + .def(self != other()) + .def(other() != self) + ; + + class_ wstring_array_class = + class_("WstringArray",no_init); + wstring_array_class + .def("__init__", make_constructor(WstringArray::createDefaultArray)) + .def("__init__", make_constructor(WstringArray::createUniformArray)) + .def("__getitem__", &WstringArray::getslice_string, return_value_policy()) + .def("__getitem__", &WstringArray::getitem_string) + .def("__getitem__", &WstringArray::getslice_mask_string, return_value_policy()) + .def("__setitem__", &WstringArray::setitem_string_scalar) + .def("__setitem__", &WstringArray::setitem_string_scalar_mask) + .def("__setitem__", &WstringArray::setitem_string_vector) + .def("__setitem__", &WstringArray::setitem_string_vector_mask) + .def("__len__",&WstringArray::len) + .def(self == self) // NOSONAR - suppress SonarCloud bug report. + .def(self == other()) + .def(other() == self) + .def(self != self) // NOSONAR - suppress SonarCloud bug report. + .def(self != other()) + .def(other() != self) + ; +} + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathStringTable.cpp b/Sources/MetaPy/PyImath/PyImathStringTable.cpp new file mode 100644 index 00000000..344fa958 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathStringTable.cpp @@ -0,0 +1,99 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
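The register_StringArrays() function above is what publishes these classes to Python. A hedged sketch of the resulting interface, with the constructor and operator shapes read off the make_constructor and def() calls (module name imath assumed):

    import imath

    a = imath.StringArray(4)           # createDefaultArray: four empty strings
    b = imath.StringArray("hello", 4)  # createUniformArray: four copies of "hello"
    b[0] = "world"                     # scalar __setitem__ interns the new value
    c = b[1:3]                         # slicing returns a new StringArray
    flags = (b == "hello")             # comparisons return an IntArray of 0/1 values
    len(b)                             # 4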
+// + +// clang-format off + +#include +#include +#include "PyImathExport.h" +#include "PyImathStringTable.h" + +namespace PyImath { + +template +StringTableIndex +StringTableT::lookup(const T &s) const +{ + typedef typename Table::template nth_index<1>::type StringSet; + const StringSet &strings = _table.template get<1>(); + + typename StringSet::const_iterator it = strings.find(s); + if (it == strings.end()) { + throw std::domain_error ("String table access out of bounds"); + } + + return it->i; +} + +template +const T & +StringTableT::lookup(StringTableIndex index) const +{ + typedef typename Table::template nth_index<0>::type IndexSet; + const IndexSet &indices = _table.template get<0>(); + + typename IndexSet::const_iterator it = indices.find(index); + if (it == indices.end()) { + throw std::domain_error ("String table access out of bounds"); + } + + return it->s; +} + +template +StringTableIndex +StringTableT::intern(const T &s) +{ + typedef typename Table::template nth_index<1>::type StringSet; + const StringSet &strings = _table.template get<1>(); + + typename StringSet::const_iterator it = strings.find(s); + if (it == strings.end()) { + size_t next_index = _table.size(); + if (next_index > std::numeric_limits::max()) { + throw std::domain_error ("Unable to intern string - string table would exceed maximum size"); + } + StringTableIndex index = StringTableIndex(StringTableIndex::index_type(next_index)); + _table.insert(StringTableEntry(index,s)); + return index; + } + + return it->i; +} + +template +size_t +StringTableT::size() const +{ + return _table.size(); +} + +template +bool +StringTableT::hasString(const T &s) const +{ + typedef typename Table::template nth_index<1>::type StringSet; + const StringSet &strings = _table.template get<1>(); + return strings.find(s) != strings.end(); +} + +template +bool +StringTableT::hasStringIndex(const StringTableIndex &s) const +{ + typedef typename Table::template nth_index<0>::type IndexSet; + const IndexSet &indices = _table.template get<0>(); + return indices.find(s) != indices.end(); +} + +namespace { +template class StringTableDetailT; +template class StringTableDetailT; +} + +template class PYIMATH_EXPORT StringTableT; +template class PYIMATH_EXPORT StringTableT; + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathTask.cpp b/Sources/MetaPy/PyImath/PyImathTask.cpp new file mode 100644 index 00000000..bf3ad7ac --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathTask.cpp @@ -0,0 +1,58 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathTask.h" + +namespace PyImath { + +static WorkerPool *_currentPool = nullptr; + +// It's not worth dispatching parallel tasks unless the iteration count +// is high enough. The time to create and launch parallel tasks takes +// longer than to just do the iterations directly. This value of '200' +// is actually very conservative; in some tests, this number should +// probably be in the thousands.
+static const size_t _minIterations = 200; + +WorkerPool * +WorkerPool::currentPool() +{ + return _currentPool; +} + +void +WorkerPool::setCurrentPool(WorkerPool *pool) +{ + _currentPool = pool; +} + +void +dispatchTask(Task &task,size_t length) +{ + if (length > _minIterations) + { + WorkerPool *curpool = WorkerPool::currentPool(); + if (curpool && !curpool->inWorkerThread()) + { + curpool->dispatch(task,length); + return; + } + } + task.execute(0,length,0); +} + + +size_t +workers() +{ + WorkerPool *curpool = WorkerPool::currentPool(); + if (curpool && !curpool->inWorkerThread()) + return curpool->workers(); + return 1; +} + +} diff --git a/Sources/MetaPy/PyImath/PyImathUtil.cpp b/Sources/MetaPy/PyImath/PyImathUtil.cpp new file mode 100644 index 00000000..0b8e67b3 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathUtil.cpp @@ -0,0 +1,70 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include "PyImathUtil.h" + +namespace PyImath { + +PyAcquireLock::PyAcquireLock() +{ + _gstate = PyGILState_Ensure(); +} + +PyAcquireLock::~PyAcquireLock() +{ + PyGILState_Release(_gstate); +} + +#ifdef PLATFORM_LINUX +// On Windows, this extern is not needed and produces a symbol mismatch at link time. +// We should verify that it's still needed on Linux for Python 2.6. +extern "C" PyThreadState *_PyThreadState_Current; +#endif + +static bool +pyHaveLock() +{ +#if PY_MAJOR_VERSION > 2 + return PyGILState_Check() != 0; +#else + // This is very much dependent on the current Python + // implementation of this functionality. If we switch versions of + // Python and the implementation changes, we'll have to change + // this code as well and introduce a #define for the Python + // version. + + if (!Py_IsInitialized()) + throw std::invalid_argument ("PyReleaseLock called without the interpreter initialized"); + + PyThreadState *myThreadState = PyGILState_GetThisThreadState(); + + // If the interpreter is initialized the gil is held if the + // current thread's thread state is the current thread state + return myThreadState != 0 && myThreadState == _PyThreadState_Current; +#endif +} + +PyReleaseLock::PyReleaseLock() +{ + // only call PyEval_SaveThread if we have the interpreter lock held, + // otherwise PyReleaseLock is a no-op. + if (pyHaveLock()) + _save = PyEval_SaveThread(); + else + _save = 0; +} + +PyReleaseLock::~PyReleaseLock() +{ + if (_save != 0) + PyEval_RestoreThread(_save); +} + +} // namespace PyImath diff --git a/Sources/MetaPy/PyImath/PyImathVec2fd.cpp b/Sources/MetaPy/PyImath/PyImathVec2fd.cpp new file mode 100644 index 00000000..d872adcf --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec2fd.cpp @@ -0,0 +1,46 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathVec2Impl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V2fArray::name() { return "V2fArray"; } +template <> const char *PyImath::V2dArray::name() { return "V2dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec2Name::value = "V2f"; +template<> const char *Vec2Name::value = "V2d"; + +// Specialization for float to full precision +template <> +std::string Vec2_repr(const Vec2 &v) +{ + return (boost::format("%s(%.9g, %.9g)") + % Vec2Name::value % v.x % v.y).str(); +} + +// Specialization for double to full precision +template <> +std::string Vec2_repr(const Vec2 &v) +{ + return (boost::format("%s(%.17g, %.17g)") + % Vec2Name::value % v.x % v.y).str(); +} + +template PYIMATH_EXPORT class_ > register_Vec2(); +template PYIMATH_EXPORT class_ > register_Vec2(); + +template PYIMATH_EXPORT class_ > > register_Vec2Array(); +template PYIMATH_EXPORT class_ > > register_Vec2Array(); + +template<> IMATH_NAMESPACE::Vec2 PYIMATH_EXPORT FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec2(0,0); } +template<> IMATH_NAMESPACE::Vec2 PYIMATH_EXPORT FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec2(0,0); } +} + diff --git a/Sources/MetaPy/PyImath/PyImathVec2si.cpp b/Sources/MetaPy/PyImath/PyImathVec2si.cpp new file mode 100644 index 00000000..796e7810 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec2si.cpp @@ -0,0 +1,34 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathVec2Impl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V2sArray::name() { return "V2sArray"; } +template <> const char *PyImath::V2iArray::name() { return "V2iArray"; } +template <> const char *PyImath::V2i64Array::name() { return "V2i64Array"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec2Name::value = "V2s"; +template<> const char *Vec2Name::value = "V2i"; +template<> const char *Vec2Name::value = "V2i64"; + +template PYIMATH_EXPORT class_ > register_Vec2(); +template PYIMATH_EXPORT class_ > register_Vec2(); +template PYIMATH_EXPORT class_ > register_Vec2(); + +template PYIMATH_EXPORT class_ > > register_Vec2Array(); +template PYIMATH_EXPORT class_ > > register_Vec2Array(); +template PYIMATH_EXPORT class_ > > register_Vec2Array(); + +template<> IMATH_NAMESPACE::Vec2 PYIMATH_EXPORT FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec2(0,0); } +template<> IMATH_NAMESPACE::Vec2 PYIMATH_EXPORT FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec2(0,0); } +template<> IMATH_NAMESPACE::Vec2 PYIMATH_EXPORT FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec2(0,0); } +} diff --git a/Sources/MetaPy/PyImath/PyImathVec3fd.cpp b/Sources/MetaPy/PyImath/PyImathVec3fd.cpp new file mode 100644 index 00000000..33a6ada9 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec3fd.cpp @@ -0,0 +1,46 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
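The Vec2_repr specializations just above (and the matching Vec3/Vec4 ones later in this diff) print with %.9g and %.17g so that repr() preserves the exact float and double values instead of truncating them. From Python this looks roughly like the following (module name imath assumed; the digit strings are what those format specifiers produce for the nearest representable values of 0.1 and 0.2):

    import imath

    repr(imath.V2f(0.1, 0.2))  # 'V2f(0.100000001, 0.200000003)'  -- 9 significant digits
    repr(imath.V2d(0.1, 0.2))  # 'V2d(0.10000000000000001, 0.20000000000000001)'  -- 17 digits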
+// + +// clang-format off + +#include "PyImathVec3Impl.h" +#include "PyImathVec3ArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V3fArray::name() { return "V3fArray"; } +template <> const char *PyImath::V3dArray::name() { return "V3dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec3Name::value() { return "V3f"; } +template<> const char *Vec3Name::value() { return "V3d"; } + +// Specialization for float to full precision +template <> +std::string Vec3_repr(const Vec3 &v) +{ + return (boost::format("%s(%.9g, %.9g, %.9g)") + % Vec3Name::value() % v.x % v.y % v.z).str(); +} + +// Specialization for double to full precision +template <> +std::string Vec3_repr(const Vec3 &v) +{ + return (boost::format("%s(%.17g, %.17g, %.17g)") + % Vec3Name::value() % v.x % v.y % v.z).str(); +} + +template PYIMATH_EXPORT class_ > register_Vec3(); +template PYIMATH_EXPORT class_ > register_Vec3(); + +template PYIMATH_EXPORT class_ > > register_Vec3Array(); +template PYIMATH_EXPORT class_ > > register_Vec3Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +} diff --git a/Sources/MetaPy/PyImath/PyImathVec3si.cpp b/Sources/MetaPy/PyImath/PyImathVec3si.cpp new file mode 100644 index 00000000..517b4ae2 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec3si.cpp @@ -0,0 +1,31 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathVec3Impl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V3cArray::name() { return "V3cArray"; } +template <> const char *PyImath::V3sArray::name() { return "V3sArray"; } +template <> const char *PyImath::V3iArray::name() { return "V3iArray"; } +template <> const char *PyImath::V3i64Array::name() { return "V3i64Array"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec3Name::value() { return "V3c"; } +template<> const char *Vec3Name::value() { return "V3s"; } +template<> const char *Vec3Name::value() { return "V3i"; } +template<> const char *Vec3Name::value() { return "V3i64"; } + +template PYIMATH_EXPORT class_ > register_Vec3(); +template PYIMATH_EXPORT class_ > register_Vec3(); +template PYIMATH_EXPORT class_ > register_Vec3(); +template PYIMATH_EXPORT class_ > register_Vec3(); + +} + diff --git a/Sources/MetaPy/PyImath/PyImathVec3siArray.cpp b/Sources/MetaPy/PyImath/PyImathVec3siArray.cpp new file mode 100644 index 00000000..33ec25f2 --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec3siArray.cpp @@ -0,0 +1,24 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathVec3ArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template PYIMATH_EXPORT class_ > > register_Vec3Array(); +template PYIMATH_EXPORT class_ > > register_Vec3Array(); +template PYIMATH_EXPORT class_ > > register_Vec3Array(); +template PYIMATH_EXPORT class_ > > register_Vec3Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec3 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec3(0,0,0); } +} diff --git a/Sources/MetaPy/PyImath/PyImathVec4fd.cpp b/Sources/MetaPy/PyImath/PyImathVec4fd.cpp new file mode 100644 index 00000000..17c1cd5b --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec4fd.cpp @@ -0,0 +1,46 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathVec4Impl.h" +#include "PyImathVec4ArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V4fArray::name() { return "V4fArray"; } +template <> const char *PyImath::V4dArray::name() { return "V4dArray"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec4Name::value() { return "V4f"; } +template<> const char *Vec4Name::value() { return "V4d"; } + +// Specialization for float to full precision +template <> +std::string Vec4_repr(const Vec4 &v) +{ + return (boost::format("%s(%.9g, %.9g, %.9g, %.9g)") + % Vec4Name::value() % v.x % v.y % v.z % v.w).str(); +} + +// Specialization for double to full precision +template <> +std::string Vec4_repr(const Vec4 &v) +{ + return (boost::format("%s(%.17g, %.17g, %.17g, %.17g)") + % Vec4Name::value() % v.x % v.y % v.z % v.w).str(); +} + +template PYIMATH_EXPORT class_ > register_Vec4(); +template PYIMATH_EXPORT class_ > register_Vec4(); + +template PYIMATH_EXPORT class_ > > register_Vec4Array(); +template PYIMATH_EXPORT class_ > > register_Vec4Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 PyImath::FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +} diff --git a/Sources/MetaPy/PyImath/PyImathVec4si.cpp b/Sources/MetaPy/PyImath/PyImathVec4si.cpp new file mode 100644 index 00000000..5cfbe6ac --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec4si.cpp @@ -0,0 +1,31 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#include "PyImathVec4Impl.h" +#include "PyImathExport.h" + +namespace PyImath { +template <> const char *PyImath::V4cArray::name() { return "V4cArray"; } +template <> const char *PyImath::V4sArray::name() { return "V4sArray"; } +template <> const char *PyImath::V4iArray::name() { return "V4iArray"; } +template <> const char *PyImath::V4i64Array::name() { return "V4i64Array"; } + +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template<> const char *Vec4Name::value() { return "V4c"; } +template<> const char *Vec4Name::value() { return "V4s"; } +template<> const char *Vec4Name::value() { return "V4i"; } +template<> const char *Vec4Name::value() { return "V4i64"; } + +template PYIMATH_EXPORT class_ > register_Vec4(); +template PYIMATH_EXPORT class_ > register_Vec4(); +template PYIMATH_EXPORT class_ > register_Vec4(); +template PYIMATH_EXPORT class_ > register_Vec4(); + +} + diff --git a/Sources/MetaPy/PyImath/PyImathVec4siArray.cpp b/Sources/MetaPy/PyImath/PyImathVec4siArray.cpp new file mode 100644 index 00000000..3b8692bf --- /dev/null +++ b/Sources/MetaPy/PyImath/PyImathVec4siArray.cpp @@ -0,0 +1,24 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#include "PyImathVec4ArrayImpl.h" +#include "PyImathExport.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template PYIMATH_EXPORT class_ > > register_Vec4Array(); +template PYIMATH_EXPORT class_ > > register_Vec4Array(); +template PYIMATH_EXPORT class_ > > register_Vec4Array(); +template PYIMATH_EXPORT class_ > > register_Vec4Array(); + +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +template<> PYIMATH_EXPORT IMATH_NAMESPACE::Vec4 FixedArrayDefaultValue >::value() { return IMATH_NAMESPACE::Vec4(0,0,0,0); } +} diff --git a/Sources/MetaPy/PyImath/imathmodule.cpp b/Sources/MetaPy/PyImath/imathmodule.cpp new file mode 100644 index 00000000..21a60106 --- /dev/null +++ b/Sources/MetaPy/PyImath/imathmodule.cpp @@ -0,0 +1,652 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
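imathmodule.cpp, which starts here, ties all of the per-type registrations above into the final extension module. As orientation, end-to-end usage of the built module looks roughly like this; the names and argument shapes are taken from the def() calls and docstrings later in this file, and the module name imath is assumed:

    import imath

    pts = imath.V3fArray(3)              # fixed-length array of V3f
    pts[0] = imath.V3f(0, 0, 0)
    pts[1] = imath.V3f(1, 2, 3)
    pts[2] = imath.V3f(-1, 0, 5)
    box = imath.computeBoundingBox(pts)  # Box3f enclosing the three points

    src = [imath.V3d(0, 0, 0), imath.V3d(1, 0, 0)]
    dst = [imath.V3d(0, 1, 0), imath.V3d(1, 1, 0)]
    m = imath.procrustesRotationAndTranslation(src, dst, None, False)  # M44d; no weights, no scaling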
+// + +// clang-format off + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include +#include + +#include "PyImathFixedArray.h" +#include "PyImath.h" +#include "PyImathExport.h" +#include "PyImathBasicTypes.h" +#include "PyImathVec.h" +#include "PyImathMatrix.h" +#include "PyImathBox.h" +#include "PyImathFun.h" +#include "PyImathQuat.h" +#include "PyImathEuler.h" +#include "PyImathColor.h" +#include "PyImathFrustum.h" +#include "PyImathPlane.h" +#include "PyImathLine.h" +#include "PyImathRandom.h" +#include "PyImathShear.h" +#include "PyImathMathExc.h" +#include "PyImathAutovectorize.h" +#include "PyImathStringArrayRegister.h" +#include "PyImathBufferProtocol.h" + +using namespace boost::python; +using namespace PyImath; + +namespace { + +template +IMATH_NAMESPACE::Box > +computeBoundingBox(const FixedArray >& position) +{ + IMATH_NAMESPACE::Box > bounds; + int len = position.len(); + for (int i = 0; i < len; ++i) + bounds.extendBy(position[i]); + return bounds; +} + +IMATH_NAMESPACE::M44d +procrustes1 (PyObject* from_input, + PyObject* to_input, + PyObject* weights_input = 0, + bool doScale = false) +{ + // Verify the sequences: + if (!PySequence_Check (from_input)) + { + PyErr_SetString (PyExc_TypeError, "Expected a sequence type for 'from'"); + throw_error_already_set(); + } + + if (!PySequence_Check (to_input)) + { + PyErr_SetString (PyExc_TypeError, "Expected a sequence type for 'to'"); + throw_error_already_set(); + } + + bool useWeights = PySequence_Check (weights_input); + + // Now verify the lengths: + const Py_ssize_t n = PySequence_Length (from_input); + if (n != PySequence_Length (to_input) || + (useWeights && n != PySequence_Length (weights_input))) + { + PyErr_SetString (PyExc_TypeError, "'from, 'to', and 'weights' should all have the same lengths."); + throw_error_already_set(); + } + + std::vector from; from.reserve (n); + std::vector to; to.reserve (n); + std::vector weights; weights.reserve (n); + + for (Py_ssize_t i = 0; i < n; ++i) + { + PyObject* f = PySequence_GetItem (from_input, i); + PyObject* t = PySequence_GetItem (to_input, i); + PyObject* w = 0; + if (useWeights) + w = PySequence_GetItem (weights_input, i); + + if (f == 0 || t == 0 || (useWeights && w == 0)) + { + PyErr_SetString (PyExc_TypeError, + "Missing element in array"); + throw_error_already_set(); + } + + from.push_back (extract (f)); + to.push_back (extract (t)); + if (useWeights) + weights.push_back (extract (w)); + } + + if (useWeights) + return IMATH_NAMESPACE::procrustesRotationAndTranslation (&from[0], &to[0], &weights[0], n, doScale); + else + return IMATH_NAMESPACE::procrustesRotationAndTranslation (&from[0], &to[0], n, doScale); +} + +template +const T* +flatten(const PyImath::FixedArray& q, std::unique_ptr& handle) +{ + if (q.isMaskedReference()) + { + const size_t len = q.len(); + handle.reset(new T[len]); + for (size_t i = 0; i < len; ++i) + handle[i] = q[i]; + + return handle.get(); + } + + return &q[0]; +} + +template +IMATH_NAMESPACE::M44d +procrustesRotationAndTranslation(const FixedArray >& from, + const FixedArray >& to, + const FixedArray* weights = 0, + bool doScale = false) +{ + const size_t len = from.match_dimension(to); + if (len == 0) + return IMATH_NAMESPACE::M44d(); + + std::unique_ptr[]> fromHandle; + const Imath::Vec3* fromPtr = flatten(from, fromHandle); + + std::unique_ptr[]> toHandle; + const Imath::Vec3* toPtr = flatten(to, toHandle); + + std::unique_ptr weightsHandle; + const T* weightsPtr = nullptr; + if 
(weights) + { + weights->match_dimension(from); + flatten(*weights, weightsHandle); + } + + if (weightsPtr) + return IMATH_NAMESPACE::procrustesRotationAndTranslation(fromPtr, toPtr, weightsPtr, len, doScale); + else + return IMATH_NAMESPACE::procrustesRotationAndTranslation(fromPtr, toPtr, len, doScale); +} + +BOOST_PYTHON_FUNCTION_OVERLOADS(procrustesRotationAndTranslationf_overloads, procrustesRotationAndTranslation, 2, 4); +BOOST_PYTHON_FUNCTION_OVERLOADS(procrustesRotationAndTranslationd_overloads, procrustesRotationAndTranslation, 2, 4); + + +FixedArray2D rangeX(int sizeX, int sizeY) +{ + FixedArray2D f(sizeX, sizeY); + for (int j=0; j rangeY(int sizeX, int sizeY) +{ + FixedArray2D f(sizeX, sizeY); + for (int j=0; j iclass2D = IntArray2D::register_("IntArray2D","Fixed length array of ints"); + add_arithmetic_math_functions(iclass2D); + add_mod_math_functions(iclass2D); + add_comparison_functions(iclass2D); + add_ordered_comparison_functions(iclass2D); + add_explicit_construction_from_type(iclass2D); + add_explicit_construction_from_type(iclass2D); + + class_ imclass = IntMatrix::register_("IntMatrix","Fixed size matrix of ints"); + add_arithmetic_math_functions(imclass); + + class_ fclass2D = FloatArray2D::register_("FloatArray2D","Fixed length 2D array of floats"); + add_arithmetic_math_functions(fclass2D); + add_pow_math_functions(fclass2D); + add_comparison_functions(fclass2D); + add_ordered_comparison_functions(fclass2D); + add_explicit_construction_from_type(fclass2D); + add_explicit_construction_from_type(fclass2D); + + class_ fmclass = FloatMatrix::register_("FloatMatrix","Fixed size matrix of floats"); + add_arithmetic_math_functions(fmclass); + add_pow_math_functions(fmclass); + + class_ dclass2D = DoubleArray2D::register_("DoubleArray2D","Fixed length array of doubles"); + add_arithmetic_math_functions(dclass2D); + add_pow_math_functions(dclass2D); + add_comparison_functions(dclass2D); + add_ordered_comparison_functions(dclass2D); + add_explicit_construction_from_type(dclass2D); + add_explicit_construction_from_type(dclass2D); + + class_ dmclass = DoubleMatrix::register_("DoubleMatrix","Fixed size matrix of doubles"); + add_arithmetic_math_functions(dmclass); + add_pow_math_functions(dmclass); + + def("rangeX", &rangeX); + def("rangeY", &rangeY); + + def("IntArrayFromBuffer", &fixedArrayFromBuffer >, + return_value_policy(), + args("bufferObject"), + "Construct an IntArray from a buffer object"); + + def("FloatArrayFromBuffer", &fixedArrayFromBuffer >, + return_value_policy(), + args("bufferObject"), + "Construct a FloatArray from a buffer object"); + + def("DoubleArrayFromBuffer", &fixedArrayFromBuffer >, + return_value_policy(), + args("bufferObject"), + "Construct a DoubleArray from a buffer object"); + + // + // Vec2 + // + register_Vec2(); + register_Vec2(); + register_Vec2(); + register_Vec2(); + register_Vec2(); + class_ > v2s_class = register_Vec2Array(); + class_ > v2i_class = register_Vec2Array(); + class_ > v2i64_class = register_Vec2Array(); + class_ > v2f_class = register_Vec2Array(); + class_ > v2d_class = register_Vec2Array(); + + add_explicit_construction_from_type(v2i_class); + add_explicit_construction_from_type(v2i64_class); + add_explicit_construction_from_type(v2f_class); + add_explicit_construction_from_type(v2d_class); + + add_explicit_construction_from_type(v2s_class); + add_explicit_construction_from_type(v2i64_class); + add_explicit_construction_from_type(v2f_class); + add_explicit_construction_from_type(v2d_class); + + 
add_explicit_construction_from_type(v2s_class); + add_explicit_construction_from_type(v2i_class); + add_explicit_construction_from_type(v2f_class); + add_explicit_construction_from_type(v2d_class); + + add_explicit_construction_from_type(v2s_class); + add_explicit_construction_from_type(v2i_class); + add_explicit_construction_from_type(v2i64_class); + add_explicit_construction_from_type(v2d_class); + + add_explicit_construction_from_type(v2s_class); + add_explicit_construction_from_type(v2i_class); + add_explicit_construction_from_type(v2i64_class); + add_explicit_construction_from_type(v2f_class); + + add_buffer_protocol > (v2s_class); + add_buffer_protocol > (v2i_class); + add_buffer_protocol > (v2i64_class); + add_buffer_protocol > (v2f_class); + add_buffer_protocol > (v2d_class); + + def("V2iArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V2iArray from a buffer object"); + + def("V2fArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V2fArray from a buffer object"); + + def("V2dArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V2dArray from a buffer object"); + + // + // Vec3 + // + register_Vec3(); + register_Vec3(); + register_Vec3(); + register_Vec3(); + register_Vec3(); + register_Vec3(); + class_ > v3s_class = register_Vec3Array(); + class_ > v3i_class = register_Vec3Array(); + class_ > v3i64_class = register_Vec3Array(); + class_ > v3f_class = register_Vec3Array(); + class_ > v3d_class = register_Vec3Array(); + + add_explicit_construction_from_type(v3i_class); + add_explicit_construction_from_type(v3i64_class); + add_explicit_construction_from_type(v3f_class); + add_explicit_construction_from_type(v3d_class); + + add_explicit_construction_from_type(v3s_class); + add_explicit_construction_from_type(v3i64_class); + add_explicit_construction_from_type(v3f_class); + add_explicit_construction_from_type(v3d_class); + + add_explicit_construction_from_type(v3s_class); + add_explicit_construction_from_type(v3i_class); + add_explicit_construction_from_type(v3f_class); + add_explicit_construction_from_type(v3d_class); + + add_explicit_construction_from_type(v3s_class); + add_explicit_construction_from_type(v3i_class); + add_explicit_construction_from_type(v3i64_class); + add_explicit_construction_from_type(v3d_class); + + add_explicit_construction_from_type(v3s_class); + add_explicit_construction_from_type(v3i_class); + add_explicit_construction_from_type(v3i64_class); + add_explicit_construction_from_type(v3f_class); + + add_buffer_protocol > (v3s_class); + add_buffer_protocol > (v3i_class); + add_buffer_protocol > (v3i64_class); + add_buffer_protocol > (v3f_class); + add_buffer_protocol > (v3d_class); + + def("V3iArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V3iArray from a buffer object"); + + def("V3fArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V3fArray from a buffer object"); + + def("V3dArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V3dArray from a buffer object"); + + // + // Vec4 + // + register_Vec4(); + register_Vec4(); + register_Vec4(); + register_Vec4(); + register_Vec4(); + register_Vec4(); + class_ > v4s_class = register_Vec4Array(); + class_ > v4i_class = register_Vec4Array(); + class_ > v4i64_class = 
register_Vec4Array(); + class_ > v4f_class = register_Vec4Array(); + class_ > v4d_class = register_Vec4Array(); + + add_explicit_construction_from_type(v4i_class); + add_explicit_construction_from_type(v4i64_class); + add_explicit_construction_from_type(v4f_class); + add_explicit_construction_from_type(v4d_class); + + add_explicit_construction_from_type(v4s_class); + add_explicit_construction_from_type(v4i64_class); + add_explicit_construction_from_type(v4f_class); + add_explicit_construction_from_type(v4d_class); + + add_explicit_construction_from_type(v4s_class); + add_explicit_construction_from_type(v4i_class); + add_explicit_construction_from_type(v4f_class); + add_explicit_construction_from_type(v4d_class); + + add_explicit_construction_from_type(v4s_class); + add_explicit_construction_from_type(v4i_class); + add_explicit_construction_from_type(v4i64_class); + add_explicit_construction_from_type(v4d_class); + + add_explicit_construction_from_type(v4s_class); + add_explicit_construction_from_type(v4i_class); + add_explicit_construction_from_type(v4i64_class); + add_explicit_construction_from_type(v4f_class); + + def("V4iArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V4iArray from a buffer object"); + + def("V4fArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V4fArray from a buffer object"); + + def("V4dArrayFromBuffer", &fixedArrayFromBuffer > >, + return_value_policy(), + args("bufferObject"), + "Construct a V4dArray from a buffer object"); + + // + // Quat + // + register_Quat(); + register_Quat(); + class_ > quatf_class = register_QuatArray(); + class_ > quatd_class = register_QuatArray(); + add_explicit_construction_from_type(quatf_class); + add_explicit_construction_from_type(quatd_class); + + // + // Euler + // + register_Euler(); + register_Euler(); + class_ > eulerf_class = register_EulerArray(); + class_ > eulerd_class = register_EulerArray(); + add_explicit_construction_from_type(eulerf_class); + add_explicit_construction_from_type(eulerd_class); + + // + // Box2 + // + register_Box2(); + register_Box2(); + register_Box2(); + register_Box2(); + register_Box2(); + class_ > b2s_class = register_BoxArray(); + class_ > b2i_class = register_BoxArray(); + class_ > b2i64_class = register_BoxArray(); + class_ > b2f_class = register_BoxArray(); + class_ > b2d_class = register_BoxArray(); + + // + // Box3 + // + register_Box3(); + register_Box3(); + register_Box3(); + register_Box3(); + register_Box3(); + class_ > b3s_class = register_BoxArray(); + class_ > b3i_class = register_BoxArray(); + class_ > b3i64_class = register_BoxArray(); + class_ > b3f_class = register_BoxArray(); + class_ > b3d_class = register_BoxArray(); + + // + // Matrix22/33/44 + // + register_Matrix22(); + register_Matrix22(); + register_Matrix33(); + register_Matrix33(); + register_Matrix44(); + register_Matrix44(); + + // + // M22/M33/44Array + // + class_ > m44d_class = register_M44Array(); + class_ > m44f_class = register_M44Array(); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix44 >(m44d_class); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix44 > (m44f_class); + + class_ > m33d_class = register_M33Array(); + class_ > m33f_class = register_M33Array(); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix33 >(m33d_class); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix33 > (m33f_class); + + class_ > m22d_class = register_M22Array(); + 
class_ > m22f_class = register_M22Array(); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix22 >(m22d_class); + add_explicit_construction_from_type< IMATH_NAMESPACE::Matrix22 > (m22f_class); + + // + // String Array + // + register_StringArrays(); + + // + // Color3/4 + // + register_Color3(); + register_Color3(); + register_Color4(); + register_Color4(); + + // + // C3/4Array + // + class_ > c3f_class = register_Color3Array(); + class_ > c3c_class = register_Color3Array(); + add_explicit_construction_from_type(c3f_class); + add_explicit_construction_from_type(c3f_class); + + class_ > c4f_class = register_Color4Array(); + class_ > c4c_class = register_Color4Array(); + + // + // Color4Array + // + register_Color4Array2D(); + register_Color4Array2D(); + + // + // Frustum + // + register_Frustum(); + register_Frustum(); + register_FrustumTest(); + register_FrustumTest(); + + // + // Plane + // + register_Plane(); + register_Plane(); + + // + // Line + // + register_Line(); + register_Line(); + + // + // Shear + // + register_Shear(); + register_Shear(); + + // + // Utility Functions + // + register_functions(); + + + def("procrustesRotationAndTranslation", procrustes1, + args("fromPts", "toPts", "weights", "doScale"), // Can't use 'from' and 'to' because 'from' is a reserved keywork in Python + "Computes the orthogonal transform (consisting only of rotation and translation) mapping the " + "'fromPts' points as close as possible to the 'toPts' points in the least squares norm. The 'fromPts' and " + "'toPts' lists must be the same length or the function will error out. If weights " + "are provided, then the points are weighted (that is, some points are considered more important " + "than others while computing the transform). If the 'doScale' parameter is True, then " + "the resulting matrix is also allowed to have a uniform scale."); + + def("procrustesRotationAndTranslation", &procrustesRotationAndTranslation, procrustesRotationAndTranslationf_overloads( + args("fromPts", "toPts", "weights", "doScale"), + "Computes the orthogonal transform (consisting only of rotation and translation) mapping the " + "'fromPts' points as close as possible to the 'toPts' points in the least squares norm. The 'fromPts' and " + "'toPts' lists must be the same length or the function will error out. If weights " + "are provided, then the points are weighted (that is, some points are considered more important " + "than others while computing the transform). If the 'doScale' parameter is True, then " + "the resulting matrix is also allowed to have a uniform scale.")); + + def("procrustesRotationAndTranslation", &procrustesRotationAndTranslation, procrustesRotationAndTranslationd_overloads( + args("fromPts", "toPts", "weights", "doScale"), + "Computes the orthogonal transform (consisting only of rotation and translation) mapping the " + "'fromPts' points as close as possible to the 'toPts' points in the least squares norm. The 'fromPts' and " + "'toPts' lists must be the same length or the function will error out. If weights " + "are provided, then the points are weighted (that is, some points are considered more important " + "than others while computing the transform). 
If the 'doScale' parameter is True, then " + "the resulting matrix is also allowed to have a uniform scale.")); + + // + // Rand + // + register_Rand32(); + register_Rand48(); + + // + // Initialize constants + // + + scope().attr("EULER_XYZ") = IMATH_NAMESPACE::Eulerf::XYZ; + scope().attr("EULER_XZY") = IMATH_NAMESPACE::Eulerf::XZY; + scope().attr("EULER_YZX") = IMATH_NAMESPACE::Eulerf::YZX; + scope().attr("EULER_YXZ") = IMATH_NAMESPACE::Eulerf::YXZ; + scope().attr("EULER_ZXY") = IMATH_NAMESPACE::Eulerf::ZXY; + scope().attr("EULER_ZYX") = IMATH_NAMESPACE::Eulerf::ZYX; + scope().attr("EULER_XZX") = IMATH_NAMESPACE::Eulerf::XZX; + scope().attr("EULER_XYX") = IMATH_NAMESPACE::Eulerf::XYX; + scope().attr("EULER_YXY") = IMATH_NAMESPACE::Eulerf::YXY; + scope().attr("EULER_YZY") = IMATH_NAMESPACE::Eulerf::YZY; + scope().attr("EULER_ZYZ") = IMATH_NAMESPACE::Eulerf::ZYZ; + scope().attr("EULER_ZXZ") = IMATH_NAMESPACE::Eulerf::ZXZ; + scope().attr("EULER_XYZr") = IMATH_NAMESPACE::Eulerf::XYZr; + scope().attr("EULER_XZYr") = IMATH_NAMESPACE::Eulerf::XZYr; + scope().attr("EULER_YZXr") = IMATH_NAMESPACE::Eulerf::YZXr; + scope().attr("EULER_YXZr") = IMATH_NAMESPACE::Eulerf::YXZr; + scope().attr("EULER_ZXYr") = IMATH_NAMESPACE::Eulerf::ZXYr; + scope().attr("EULER_ZYXr") = IMATH_NAMESPACE::Eulerf::ZYXr; + scope().attr("EULER_XZXr") = IMATH_NAMESPACE::Eulerf::XZXr; + scope().attr("EULER_XYXr") = IMATH_NAMESPACE::Eulerf::XYXr; + scope().attr("EULER_YXYr") = IMATH_NAMESPACE::Eulerf::YXYr; + scope().attr("EULER_YZYr") = IMATH_NAMESPACE::Eulerf::YZYr; + scope().attr("EULER_ZYZr") = IMATH_NAMESPACE::Eulerf::ZYZr; + scope().attr("EULER_ZXZr") = IMATH_NAMESPACE::Eulerf::ZXZr; + scope().attr("EULER_X_AXIS") = IMATH_NAMESPACE::Eulerf::X; + scope().attr("EULER_Y_AXIS") = IMATH_NAMESPACE::Eulerf::Y; + scope().attr("EULER_Z_AXIS") = IMATH_NAMESPACE::Eulerf::Z; + scope().attr("EULER_IJKLayout") = IMATH_NAMESPACE::Eulerf::IJKLayout; + scope().attr("EULER_XYZLayout") = IMATH_NAMESPACE::Eulerf::XYZLayout; + + scope().attr("INT_MIN") = std::numeric_limits::min(); + scope().attr("INT_MAX") = std::numeric_limits::max(); + scope().attr("INT_LOWEST") = std::numeric_limits::lowest(); + scope().attr("INT_EPS") = std::numeric_limits::epsilon(); + + scope().attr("FLT_MIN") = std::numeric_limits::min(); + scope().attr("FLT_MAX") = std::numeric_limits::max(); + scope().attr("FLT_LOWEST") = std::numeric_limits::lowest(); + scope().attr("FLT_EPS") = std::numeric_limits::epsilon(); + + scope().attr("DBL_MIN") = std::numeric_limits::min(); + scope().attr("DBL_MAX") = std::numeric_limits::max(); + scope().attr("DBL_LOWEST") = std::numeric_limits::lowest(); + scope().attr("DBL_EPS") = std::numeric_limits::epsilon(); + + def("computeBoundingBox", &computeBoundingBox, + "computeBoundingBox(position) -- computes the bounding box from the position array."); + + def("computeBoundingBox", &computeBoundingBox, + "computeBoundingBox(position) -- computes the bounding box from the position array."); +} + diff --git a/Sources/MetaPy/PyImath/varraySemantics.txt b/Sources/MetaPy/PyImath/varraySemantics.txt new file mode 100644 index 00000000..7d59886d --- /dev/null +++ b/Sources/MetaPy/PyImath/varraySemantics.txt @@ -0,0 +1,145 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +----------- +Terminology +----------- + +Items - Similar to 'list items'. Can think of as the 'vertical' + dimension similar to the other FixedArray dimension. Each item + contains an array of varying length. 
+ +Elements - The 'variable-length' array members of each item. In this + case, each 'element' is an int. For a FloatVArray, the + elements would be floats. + + +------------ +Construction +------------ + +v = IntVArray() + : Do not support; FixedArrays generally don't have empty construction. + +v = IntVArray(10) + : Creates 10 items, each item has zero elements (i.e. empty). + +v = IntVArray(int initialValue , 10, 5) + : Creates 10 items, each item has 5 elements that are initialized + to the initialValue. + +v = IntVArray(IntArray initialValue, 10) + : Creates 10 items, each initialized with a copy of the elements of + the provided initialValue IntArray. + +v = IntVArray([1, 2, 3], 10) + : Creates 10 items, each initialized with the elements of the provided + list. This would be similar to the previous constructor, but with + a different initialValue type. We probably don't want to support + this right away, but possibly at some point in the future. + +v = IntVArray(int intialValue, IntArray() initialLengths) + : Creates initialLengths.len() items each with a number of elements + matching the values provided by the initialLengths array. The + initial value for all elements is 'initalValue'. + +v = IntVArray(IntVArray clone) + : Created as a copy of 'clone'. + + +Usage (Accessing) +----------------- + +int = v.len() (number of items) + +IntArray = v[4] (reference of v's data) +IntArray = v[-1] (same as previous) +IntVArray = v[3:9] (reference of v's data; stride provides indexing) +IntVArray = v[:] (same as previous, stride probably not needed) + +IntVArray = v[IntArray mask] + : Returns a reference of v's data; uses mask variable internally +IntVArray = v[BoolArray mask] + : Not currently supported, but would provide the same as previous. + This 'BoolArray' mask should be implemented sometime soon (for + this and all other FixedArrays). + +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +Question: Support v[5][2] semantics. This might work out-of-the-box since + v[5] would return an IntArray, which supports [] also. In this + case it would be fine and a single 'int' would be returned. + But for v[5][1:3], we would return another IntArray + instead of a regular int, so the levels of indirection + for original internal IntVArray data might get too complicated. Do + we support this semantic or not. The problem is that if we don't + want to support it, we'll have to specifically disable it somehow + since we'll get it by default (v[5] returns IntArray, which would + automatically support [1:3]). + +Question: To avoid the previous issue, we'll probably want a special element + accessor method (probably called 'element'). That'll have to have + the ability to take in a 'slice' as an argument. Would that all + work? + +In the continuing text, we'll assume we support an 'element' accessor +method and not the [5][6] double-box notation. +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + +int = v[4].element(1) (returns a single integer) +int = v[4].element(-1) (same as previous) +IntArray = v[4].element(2:7) (return IntArray referencing original data) +IntArray = v[4].element(:) (same as previous; no 'stride' needed) +int = v[4].len() (would not work; int doesn't support 'len') +int = v[4].element(:).len() (the number of elements for item 4 ???) 
+ +IntArray = v[3:9].element(1) (All of the element-1 members for items 3 - 9) +IntArray = v.element(1) (All of the element-1 members for each item) +IntArray = v[3:9].element(-1) (same as previous, but returns last elements) +IntVArray = v[3:9].element(2:7) (subset of the original ?????) +IntVArray = v[3:9].element(:) (subset of the original v; only items 3 - 9) +int 6 = v[3:9].len() + +IntArray = v[:].element(1) (List of all element-1s from all items ???) +IntArray = v[:].element(-1) (List of all last elements ???) +IntVArray = v[:].element(2:7) (subset of the original ????) +IntVArray = v[:].element(:) (basically a reference of the original) +int = v[:].len() (number of items in v) + +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +Question: We want to support easy indexing right into a 'X' V3fArray + or something similar. Lets say we want to add a V3f to all + coordinates of the entire system. We'd want to be able to + write expressions like: + + x[ v[:].element(:) ] += imath.V3f(1,2,3) + x[ v[:].element(0) ] += imath.V3f(1,2,3) (the 'root' point) + x[ v[3].element(:) ] += ... + x[ v[1:10].element(:) ] += ... + x[ v[1:10].element(1:4) ] += ... + + But in many cases, the expression returns another IntVArray. + Should/can we provide indexing into V3fArray from an IntVArray? + Do we currently support indexing into a V3fArray from IntArray? + What other ways can we make this convenient. +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +Question: What about cases where not all items support the same number + of elements. What happens in these cases: + + IntArray = v[:].element(7) + + for cases where some or all of the items don't have an element-7. + Would the IntArray be a subset of v's items (i.e. if only 3 items + could return an element-7, the IntArray would be 3 long). Or + would the IntArray contain invalid/None/undefined integers within + it. +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +Question: What other accessor/modification methods do we want to support. + append, remove, pop, push, etc? +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + diff --git a/Sources/MetaPy/include/python/PyImath/PyImath.h b/Sources/MetaPy/include/python/PyImath/PyImath.h new file mode 100644 index 00000000..38ba488a --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImath.h @@ -0,0 +1,49 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
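To make the proposed semantics in varraySemantics.txt above more concrete, here is a small hypothetical sketch in the same spirit (it keeps the IntVArray spelling used in the notes; several of the questions raised there are still open, so read this as the intended direction rather than a settled API):

    import imath

    v = imath.IntVArray(0, 10, 5)  # 10 items, 5 elements each, all initialized to 0
    len(v)                         # 10 -- the number of items
    item = v[4]                    # an IntArray referencing item 4's elements
    part = v[3:9]                  # an IntVArray referencing items 3 through 8
    roots = v.element(0)           # proposed accessor: the element-0 member of every item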
+//
+
+// clang-format off
+
+#ifndef _PyImath_h_
+#define _PyImath_h_
+
+#include
+#include
+#include
+#include "PyImathFixedArray.h"
+#include "PyImathFixedMatrix.h"
+#include "PyImathFixedArray2D.h"
+#include "PyImathFixedVArray.h"
+
+namespace PyImath {
+
+typedef FixedArray<bool>            BoolArray;
+typedef FixedArray<signed char>     SignedCharArray;
+typedef FixedArray<unsigned char>   UnsignedCharArray;
+typedef FixedArray<short>           ShortArray;
+typedef FixedArray<unsigned short>  UnsignedShortArray;
+typedef FixedArray<int>             IntArray;
+typedef FixedArray<unsigned int>    UnsignedIntArray;
+typedef FixedArray<float>           FloatArray;
+typedef FixedArray<double>          DoubleArray;
+
+typedef FixedArray<IMATH_NAMESPACE::Quatf> QuatfArray;
+typedef FixedArray<IMATH_NAMESPACE::Quatd> QuatdArray;
+
+typedef FixedMatrix<int>    IntMatrix;
+typedef FixedMatrix<float>  FloatMatrix;
+typedef FixedMatrix<double> DoubleMatrix;
+
+typedef FixedArray2D<float>  FloatArray2D;
+typedef FixedArray2D<int>    IntArray2D;
+typedef FixedArray2D<double> DoubleArray2D;
+
+typedef FixedVArray<int>   VIntArray;
+typedef FixedVArray<float> VFloatArray;
+typedef FixedVArray<IMATH_NAMESPACE::Vec2<int> >   VV2iArray;
+typedef FixedVArray<IMATH_NAMESPACE::Vec2<float> > VV2fArray;
+
+}
+
+#endif
diff --git a/Sources/MetaPy/include/python/PyImath/PyImathAPI.h b/Sources/MetaPy/include/python/PyImath/PyImathAPI.h
new file mode 100644
index 00000000..9da24ec1
--- /dev/null
+++ b/Sources/MetaPy/include/python/PyImath/PyImathAPI.h
@@ -0,0 +1,60 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright Contributors to the OpenEXR Project.
+//
+
+// clang-format off
+
+#ifndef _PyImathAPI_h_
+#define _PyImathAPI_h_
+
+#include
+
+#if PY_MAJOR_VERSION >= 3
+
+  // Big changes in Python3 with regard to PyClass. Most of these functions
+  // are gone, so the equivalent functionality is done this way...
+  #define PyClass_Check(object) \
+      PyObject_IsInstance (object, reinterpret_cast<PyObject *> (&PyType_Type))
+
+  // Py_FindMethod is gone, so you must look up functions by searching
+  // through an object's attributes.
+  #define Py_FindMethod(methods, self, name) \
+      PyObject_GenericGetAttr(self, PyBytes_FromString(name))
+
+  // One of the biggest differences between 2&3 is the support for Unicode.
+  // Working with strings at the C API level, one has to be careful that the
+  // returned object is not Unicode, which would need to be decoded before
+  // being interpreted. These macros will return the PyBytes type of PyObject
+  // pointer that replaces PyString.
+  #define PyString_Check(str)       PyBytes_Check(str)
+  #define PyString_FromString(str)  PyBytes_FromString(str)
+  #define PyString_AsString(obj)    PyBytes_AsString(obj)
+  #define PyString_AsStringAndSize(obj, str, len) PyBytes_AsStringAndSize(obj, str, len)
+
+  // Python3 interprets all integers as long types and has deprecated PyInt.
+  #define PyInt_Check(x)      PyLong_Check(x)
+  #define PyInt_AsLong(x)     PyLong_AsLong(x)
+  #define PyInt_AS_LONG(x)    PyLong_AsLong(x)
+  #define PyInt_AsSsize_t(x)  PyLong_AsSsize_t(x)
+  #define PyInt_FromLong(x)   PyLong_FromLong(x)
+
+  // These flags are not present in Python3 and must be replaced with the
+  // default set of flags so that OR'ing them together doesn't alter the
+  // flags.
+  #define Py_TPFLAGS_CHECKTYPES        Py_TPFLAGS_DEFAULT
+  #define Py_TPFLAGS_HAVE_RICHCOMPARE  Py_TPFLAGS_DEFAULT
+
+  // The __repr__ for a TypeObject will be encoded and needs to be
+  // processed as a PyBytes object before it can be returned as a string.
+  #define PYUTIL_OBJECT_REPR(obj)  PyObject_Str (PyObject_Repr (obj))
+
+#else
+
+  // Python2 code will need to access PyObject_Repr() via this macro so
+  // that both 2&3 can compile without modification.
+ #define PYUTIL_OBJECT_REPR(obj) PyObject_Repr (obj) + +#endif + +#endif // _PyImathAPI_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathAutovectorize.h b/Sources/MetaPy/include/python/PyImath/PyImathAutovectorize.h new file mode 100644 index 00000000..4418cb52 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathAutovectorize.h @@ -0,0 +1,2865 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + + +#ifndef _PyImathAutovectorize_h_ +#define _PyImathAutovectorize_h_ + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "PyImathFixedArray.h" +#include "PyImathTask.h" +#include "PyImathUtil.h" + +namespace PyImath { + +struct op_with_precomputation {}; + +namespace detail { + +using boost::is_base_of; +using boost::is_same; +using boost::is_const; +using boost::remove_const; +using boost::remove_reference; +using boost::function_traits; + +using boost::mpl::at; +using boost::mpl::at_c; +using boost::mpl::push_front; +using boost::mpl::vector; +using boost::mpl::push_back; +using boost::mpl::transform; +using boost::mpl::fold; +using boost::mpl::_; +using boost::mpl::_1; +using boost::mpl::_2; +using boost::mpl::long_; +using boost::mpl::false_; +using boost::mpl::true_; +using boost::mpl::not_; +using boost::mpl::or_; +using boost::mpl::and_; +using boost::mpl::size; +using boost::mpl::remove_if; +using boost::mpl::if_; +using boost::mpl::for_each; + + +struct null_precomputation { + static void precompute(size_t len) { return; } +}; + +template struct op_precompute +{ + static void + apply(size_t len) + { + if_, + T, + null_precomputation>::type::precompute(len); + } +}; + +template +struct possible_vectorizations +{ + typedef typename fold< + typename possible_vectorizations::type, + vector<>, + push_back >,push_back<_2,true_> > + >::type type; +}; + +template <> +struct possible_vectorizations<0> +{ + typedef vector > type; +}; + +template +struct disallow_vectorization +{ + template + struct apply + { + // Valid = !Vectorize || Vectorizable + typedef typename transform >::type DontVectorize; + typedef typename transform >::type Valid; + typedef typename not_ > >::type type; + }; +}; + +template +struct allowable_vectorizations +{ + typedef typename possible_vectorizations::value>::type possible; + typedef typename remove_if >::type type; +}; + +template +bool any_masked(const T &value) +{ + return false; +}; + +template +bool any_masked(const PyImath::FixedArray &value) +{ + return value.isMaskedReference(); +}; + +template +bool any_masked(const T1 &a, const T2 &b) +{ + return any_masked(a) || any_masked(b); +} + +template +bool any_masked(const T1 &a, const T2 &b, const T3 &c) +{ + return any_masked(a,b) || any_masked(c); +} + +template +bool any_masked(const T1 &a, const T2 &b, const T3 &c, const T4 &d) +{ + return any_masked(a,b) || any_masked(c,d); +} + +//----------------------------------------------------------------------------------------- + +// +// measure_argument returns a pair indicating the integral length of the argument +// (scalar arguments have implicit length 1), and a bool indicating whether the argument +// is a vectorized argument. 
+// +template +struct measure_argument +{ + static inline std::pair apply(T arg) { return std::make_pair(1,false); } +}; + +template +struct measure_argument > +{ + static inline std::pair apply(const PyImath::FixedArray &arg) { return std::make_pair(arg.len(),true); } +}; + +// +// match_lengths returns the compatible length given two argument lengths +// +static inline std::pair +match_lengths(const std::pair &len1, const std::pair &len2) +{ + // scalar arguemnts are always compatible with other arguments + if (len1.second == false) return len2; + if (len2.second == false) return len1; + + // now both arguments are vectorized, check for dimension match + if (len1.first != len2.first) + throw std::invalid_argument("Array dimensions passed into function do not match"); + + return len1; +} + + +// +// measure_arguments finds the length that a return value from a given +// set of arguments should have, throwing an exception if the lengths +// are incompatible. If all arguments are scalar, length 1 is returned. +// +template +size_t +measure_arguments(const arg1_type &arg1) +{ + std::pair len = measure_argument::apply(arg1); + return len.first; +} + +template +size_t +measure_arguments(const arg1_type &arg1, const arg2_type &arg2) +{ + std::pair len = measure_argument::apply(arg1); + len = match_lengths(len,measure_argument::apply(arg2)); + return len.first; +} + +template +size_t +measure_arguments(const arg1_type &arg1, const arg2_type &arg2, const arg3_type &arg3) +{ + std::pair len = measure_argument::apply(arg1); + len = match_lengths(len,measure_argument::apply(arg2)); + len = match_lengths(len,measure_argument::apply(arg3)); + return len.first; +} + +template +size_t +measure_arguments(const arg1_type &arg1, const arg2_type &arg2, const arg3_type &arg3, const arg4_type &arg4) +{ + std::pair len = measure_argument::apply(arg1); + len = match_lengths(len,measure_argument::apply(arg2)); + len = match_lengths(len,measure_argument::apply(arg3)); + len = match_lengths(len,measure_argument::apply(arg4)); + return len.first; +} + +template +size_t +measure_arguments(const arg1_type &arg1, const arg2_type &arg2, const arg3_type &arg3, const arg4_type &arg4, const arg5_type &arg5) +{ + std::pair len = measure_argument::apply(arg1); + len = match_lengths(len,measure_argument::apply(arg2)); + len = match_lengths(len,measure_argument::apply(arg3)); + len = match_lengths(len,measure_argument::apply(arg4)); + len = match_lengths(len,measure_argument::apply(arg5)); + return len.first; +} + +//----------------------------------------------------------------------------------------- + +template +struct create_uninitalized_return_value +{ + static T apply(size_t length) + { + return T(); + } +}; + +template +struct create_uninitalized_return_value > +{ + static PyImath::FixedArray apply(size_t length) + { + return PyImath::FixedArray(Py_ssize_t(length),PyImath::UNINITIALIZED); + } +}; + +template +struct vectorized_result_type +{ + typedef typename if_,T>::type type; +}; + +template +struct SimpleNonArrayWrapper +{ + struct ReadOnlyDirectAccess + { + ReadOnlyDirectAccess (const T& arg) + : _arg (arg) {} + ReadOnlyDirectAccess (const ReadOnlyDirectAccess& other) + : _arg (other._arg) {} + + const T& operator[] (size_t) const { return _arg; } + + private: + const T& _arg; + }; + + struct WritableDirectAccess : public ReadOnlyDirectAccess + { + WritableDirectAccess (T& arg) + : ReadOnlyDirectAccess (arg), _arg (arg) {} + WritableDirectAccess (const WritableDirectAccess& other) + : ReadOnlyDirectAccess 
(other), _arg (other._arg) {} + + T& operator[] (size_t) { return _arg; } + + private: + T& _arg; + }; + + typedef ReadOnlyDirectAccess ReadOnlyMaskedAccess; + typedef WritableDirectAccess WritableMaskedAccess; +}; + + +template +struct access_type +{ + typedef typename remove_reference::type prim_type; + typedef typename remove_const::type base_type; + typedef typename if_, + const PyImath::FixedArray &, + PyImath::FixedArray &>::type reference_type; + typedef typename remove_reference::type class_type; + + typedef typename if_, + typename class_type::ReadOnlyMaskedAccess, + typename class_type::WritableMaskedAccess>::type masked; + typedef typename if_, + typename class_type::ReadOnlyDirectAccess, + typename class_type::WritableDirectAccess>::type direct; +}; + +template +struct argument_access_type +{ + typedef typename remove_const::type>::type base_type; + typedef typename if_ &,T>::type type; + + typedef typename if_::type, + SimpleNonArrayWrapper >::type _class_type; + + typedef typename _class_type::ReadOnlyMaskedAccess masked; + typedef typename _class_type::ReadOnlyDirectAccess direct; +}; + +template +struct result_access_type +{ + typedef typename remove_const::type>::type base_type; + typedef typename if_,T>::type type; + + typedef typename if_ >::type _class_type; + + typedef typename _class_type::WritableMaskedAccess masked; + typedef typename _class_type::WritableDirectAccess direct; +}; + +template +AccessType getArrayAccess (T& value) + { return AccessType (value); } + +template +AccessType getArrayAccess (const PyImath::FixedArray& array) + { return AccessType (array); } + +template +AccessType getArrayAccess (PyImath::FixedArray& array) + { return AccessType (array); } + +// + +template +struct VectorizedOperation1 : public Task +{ + result_access_type retAccess; + access_type access; + + VectorizedOperation1 (result_access_type r, access_type a1) + : retAccess (r), access (a1) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + retAccess[i] = Op::apply (access[i]); + } + } +}; + +template +struct VectorizedOperation2 : public Task +{ + result_access_type retAccess; + access_type access; + arg1_access_type argAccess; + + VectorizedOperation2(result_access_type r, access_type a1, arg1_access_type a2) + : retAccess (r), access (a1), argAccess (a2) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + retAccess[i] = Op::apply (access[i], argAccess[i]); + } + } +}; + +template +struct VectorizedOperation3 : public Task +{ + result_access_type retAccess; + access_type access; + arg1_access_type arg1Access; + arg2_access_type arg2Access; + + VectorizedOperation3(result_access_type r, access_type a, + arg1_access_type a1, arg2_access_type a2) + : retAccess(r), access(a), arg1Access(a1), arg2Access(a2) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + retAccess[i] = Op::apply(access[i], arg1Access[i], arg2Access[i]); + } + } +}; + +template +struct VectorizedOperation4 : public Task +{ + result_access_type retAccess; + access_type access; + arg1_access_type arg1Access; + arg2_access_type arg2Access; + arg3_access_type arg3Access; + + VectorizedOperation4(result_access_type r, access_type a, + arg1_access_type a1, arg2_access_type a2, arg3_access_type a3) + : retAccess(r), access(a), arg1Access(a1), arg2Access(a2), arg3Access(a3) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + retAccess[i] = 
Op::apply(access[i], arg1Access[i], arg2Access[i], arg3Access[i]); + } + } +}; + +template +struct VectorizedOperation5 : public Task +{ + result_access_type retAccess; + access_type access; + arg1_access_type arg1Access; + arg2_access_type arg2Access; + arg3_access_type arg3Access; + arg4_access_type arg4Access; + + VectorizedOperation5(result_access_type r, access_type a, + arg1_access_type a1, arg2_access_type a2, arg3_access_type a3, arg4_access_type a4) + : retAccess(r), access(a), arg1Access(a1), arg2Access(a2), arg3Access(a3), arg4Access(a4) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + retAccess[i] = Op::apply(access[i], arg1Access[i], arg2Access[i], arg3Access[i], arg4Access[i]); + } + } +}; + +template +struct VectorizedFunction1 { + BOOST_STATIC_ASSERT((size::value == function_traits::arity)); + + typedef function_traits traits; + typedef typename fold >::type any_vectorized; + + typedef typename result_access_type::type result_type; + typedef typename result_access_type::direct result_access_type; + // Result array is created here 'from scratch', so is always 'direct' access. + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + static result_type + apply(arg1_type arg1) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(arg1); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type resultAccess = getArrayAccess (retval); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation1 + vop (resultAccess, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation1 + vop (resultAccess, argAccess); + dispatchTask(vop,len); + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<1> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+") - "; + } +}; + +template +struct VectorizedFunction2 { + BOOST_STATIC_ASSERT((size::value == function_traits::arity)); + + typedef function_traits traits; + typedef typename fold >::type any_vectorized; + + typedef typename result_access_type::type result_type; + typedef typename result_access_type::direct result_access_type; + // Result array is created here 'from scratch', so is always 'direct' access. 
+ + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type >::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + static result_type + apply(arg1_type arg1, arg2_type arg2) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(arg1,arg2); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type resultAccess = getArrayAccess (retval); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation2 + vop (resultAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation2 + vop (resultAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation2 + vop (resultAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation2 + vop (resultAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<2> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+") - "; + } +}; + +template +struct VectorizedFunction3 { + BOOST_STATIC_ASSERT((size::value == function_traits::arity)); + + typedef function_traits traits; + typedef typename fold >::type any_vectorized; + + typedef typename result_access_type::type result_type; + typedef typename result_access_type::direct result_access_type; + // Result array is created here 'from scratch', so is always 'direct' access. 
+ + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type >::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + typedef typename argument_access_type >::type>::type arg3_type; + typedef typename argument_access_type >::type>::direct arg3_direct_access_type; + typedef typename argument_access_type >::type>::masked arg3_masked_access_type; + + static result_type + apply(arg1_type arg1, arg2_type arg2, arg3_type arg3) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(arg1,arg2,arg3); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type resultAccess = getArrayAccess (retval); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + VectorizedOperation3 + vop (resultAccess, arg1Access, arg2Access, arg3Access); + dispatchTask(vop,len); + } + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<3> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+","+args.elements[2].name+") - "; + } +}; + +template +struct VectorizedFunction4 { + BOOST_STATIC_ASSERT((size::value == function_traits::arity)); + + typedef function_traits traits; + typedef 
typename fold >::type any_vectorized; + + typedef typename result_access_type::type result_type; + typedef typename result_access_type::direct result_access_type; + // Result array is created here 'from scratch', so is always 'direct' access. + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type >::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + typedef typename argument_access_type >::type>::type arg3_type; + typedef typename argument_access_type >::type>::direct arg3_direct_access_type; + typedef typename argument_access_type >::type>::masked arg3_masked_access_type; + + typedef typename argument_access_type >::type>::type arg4_type; + typedef typename argument_access_type >::type>::direct arg4_direct_access_type; + typedef typename argument_access_type >::type>::masked arg4_masked_access_type; + + static result_type + apply(arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(arg1,arg2,arg3,arg4); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type resultAccess = getArrayAccess (retval); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + 
dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + VectorizedOperation4 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access); + dispatchTask(vop,len); + } + } + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<4> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+","+args.elements[2].name+","+args.elements[3].name+") - "; + } +}; + +template +struct VectorizedFunction5 { + BOOST_STATIC_ASSERT((size::value == function_traits::arity)); + + typedef function_traits traits; + typedef typename fold >::type any_vectorized; + + typedef typename result_access_type::type result_type; + typedef typename result_access_type::direct result_access_type; + // Result array is created here 'from scratch', so is always 'direct' access. 
+ + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type >::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + typedef typename argument_access_type >::type>::type arg3_type; + typedef typename argument_access_type >::type>::direct arg3_direct_access_type; + typedef typename argument_access_type >::type>::masked arg3_masked_access_type; + + typedef typename argument_access_type >::type>::type arg4_type; + typedef typename argument_access_type >::type>::direct arg4_direct_access_type; + typedef typename argument_access_type >::type>::masked arg4_masked_access_type; + + typedef typename argument_access_type >::type>::type arg5_type; + typedef typename argument_access_type >::type>::direct arg5_direct_access_type; + typedef typename argument_access_type >::type>::masked arg5_masked_access_type; + + static result_type + apply(arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, arg5_type arg5) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(arg1,arg2,arg3,arg4,arg5); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type resultAccess = getArrayAccess (retval); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); 
+ + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, 
arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + if (any_masked(arg3)) + { + arg3_masked_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + else + { + arg3_direct_access_type arg3Access = + getArrayAccess (arg3); + + if (any_masked(arg4)) + { + arg4_masked_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, 
arg5Access); + dispatchTask(vop,len); + } + } + else + { + arg4_direct_access_type arg4Access = + getArrayAccess (arg4); + + if (any_masked(arg5)) + { + arg5_masked_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + else + { + arg5_direct_access_type arg5Access = + getArrayAccess (arg5); + + VectorizedOperation5 + vop (resultAccess, arg1Access, arg2Access, arg3Access, arg4Access, arg5Access); + dispatchTask(vop,len); + } + } + } + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<5> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+","+args.elements[2].name+","+args.elements[3].name+","+args.elements[4].name+") - "; + } +}; + +template +struct function_binding +{ + std::string _name, _doc; + const Keywords &_args; + + + function_binding(const std::string &name, const std::string &doc,const Keywords &args) + : _name(name), _doc(doc), _args(args) + {} + + template + void operator()(Vectorize) const + { + typedef typename at, + VectorizedFunction2, + VectorizedFunction3, + VectorizedFunction4, + VectorizedFunction5 + >, + long_::arity> >::type vectorized_function_type; + std::string doc = _name + vectorized_function_type::format_arguments(_args) + _doc; + boost::python::def(_name.c_str(),&vectorized_function_type::apply,doc.c_str(),_args); + } +}; + +template +function_binding +build_function_binding(Func *func,const std::string &name,const std::string &doc,const Keywords &args) +{ + return function_binding(name,doc,args); +} + +template +struct generate_bindings_struct +{ + //BOOST_STATIC_ASSERT(size::value == function_traits::arity); + static void apply(const std::string &name,const std::string &doc,const Keywords &args) { + for_each::type>(build_function_binding(Op::apply,name,doc,args)); + } +}; + + +template +struct VectorizedVoidOperation0 : public Task +{ + access_type access; + + VectorizedVoidOperation0 (access_type a) : access(a) {} + + void execute (size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Op::apply (access[i]); + } + } +}; + +template +struct VectorizedVoidOperation1 : public Task +{ + access_type access; + arg1_access_type arg1; + + VectorizedVoidOperation1(access_type a, arg1_access_type a1) : access(a), arg1(a1) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Op::apply (access[i], arg1[i]); + } + } +}; + +template +struct VectorizedMaskedVoidOperation1 : public Task +{ + access_type access; + arg1_access_type arg1; + array_type array; + + VectorizedMaskedVoidOperation1(access_type a, arg1_access_type a1, array_type arr) + : access(a), arg1(a1), array(arr) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + const size_t ri = array.raw_ptr_index(i); + Op::apply (access[i], arg1[ri]); + } + } +}; + +template +struct VectorizedVoidOperation2 : public Task +{ + access_type access; + arg1_access_type arg1; + arg2_access_type arg2; + + VectorizedVoidOperation2(access_type a, arg1_access_type a1, arg2_access_type a2) + : access(a), arg1(a1), arg2(a2) {} + + void execute(size_t start, size_t end) + { + for (size_t i = start; i < end; ++i) + { + Op::apply (access[i], arg1[i], arg2[i]); + } + } +}; + + +template +struct VectorizedVoidMemberFunction0 { + BOOST_STATIC_ASSERT((size::value+1 
== function_traits::arity)); + + typedef function_traits traits; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + static reference_type + apply(reference_type array) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array); + op_precompute::apply(len); + + if (any_masked(array)) + { + masked_access_type access (array); + VectorizedVoidOperation0 vop (access); + dispatchTask(vop,len); + } + else + { + direct_access_type access (array); + VectorizedVoidOperation0 vop (access); + dispatchTask(vop,len); + } + + PY_IMATH_RETURN_PYTHON; + return array; + } +}; + +template +struct VectorizedVoidMemberFunction1 { + BOOST_STATIC_ASSERT((size::value+1 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + static reference_type + apply(reference_type array, arg1_type arg1) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array,arg1); + op_precompute::apply(len); + + if (any_masked(array)) + { + masked_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + } + else + { + direct_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + } + + PY_IMATH_RETURN_PYTHON; + return array; + } + + static std::string + format_arguments(const boost::python::detail::keywords<1> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+") - "; + } +}; + +// +// special class to handle single argument void memberfunctions, such as those +// used for the inplace operators like +=, -=, etc. In this case we allow additional +// compatibilty between a masked class and an unmasked right hand side, using the +// mask to select results. 
+// +template +struct VectorizedVoidMaskableMemberFunction1 { + BOOST_STATIC_ASSERT((2 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + typedef typename argument_access_type::type arg1_type; + typedef typename argument_access_type::direct arg1_direct_access_type; + typedef typename argument_access_type::masked arg1_masked_access_type; + + static reference_type + apply(reference_type array, arg1_type arg1) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = array.match_dimension(arg1, false); + op_precompute::apply(len); + + if (array.isMaskedReference() && (size_t) arg1.len() == array.unmaskedLength()) + { + // class is masked, and the unmasked length matches the right hand side + + masked_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedMaskedVoidOperation1 + vop (arrayAccess, argAccess, array); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedMaskedVoidOperation1 + vop (arrayAccess, argAccess, array); + dispatchTask(vop,len); + } + } + else + { + // the two arrays match length (masked or otherwise), use the standard path. + + if (any_masked(array)) + { + masked_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + } + else + { + direct_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedVoidOperation1 + vop (arrayAccess, argAccess); + dispatchTask(vop,len); + } + } + } + + PY_IMATH_RETURN_PYTHON; + return array; + } + + static std::string + format_arguments(const boost::python::detail::keywords<1> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+") - "; + } +}; + +template +struct VectorizedVoidMemberFunction2 { + BOOST_STATIC_ASSERT((size::value+1 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type >::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + static reference_type + apply(reference_type array, arg1_type arg1, arg2_type arg2) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array,arg1,arg2); + op_precompute::apply(len); + + if (any_masked(array)) + { + masked_access_type arrayAccess (array); + + if (any_masked(arg1)) 
+ { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + } + else + { + direct_access_type arrayAccess (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedVoidOperation2 + vop (arrayAccess, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + } + + PY_IMATH_RETURN_PYTHON; + return array; + } + + static std::string + format_arguments(const boost::python::detail::keywords<2> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+") - "; + } +}; + + +template +struct VectorizedMemberFunction0 { + BOOST_STATIC_ASSERT((size::value+1 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename vectorized_result_type::type result_type; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + // The return value can't be const or masked. Verify that condition. 
+ BOOST_STATIC_ASSERT( !is_const::value ); + typedef typename result_type::WritableDirectAccess result_access_type; + + static result_type + apply(reference_type array) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type returnAccess (retval); + + if (any_masked(array)) + { + masked_access_type access (array); + VectorizedOperation1 vop(returnAccess,access); + dispatchTask(vop,len); + } + else + { + direct_access_type access (array); + VectorizedOperation1 vop(returnAccess,access); + dispatchTask(vop,len); + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } +}; + +template +struct VectorizedMemberFunction1 { + BOOST_STATIC_ASSERT((size::value+1 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename vectorized_result_type::type result_type; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + // The return value can't be const or masked. Verify that condition. + BOOST_STATIC_ASSERT( !is_const::value ); + typedef typename result_type::WritableDirectAccess result_access_type; + + static result_type + apply(reference_type array, arg1_type arg1) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array,arg1); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type returnAccess (retval); + + if (any_masked(array)) + { + masked_access_type access (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation2 vop (returnAccess, access, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation2 vop (returnAccess, access, argAccess); + dispatchTask(vop,len); + } + } + else + { + direct_access_type access (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation2 vop (returnAccess, access, argAccess); + dispatchTask(vop,len); + } + else + { + arg1_direct_access_type argAccess = + getArrayAccess (arg1); + + VectorizedOperation2 vop (returnAccess, access, argAccess); + dispatchTask(vop,len); + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<1> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+") - "; + } +}; + +template +struct VectorizedMemberFunction2 { + BOOST_STATIC_ASSERT((size::value+1 == function_traits::arity)); + + typedef function_traits traits; + + typedef typename vectorized_result_type::type result_type; + + typedef typename access_type::reference_type reference_type; + typedef typename access_type::direct direct_access_type; + typedef typename access_type::masked masked_access_type; + + typedef typename argument_access_type >::type>::type arg1_type; + typedef typename argument_access_type >::type>::direct arg1_direct_access_type; + typedef typename argument_access_type >::type>::masked arg1_masked_access_type; + + typedef typename argument_access_type 
>::type>::type arg2_type; + typedef typename argument_access_type >::type>::direct arg2_direct_access_type; + typedef typename argument_access_type >::type>::masked arg2_masked_access_type; + + // The return value can't be const or masked. Verify that condition. + BOOST_STATIC_ASSERT( !is_const::value ); + typedef typename result_type::WritableDirectAccess result_access_type; + + static result_type + apply(reference_type array, arg1_type arg1, arg2_type arg2) + { + PY_IMATH_LEAVE_PYTHON; + size_t len = measure_arguments(array,arg1,arg2); + op_precompute::apply(len); + result_type retval = create_uninitalized_return_value::apply(len); + + result_access_type returnAccess (retval); + + if (any_masked(array)) + { + masked_access_type access (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + } + else + { + direct_access_type access (array); + + if (any_masked(arg1)) + { + arg1_masked_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + else + { + arg1_direct_access_type arg1Access = + getArrayAccess (arg1); + + if (any_masked(arg2)) + { + arg2_masked_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + else + { + arg2_direct_access_type arg2Access = + getArrayAccess (arg2); + + VectorizedOperation3 + vop (returnAccess, access, arg1Access, arg2Access); + dispatchTask(vop,len); + } + } + } + + PY_IMATH_RETURN_PYTHON; + return retval; + } + + static std::string + format_arguments(const boost::python::detail::keywords<2> &args) + { + // TODO: add types here + return std::string("(")+args.elements[0].name+","+args.elements[1].name+") - "; + } +}; + +template +struct member_function_binding +{ + Cls &_cls; + std::string _name, _doc; + const Keywords &_args; + + member_function_binding(Cls &cls,const std::string &name, const std::string &doc,const Keywords &args) + : _cls(cls), _name(name), _doc(doc), _args(args) + {} + + template + void operator()(Vectorize) const + { + typedef typename if_::result_type>, + typename if_ >, + VectorizedVoidMaskableMemberFunction1, + VectorizedVoidMemberFunction1 >::type, + VectorizedMemberFunction1 + >::type member_func1_type; + + typedef typename if_::result_type>, + 
VectorizedVoidMemberFunction2, + VectorizedMemberFunction2 >::type member_func2_type; + + typedef typename if_::result_type>, + boost::python::return_internal_reference<>, // the void vectorizations return a reference to self + boost::python::default_call_policies>::type call_policies; + + typedef typename at, + long_::arity> >::type vectorized_function_type; + std::string doc = _name + vectorized_function_type::format_arguments(_args) + _doc; + _cls.def(_name.c_str(),&vectorized_function_type::apply,doc.c_str(),_args,call_policies()); + } +}; + +template +member_function_binding +build_member_function_binding(Cls &cls,Func *func,const std::string &name,const std::string &doc,const Keywords &args) +{ + return member_function_binding(cls,name,doc,args); +} + +template +struct generate_member_bindings_struct +{ + //BOOST_STATIC_ASSERT(size::value+1 == function_traits::arity); + static void apply(Cls &cls,const std::string &name,const std::string &doc,const Keywords &args) { + for_each::type>(build_member_function_binding(cls,Op::apply,name,doc,args)); + } +}; + +template +void +generate_single_member_binding(Cls &cls,Func *func,const std::string &name,const std::string &doc) +{ + typedef typename if_::result_type>, + VectorizedVoidMemberFunction0,Func>, + VectorizedMemberFunction0,Func> >::type vectorized_function_type; + + typedef typename if_::result_type>, + boost::python::return_internal_reference<>, // the void vectorizations return a reference to self + boost::python::default_call_policies>::type call_policies; + + cls.def(name.c_str(),&vectorized_function_type::apply,doc.c_str(),call_policies()); +} + +} // namespace detail + +// TODO: update for arg("name")=default_value syntax +template +void generate_bindings(const std::string &name,const std::string &doc,const boost::python::detail::keywords<1> &args) { + using namespace detail; + generate_bindings_struct,boost::python::detail::keywords<1> >::apply(name,doc,args); +} + +template +void generate_bindings(const std::string &name,const std::string &doc,const boost::python::detail::keywords<2> &args) { + using namespace detail; + generate_bindings_struct,boost::python::detail::keywords<2> >::apply(name,doc,args); +} + +template +void generate_bindings(const std::string &name,const std::string &doc,const boost::python::detail::keywords<3> &args) { + using namespace detail; + generate_bindings_struct,boost::python::detail::keywords<3> >::apply(name,doc,args); +} + +template +void generate_bindings(const std::string &name,const std::string &doc,const boost::python::detail::keywords<4> &args) { + using namespace detail; + generate_bindings_struct,boost::python::detail::keywords<4> >::apply(name,doc,args); +} + +template +void generate_bindings(const std::string &name,const std::string &doc,const boost::python::detail::keywords<5> &args) { + using namespace detail; + generate_bindings_struct,boost::python::detail::keywords<5> >::apply(name,doc,args); +} + +template +void +generate_member_bindings(Cls &cls,const std::string &name,const std::string &doc) +{ + using namespace detail; + generate_single_member_binding(cls,&Op::apply,name,doc); +} + +template +void +generate_member_bindings(Cls &cls,const std::string &name,const std::string &doc, + const boost::python::detail::keywords<1> &args) +{ + using boost::mpl::vector; + detail::generate_member_bindings_struct, + boost::python::detail::keywords<1> >::apply(cls,name,doc,args); +} + +template +void +generate_member_bindings(Cls &cls,const std::string &name,const std::string &doc, + const 
boost::python::detail::keywords<2> &args) +{ + using boost::mpl::vector; + detail::generate_member_bindings_struct, + boost::python::detail::keywords<2> >::apply(cls,name,doc,args); +} + +} // namespace PyImath + +#endif // _PyImathAutovectorize_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathBasicTypes.h b/Sources/MetaPy/include/python/PyImath/PyImathBasicTypes.h new file mode 100644 index 00000000..16c5ad55 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathBasicTypes.h @@ -0,0 +1,20 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + + +#ifndef _PyImathBasicTypes_h_ +#define _PyImathBasicTypes_h_ + +#include "PyImathExport.h" + +namespace PyImath { + +PYIMATH_EXPORT void register_basicTypes(); + +} + +#endif diff --git a/Sources/MetaPy/include/python/PyImath/PyImathBox.h b/Sources/MetaPy/include/python/PyImath/PyImathBox.h new file mode 100644 index 00000000..c41f3b65 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathBox.h @@ -0,0 +1,207 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathBox_h_ +#define _PyImathBox_h_ + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include "PyImathVec.h" +#include "PyImathFixedArray.h" + +namespace PyImath { + +template boost::python::class_ > register_Box2(); +template boost::python::class_ > register_Box3(); + +template boost::python::class_ > > register_BoxArray(); + +typedef FixedArray Box2sArray; +typedef FixedArray Box2iArray; +typedef FixedArray Box2i64Array; +typedef FixedArray Box2fArray; +typedef FixedArray Box2dArray; + +typedef FixedArray Box3sArray; +typedef FixedArray Box3iArray; +typedef FixedArray Box3i64Array; +typedef FixedArray Box3fArray; +typedef FixedArray Box3dArray; + +// + +// Other code in the Zeno code base assumes the existance of a class with the +// same name as the Imath class, and with static functions wrap() and +// convert() to produce a PyImath object from an Imath object and vice-versa, +// respectively. The class Boost generates from the Imath class does not +// have these properties, so we define a companion class here. +// The template argument, T, is the element type for the box (e.g., int, +// float). 
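As a rough illustration of how these companion converter classes are meant to be used from C++ binding glue, here is a minimal hedged sketch (the pyObj variable, the surrounding context, and the assumption that the PyImath converters are already registered are hypothetical, not part of this diff):

    // Convert an incoming Python object to an Imath box, modify it in C++,
    // then wrap the result back up as a new Python object.
    IMATH_NAMESPACE::Box2f box;
    if (PyImath::Box2<float>::convert (pyObj, &box))          // returns 1 on success, 0 otherwise
    {
        box.extendBy (IMATH_NAMESPACE::V2f (1.0f, 2.0f));
        PyObject *result = PyImath::Box2<float>::wrap (box);  // new Python-side box object
        // ... hand `result` back to the interpreter ...
    }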
+ +template +class Box2 { + public: + static PyObject * wrap (const IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec2 > &b); + static int convert (PyObject *p, IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec2 > *b); +}; + +template +class Box3 { + public: + static PyObject * wrap (const IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec3 > &b); + static int convert (PyObject *p, IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec3 > *v); +}; + +template +PyObject * +Box2::wrap (const IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec2 > &b) +{ + typename boost::python::return_by_value::apply < IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec2 > >::type converter; + PyObject *p = converter (b); + return p; +} + +template +PyObject * +Box3::wrap (const IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec3 > &b) +{ + typename boost::python::return_by_value::apply < IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec3 > >::type converter; + PyObject *p = converter (b); + return p; +} + +template +int +Box2::convert (PyObject *p, IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec2 > *v) +{ + boost::python::extract < IMATH_NAMESPACE::Box > extractorV2i (p); + if (extractorV2i.check()) + { + IMATH_NAMESPACE::Box b = extractorV2i(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract < IMATH_NAMESPACE::Box > extractorV2f (p); + if (extractorV2f.check()) + { + IMATH_NAMESPACE::Box b = extractorV2f(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract < IMATH_NAMESPACE::Box > extractorV2d (p); + if (extractorV2d.check()) + { + IMATH_NAMESPACE::Box b = extractorV2d(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract extractorTuple (p); + if (extractorTuple.check()) + { + boost::python::tuple t = extractorTuple(); + if (t.attr ("__len__") () == 2) + { + PyObject *minObj = + boost::python::extract (t[0])().ptr(); + PyObject *maxObj = + boost::python::extract (t[1])().ptr(); + + IMATH_NAMESPACE::Vec2 min, max; + if (! V2::convert (minObj, &min)) + return 0; + if (! V2::convert (maxObj, &max)) + return 0; + + v->min = min; + v->max = max; + + return 1; + } + } + + return 0; +} + +template +int +Box3::convert (PyObject *p, IMATH_NAMESPACE::Box< IMATH_NAMESPACE::Vec3 > *v) +{ + boost::python::extract < IMATH_NAMESPACE::Box > extractorV3i (p); + if (extractorV3i.check()) + { + IMATH_NAMESPACE::Box b = extractorV3i(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract < IMATH_NAMESPACE::Box > extractorV3f (p); + if (extractorV3f.check()) + { + IMATH_NAMESPACE::Box b = extractorV3f(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract < IMATH_NAMESPACE::Box > extractorV3d (p); + if (extractorV3d.check()) + { + IMATH_NAMESPACE::Box b = extractorV3d(); + v->min = b.min; + v->max = b.max; + return 1; + } + + boost::python::extract extractorTuple (p); + if (extractorTuple.check()) + { + boost::python::tuple t = extractorTuple(); + if (t.attr ("__len__") () == 2) + { + PyObject *minObj = + boost::python::extract (t[0])().ptr(); + PyObject *maxObj = + boost::python::extract (t[1])().ptr(); + + IMATH_NAMESPACE::Vec3 min, max; + if (! V3::convert (minObj, &min)) + return 0; + if (! 
V3::convert (maxObj, &max)) + return 0; + + v->min = min; + v->max = max; + + return 1; + } + } + + return 0; +} + +typedef Box2 Box2i; +typedef Box2 Box2i64; +typedef Box2 Box2f; +typedef Box2 Box2d; + +typedef Box3 Box3i; +typedef Box3 Box3i64; +typedef Box3 Box3f; +typedef Box3 Box3d; + +} + +#endif diff --git a/Sources/MetaPy/include/python/PyImath/PyImathBoxArrayImpl.h b/Sources/MetaPy/include/python/PyImath/PyImathBoxArrayImpl.h new file mode 100644 index 00000000..2e0b4be1 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathBoxArrayImpl.h @@ -0,0 +1,84 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathBoxArrayImpl_h_ +#define _PyImathBoxArrayImpl_h_ + +// +// This .C file was turned into a header file so that instantiations +// of the various Box* types can be spread across multiple files in +// order to work around MSVC limitations. +// + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include "PyImath.h" +#include "PyImathBox.h" +#include "PyImathDecorators.h" +#include "PyImathMathExc.h" +#include "PyImathOperators.h" +#include "PyImathVecOperators.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template +static FixedArray +BoxArray_get(FixedArray > &va) +{ + return index == 0 ? + FixedArray(&(va.unchecked_index(0).min), + va.len(),2*va.stride(),va.handle(),va.writable()) : + FixedArray(&(va.unchecked_index(0).max), + va.len(),2*va.stride(),va.handle(),va.writable()); +} + +template +static void +setItemTuple(FixedArray > &va, Py_ssize_t index, const tuple &t) +{ + if(t.attr("__len__")() == 2) + { + Box v; + v.min = extract(t[0]); + v.max = extract(t[1]); + va[(size_t)va.canonical_index(index)] = v; + } + else + throw std::invalid_argument ("tuple of length 2 expected"); +} + +template +class_ > > +register_BoxArray() +{ + using boost::mpl::true_; + using boost::mpl::false_; + + class_ > > boxArray_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Box"); + boxArray_class + .add_property("min",&BoxArray_get) + .add_property("max",&BoxArray_get) + .def("__setitem__", &setItemTuple) + ; + + add_comparison_functions(boxArray_class); + decoratecopy(boxArray_class); + + return boxArray_class; +} + +} // namespace PyImath + +#endif // _PyImathBoxArrayImpl_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathBufferProtocol.h b/Sources/MetaPy/include/python/PyImath/PyImathBufferProtocol.h new file mode 100644 index 00000000..f0d42a18 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathBufferProtocol.h @@ -0,0 +1,29 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
+// + +// clang-format off + +#ifndef _PyImathBufferProtocol_h_ +#define _PyImathBufferProtocol_h_ + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include + +namespace PyImath { + +// For more information on working with the protocol see: +// +// https://docs.python.org/2.7/c-api/buffer.html +// https://docs.python.org/3.7.10/c-api/buffer.html + +template +void add_buffer_protocol (boost::python::class_ &classObj); + +template +ArrayT* fixedArrayFromBuffer (PyObject *obj); + +} + +#endif diff --git a/Sources/MetaPy/include/python/PyImath/PyImathColor.h b/Sources/MetaPy/include/python/PyImath/PyImathColor.h new file mode 100644 index 00000000..767b9b1e --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathColor.h @@ -0,0 +1,229 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathColor3_h_ +#define _PyImathColor3_h_ + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include "PyImath.h" + +namespace PyImath { + +template boost::python::class_ > register_Color4(); +template boost::python::class_ > > register_Color4Array2D(); +template boost::python::class_ > > register_Color4Array(); +template boost::python::class_, boost::python::bases > > register_Color3(); +template boost::python::class_ > > register_Color3Array(); + +typedef FixedArray2D Color4fArray; +typedef FixedArray2D Color4cArray; +typedef FixedArray C4fArray; +typedef FixedArray C4cArray; +typedef FixedArray C3fArray; +typedef FixedArray C3cArray; + +// +// Other code in the Zeno code base assumes the existance of a class with the +// same name as the Imath class, and with static functions wrap() and +// convert() to produce a PyImath object from an Imath object and vice-versa, +// respectively. The class Boost generates from the Imath class does not +// have these properties, so we define a companion class here. +// The template argument, T, is the element type for the color in C++ (e.g., char, +// float). The other argument, U, is how this type is represented in Python +// (e.g., int, float). 
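The color companion classes follow the same wrap()/convert() pattern, and convert() additionally accepts plain Python tuples, lists, and V3 vectors, as the implementation below shows. A minimal hedged sketch (the float element type and the pyObj variable are assumptions for illustration, not part of this diff):

    IMATH_NAMESPACE::Color3<float> c;
    if (PyImath::Color3f::convert (pyObj, &c))   // pyObj could be a C3f, a V3f, or e.g. (0.25, 0.5, 1.0)
    {
        c *= 0.5f;                               // use it like any Imath color
        PyObject *result = PyImath::Color3f::wrap (c);
    }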
+ +template +class C3 { + public: + static PyObject * wrap (const IMATH_NAMESPACE::Color3 &c); + static int convert (PyObject *p, IMATH_NAMESPACE::Color3 *v); +}; + +template +class C4 { + public: + static PyObject * wrap (const IMATH_NAMESPACE::Color4 &c); + static int convert (PyObject *p, IMATH_NAMESPACE::Color4 *v); +}; + +template +PyObject * +C3::wrap (const IMATH_NAMESPACE::Color3 &c) +{ + typename boost::python::return_by_value::apply < IMATH_NAMESPACE::Color3 >::type converter; + PyObject *p = converter (c); + return p; +} + +template +PyObject * +C4::wrap (const IMATH_NAMESPACE::Color4 &c) +{ + typename boost::python::return_by_value::apply < IMATH_NAMESPACE::Color4 >::type converter; + PyObject *p = converter (c); + return p; +} + +template +int +C3::convert (PyObject *p, IMATH_NAMESPACE::Color3 *v) +{ + boost::python::extract extractorC3c (p); + if (extractorC3c.check()) + { + IMATH_NAMESPACE::C3c c3c = extractorC3c(); + v->setValue (U(c3c[0]), U(c3c[1]), U(c3c[2])); + return 1; + } + + boost::python::extract extractorC3f (p); + if (extractorC3f.check()) + { + IMATH_NAMESPACE::C3f c3f = extractorC3f(); + v->setValue (U(c3f[0]), U(c3f[1]), U(c3f[2])); + return 1; + } + + boost::python::extract extractorTuple (p); + if (extractorTuple.check()) + { + boost::python::tuple t = extractorTuple(); + if (t.attr ("__len__") () == 3) + { + double a = boost::python::extract (t[0]); + double b = boost::python::extract (t[1]); + double c = boost::python::extract (t[2]); + v->setValue (U(a), U(b), U(c)); + return 1; + } + } + + boost::python::extract extractorList (p); + if (extractorList.check()) + { + boost::python::list l = extractorList(); + if (l.attr ("__len__") () == 3) + { + boost::python::extract extractor0 (l[0]); + boost::python::extract extractor1 (l[1]); + boost::python::extract extractor2 (l[2]); + if (extractor0.check() && extractor1.check() && + extractor2.check()) + { + v->setValue (U(extractor0()), U(extractor1()), + U(extractor2())); + return 1; + } + } + } + + boost::python::extract extractorV3i (p); + if (extractorV3i.check()) + { + IMATH_NAMESPACE::V3i v3i = extractorV3i(); + v->setValue (U(v3i[0]), U(v3i[1]), U(v3i[2])); + return 1; + } + + boost::python::extract extractorV3f (p); + if (extractorV3f.check()) + { + IMATH_NAMESPACE::V3f v3f = extractorV3f(); + v->setValue (U(v3f[0]), U(v3f[1]), U(v3f[2])); + return 1; + } + + boost::python::extract extractorV3d (p); + if (extractorV3d.check()) + { + IMATH_NAMESPACE::V3d v3d = extractorV3d(); + v->setValue (U(v3d[0]), U(v3d[1]), U(v3d[2])); + return 1; + } + + return 0; +} + +template +int +C4::convert (PyObject *p, IMATH_NAMESPACE::Color4 *v) +{ + boost::python::extract extractorC4c (p); + if (extractorC4c.check()) + { + IMATH_NAMESPACE::C4c c4c = extractorC4c(); + v->setValue (U(c4c[0]), U(c4c[1]), U(c4c[2]), U(c4c[3])); + return 1; + } + + boost::python::extract extractorC4f (p); + if (extractorC4f.check()) + { + IMATH_NAMESPACE::C4f c4f = extractorC4f(); + v->setValue (U(c4f[0]), U(c4f[1]), U(c4f[2]), U(c4f[3])); + return 1; + } + + boost::python::extract extractorTuple (p); + if (extractorTuple.check()) + { + boost::python::tuple t = extractorTuple(); + if (t.attr ("__len__") () == 4) + { + // As with V3, we extract the tuple elements as doubles and + // cast them to Ts in setValue(), to avoid any odd cases where + // extracting them as Ts from the start would fail. 
+ + double a = boost::python::extract (t[0]); + double b = boost::python::extract (t[1]); + double c = boost::python::extract (t[2]); + double d = boost::python::extract (t[3]); + v->setValue (U(a), U(b), U(c), U(d)); + return 1; + } + } + + boost::python::extract extractorList (p); + if (extractorList.check()) + { + boost::python::list l = extractorList(); + if (l.attr ("__len__") () == 4) + { + boost::python::extract extractor0 (l[0]); + boost::python::extract extractor1 (l[1]); + boost::python::extract extractor2 (l[2]); + boost::python::extract extractor3 (l[3]); + if (extractor0.check() && extractor1.check() && + extractor2.check() && extractor3.check()) + { + v->setValue (U(extractor0()), U(extractor1()), + U(extractor2()), U(extractor3())); + return 1; + } + } + } + + return 0; +} + + +typedef C3 Color3f; +typedef C3 Color3c; +typedef Color3f C3f; +typedef Color3c C3c; + +typedef C4 Color4f; +typedef C4 Color4c; +typedef Color4f C4f; +typedef Color4c C4c; + +} + +#endif diff --git a/Sources/MetaPy/include/python/PyImath/PyImathColor3ArrayImpl.h b/Sources/MetaPy/include/python/PyImath/PyImathColor3ArrayImpl.h new file mode 100644 index 00000000..a22db09d --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathColor3ArrayImpl.h @@ -0,0 +1,58 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathColor3ArrayImpl_h_ +#define _PyImathColor3ArrayImpl_h_ + +// +// This .C file was turned into a header file so that instantiations +// of the various V3* types can be spread across multiple files in +// order to work around MSVC limitations. +// + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathDecorators.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +// XXX fixme - template this +// really this should get generated automatically... + +template +static FixedArray +Color3Array_get(FixedArray > &ca) +{ + return FixedArray(&(ca.unchecked_index(0)[index]), + ca.len(),3*ca.stride(),ca.handle(),ca.writable()); +} + +// Currently we are only exposing the RGBA components. +template +class_ > > +register_Color3Array() +{ + class_ > > color3Array_class = FixedArray >::register_("Fixed length array of Imath::Color3"); + color3Array_class + .add_property("r",&Color3Array_get) + .add_property("g",&Color3Array_get) + .add_property("b",&Color3Array_get) + ; + + return color3Array_class; +} + +} // namespace PyImath + +#endif // _PyImathColor3ArrayImpl_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathColor4Array2DImpl.h b/Sources/MetaPy/include/python/PyImath/PyImathColor4Array2DImpl.h new file mode 100644 index 00000000..2d52f6b3 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathColor4Array2DImpl.h @@ -0,0 +1,565 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathColor4Array2DImpl_h_ +#define _PyImathColor4Array2DImpl_h_ + +// +// This .C file was turned into a header file so that instantiations +// of the various V3* types can be spread across multiple files in +// order to work around MSVC limitations. 
+// + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathDecorators.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +template struct Color4Array2DName { static const char *value(); }; + + +// XXX fixme - template this +// really this should get generated automatically... + +template +static FixedArray2D +Color4Array2D_get(FixedArray2D > &va) +{ + return FixedArray2D(&va(0,0)[index], va.len().x,va.len().y, 4*va.stride().x, va.stride().y, va.handle()); +} + + +// template +// static FixedArray2D > +// Color4Array_cross0(const FixedArray2D > &va, const FixedArray2D > &vb) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); +// FixedArray2D > f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).cross(vb(i,j)); +// return f; +// } +// +// template +// static FixedArray2D > +// Color4Array_cross1(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D > f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).cross(vb); +// return f; +// } +// +// template +// static FixedArray2D +// Color4Array_dot0(const FixedArray2D > &va, const FixedArray2D > &vb) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); +// FixedArray2D f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).dot(vb(i,j)); +// return f; +// } +// +// template +// static FixedArray2D +// Color4Array_dot1(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).dot(vb); +// return f; +// } + +// template +// static FixedArray2D +// Color4Array_length(const FixedArray2D > &va) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).length(); +// return f; +// } +// +// template +// static FixedArray2D +// Color4Array_length2(const FixedArray2D > &va) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).length2(); +// return f; +// } +// +// template +// static FixedArray2D > & +// Color4Array_normalize(FixedArray2D > &va) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// for (size_t i = 0; i < len; ++i) +// va(i,j).normalize(); +// return va; +// } +// +// template static FixedArray2D > +// Color4Array_normalized(const FixedArray2D > &va) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D > f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j).normalized(); +// return f; +// } +// +template +static FixedArray2D > +Color4Array_mulT(const FixedArray2D > &va, T t) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) * t; + return f; +} +// +// template +// static FixedArray2D > +// Color4Array_mulM44(const FixedArray2D > &va, const IMATH_NAMESPACE::Matrix44 &m) +// { +// PY_IMATH_LEAVE_PYTHON; +// IMATH_NAMESPACE::Vec2 len = va.len(); +// FixedArray2D > f(len); +// for (size_t i = 0; i < len; ++i) +// f(i,j) = va(i,j) * m; 
+// return f; +// } +// +template +static FixedArray2D > +Color4Array_mulArrayT(const FixedArray2D > &va, const FixedArray2D &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) * vb(i,j); + return f; +} + +template +static const FixedArray2D > & +Color4Array_imulT(FixedArray2D > &va, T t) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) *= t; + return va; +} + +template +static const FixedArray2D > & +Color4Array_imulArrayT(FixedArray2D > &va, const FixedArray2D &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) *= vb(i,j); + return va; +} + +template +static FixedArray2D > +Color4Array_divT(const FixedArray2D > &va, T t) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) / t; + return f; +} + +template +static FixedArray2D > +Color4Array_divArrayT(const FixedArray2D > &va, const FixedArray2D &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) / vb(i,j); + return f; +} + +template +static const FixedArray2D > & +Color4Array_idivT(FixedArray2D > &va, T t) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) /= t; + return va; +} + +template +static const FixedArray2D > & +Color4Array_idivArrayT(FixedArray2D > &va, const FixedArray2D &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) /= vb(i,j); + return va; +} + +template +static FixedArray2D > +Color4Array_add(const FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) + vb(i,j); + return f; +} + +template +static FixedArray2D > +Color4Array_addColor(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) + vb; + return f; +} + +template +static FixedArray2D > +Color4Array_sub(const FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) - vb(i,j); + return f; +} + +template +static FixedArray2D > +Color4Array_subColor(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) - vb; + return f; +} + +template +static FixedArray2D > +Color4Array_rsubColor(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + 
PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = vb - va(i,j); + return f; +} + +template +static FixedArray2D > +Color4Array_mul(const FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) * vb(i,j); + return f; +} + +template +static FixedArray2D > +Color4Array_mulColor(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) * vb; + return f; +} + +template +static FixedArray2D > +Color4Array_div(const FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) / vb(i,j); + return f; +} + +template +static FixedArray2D > +Color4Array_divColor(const FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = va(i,j) / vb; + return f; +} + +template +static FixedArray2D > +Color4Array_neg(const FixedArray2D > &va) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + FixedArray2D > f(len); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + f(i,j) = -va(i,j); + return f; +} + +template +static const FixedArray2D > & +Color4Array_iadd(FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) += vb(i,j); + return va; +} + +template +static const FixedArray2D > & +Color4Array_iaddColor(FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) += vb; + return va; +} + +template +static const FixedArray2D > & +Color4Array_isub(FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) -= vb(i,j); + return va; +} + +template +static const FixedArray2D > & +Color4Array_isubColor(FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) -= vb; + return va; +} + +template +static const FixedArray2D > & +Color4Array_imul(FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) *= vb(i,j); + return va; +} + +template +static const FixedArray2D > & +Color4Array_imulColor(FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + 
va(i,j) *= vb; + return va; +} + +template +static const FixedArray2D > & +Color4Array_idiv(FixedArray2D > &va, const FixedArray2D > &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.match_dimension(vb); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) /= vb(i,j); + return va; +} + +template +static const FixedArray2D > & +Color4Array_idivColor(FixedArray2D > &va, const IMATH_NAMESPACE::Color4 &vb) +{ + PY_IMATH_LEAVE_PYTHON; + IMATH_NAMESPACE::Vec2 len = va.len(); + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + va(i,j) /= vb; + return va; +} + +template +static void +setItemTuple(FixedArray2D > &va, const tuple &index, const tuple &t) +{ + if(t.attr("__len__")() == 4 && index.attr("__len__")() == 2) + { + Color4 v; + v.r = extract(t[0]); + v.g = extract(t[1]); + v.b = extract(t[2]); + v.a = extract(t[3]); + va(va.canonical_index(extract(index[0]),va.len()[0]), + va.canonical_index(extract(index[1]),va.len()[1])) = v; + } + else + throw std::invalid_argument ("tuple of length 4 expected"); +} + +template +class_ > > +register_Color4Array2D() +{ + class_ > > color4Array2D_class = + FixedArray2D >::register_(Color4Array2DName::value(),"Fixed length 2d array of IMATH_NAMESPACE::Color4"); + color4Array2D_class + .add_property("r",&Color4Array2D_get) + .add_property("g",&Color4Array2D_get) + .add_property("b",&Color4Array2D_get) + .add_property("a",&Color4Array2D_get) +// .def("dot",&Color4Array_dot0) +// .def("dot",&Color4Array_dot1) +// .def("cross", &Color4Array_cross0) +// .def("cross", &Color4Array_cross1) +// .def("length", &Color4Array_length) +// .def("length2", &Color4Array_length2) +// .def("normalize", &Color4Array_normalize,return_internal_reference<>()) +// .def("normalized", &Color4Array_normalized) + .def("__setitem__", &setItemTuple) + .def("__mul__", &Color4Array_mulT) +// .def("__mul__", &Color4Array_mulM44) +// .def("__mul__", &Color4Array_mulM44) + .def("__rmul__", &Color4Array_mulT) + .def("__mul__", &Color4Array_mulArrayT) + .def("__rmul__", &Color4Array_mulArrayT) + .def("__imul__", &Color4Array_imulT,return_internal_reference<>()) + .def("__imul__", &Color4Array_imulArrayT,return_internal_reference<>()) + .def("__div__", &Color4Array_divT) + .def("__div__", &Color4Array_divArrayT) + .def("__truediv__", &Color4Array_divT) + .def("__truediv__", &Color4Array_divArrayT) + .def("__idiv__", &Color4Array_idivT,return_internal_reference<>()) + .def("__idiv__", &Color4Array_idivArrayT,return_internal_reference<>()) + .def("__itruediv__", &Color4Array_idivT,return_internal_reference<>()) + .def("__itruediv__", &Color4Array_idivArrayT,return_internal_reference<>()) + .def("__add__",&Color4Array_add) + .def("__add__",&Color4Array_addColor) + .def("__radd__",&Color4Array_addColor) + .def("__sub__",&Color4Array_sub) + .def("__sub__",&Color4Array_subColor) + .def("__rsub__",&Color4Array_rsubColor) + .def("__mul__",&Color4Array_mul) + .def("__mul__",&Color4Array_mulColor) + .def("__rmul__",&Color4Array_mulColor) + .def("__div__",&Color4Array_div) + .def("__div__",&Color4Array_divColor) + .def("__truediv__",&Color4Array_div) + .def("__truediv__",&Color4Array_divColor) + .def("__neg__",&Color4Array_neg) + .def("__iadd__",&Color4Array_iadd, return_internal_reference<>()) + .def("__iadd__",&Color4Array_iaddColor, return_internal_reference<>()) + .def("__isub__",&Color4Array_isub, return_internal_reference<>()) + .def("__isub__",&Color4Array_isubColor, return_internal_reference<>()) + 
.def("__imul__",&Color4Array_imul, return_internal_reference<>()) + .def("__imul__",&Color4Array_imulColor, return_internal_reference<>()) + .def("__idiv__",&Color4Array_idiv, return_internal_reference<>()) + .def("__idiv__",&Color4Array_idivColor, return_internal_reference<>()) + .def("__itruediv__",&Color4Array_idiv, return_internal_reference<>()) + .def("__itruediv__",&Color4Array_idivColor, return_internal_reference<>()) + ; + + add_comparison_functions(color4Array2D_class); + decoratecopy(color4Array2D_class); + + return color4Array2D_class; +} + + +} // namespace PyImath + +#endif // _PyImathColor4ArrayImpl_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathColor4ArrayImpl.h b/Sources/MetaPy/include/python/PyImath/PyImathColor4ArrayImpl.h new file mode 100644 index 00000000..dd0bfb78 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathColor4ArrayImpl.h @@ -0,0 +1,59 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathColor4ArrayImpl_h_ +#define _PyImathColor4ArrayImpl_h_ + +// +// This .C file was turned into a header file so that instantiations +// of the various V3* types can be spread across multiple files in +// order to work around MSVC limitations. +// + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" +#include "PyImathMathExc.h" +#include "PyImathDecorators.h" + +namespace PyImath { +using namespace boost::python; +using namespace IMATH_NAMESPACE; + +// XXX fixme - template this +// really this should get generated automatically... + +template +static FixedArray +Color4Array_get(FixedArray > &ca) +{ + return FixedArray(&(ca.unchecked_index(0)[index]), + ca.len(),4*ca.stride(),ca.handle(),ca.writable()); +} + +// Currently we are only exposing the RGBA components. +template +class_ > > +register_Color4Array() +{ + class_ > > color4Array_class = FixedArray >::register_("Fixed length array of IMATH_NAMESPACE::Color4"); + color4Array_class + .add_property("r",&Color4Array_get) + .add_property("g",&Color4Array_get) + .add_property("b",&Color4Array_get) + .add_property("a",&Color4Array_get) + ; + + return color4Array_class; +} + +} // namespace PyImath + +#endif // _PyImathColor4ArrayImpl_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathDecorators.h b/Sources/MetaPy/include/python/PyImath/PyImathDecorators.h new file mode 100644 index 00000000..3eb6ab70 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathDecorators.h @@ -0,0 +1,48 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef INCLUDED_PYIMATH_DECORATORS_H +#define INCLUDED_PYIMATH_DECORATORS_H + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include + +namespace PyImath +{ + +// These function add __copy__ and __deepcopy__ methods +// to python classes by simply wrapping the copy constructors +// This interface is needed for using these classes with +// the python copy module. 
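A minimal hedged sketch of how these decorators are typically applied when wrapping a type (the MyThing type, its registration function, and the assumption that this runs during module initialization are hypothetical, not part of this diff):

    #include <boost/python.hpp>
    #include "PyImathDecorators.h"

    struct MyThing { int value = 0; };        // hypothetical copy-constructible type

    void register_MyThing()                   // assumed to run inside the module init block
    {
        boost::python::class_<MyThing> cls ("MyThing");
        PyImath::decoratecopy (cls);          // adds __copy__/__deepcopy__ backed by the copy constructor
    }

After this, Python's copy.copy() and copy.deepcopy() produce independent MyThing instances.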
+ +template +static T +copy(const T& x) +{ + return T(x); +} + +template +static T +deepcopy(const T& x, boost::python::dict&) +{ + return copy(x); +} + +template +boost::python::class_& +decoratecopy(boost::python::class_& cls) +{ + cls.def("__copy__",&copy); + cls.def("__deepcopy__",&deepcopy); + return cls; +} + +} // namespace PyImath + +#endif // INCLUDED_PYIMATH_DECORATORS_H + diff --git a/Sources/MetaPy/include/python/PyImath/PyImathEuler.h b/Sources/MetaPy/include/python/PyImath/PyImathEuler.h new file mode 100644 index 00000000..34064d2c --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathEuler.h @@ -0,0 +1,85 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathEuler_h_ +#define _PyImathEuler_h_ + +#include +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include "PyImath.h" + +namespace PyImath { + +template boost::python::class_,boost::python::bases > > register_Euler(); +template boost::python::class_ > > register_EulerArray(); +typedef FixedArray EulerfArray; +typedef FixedArray EulerdArray; + +// + +// Other code in the Zeno code base assumes the existence of a class with the +// same name as the Imath class, and with static functions wrap() and +// convert() to produce a PyImath object from an Imath object and vice-versa, +// respectively. The class Boost generates from the Imath class does not +// have these properties, so we define a companion class here. +// The template argument, T, is the element type for the axis vector +// (e.g., float, double). + +template +class E { + public: + static PyObject * wrap (const IMATH_NAMESPACE::Euler &e); + static int convert (PyObject *p, IMATH_NAMESPACE::Euler *v); +}; + +template +PyObject * +E::wrap (const IMATH_NAMESPACE::Euler &e) +{ + typename boost::python::return_by_value::apply < IMATH_NAMESPACE::Euler >::type converter; + PyObject *p = converter (e); + return p; +} + +template +int +E::convert (PyObject *p, IMATH_NAMESPACE::Euler *v) +{ + boost::python::extract extractorEf (p); + if (extractorEf.check()) + { + IMATH_NAMESPACE::Eulerf e = extractorEf(); + v->x = T(e.x); + v->y = T(e.y); + v->z = T(e.z); + v->setOrder (typename IMATH_NAMESPACE::Euler::Order (e.order())); + return 1; + } + + boost::python::extract extractorEd (p); + if (extractorEd.check()) + { + IMATH_NAMESPACE::Eulerd e = extractorEd(); + v->x = T(e.x); + v->y = T(e.y); + v->z = T(e.z); + v->setOrder (typename IMATH_NAMESPACE::Euler::Order (e.order())); + return 1; + } + + return 0; +} + +typedef E Eulerf; +typedef E Eulerd; + +} + +#endif diff --git a/Sources/MetaPy/include/python/PyImath/PyImathExport.h b/Sources/MetaPy/include/python/PyImath/PyImathExport.h new file mode 100644 index 00000000..20312f84 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathExport.h @@ -0,0 +1,28 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project.
+// + +// clang-format off + +#ifndef PYIMATHEXPORT_H +#define PYIMATHEXPORT_H + +#if defined(IMATH_DLL) + #if defined(PLATFORM_VISIBILITY_AVAILABLE) + #define PYIMATH_EXPORT __attribute__((visibility("default"))) + #elif defined(_MSC_VER) + #if defined(PYIMATH_BUILD) + #define PYIMATH_EXPORT __declspec(dllexport) + #else + #define PYIMATH_EXPORT __declspec(dllimport) + #endif + #else + #define PYIMATH_EXPORT + #endif +#else + #define PYIMATH_EXPORT +#endif + +#endif // #ifndef PYIMATHEXPORT_H diff --git a/Sources/MetaPy/include/python/PyImath/PyImathFixedArray.h b/Sources/MetaPy/include/python/PyImath/PyImathFixedArray.h new file mode 100644 index 00000000..64283980 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathFixedArray.h @@ -0,0 +1,854 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. +// + +// clang-format off + +#ifndef _PyImathFixedArray_h_ +#define _PyImathFixedArray_h_ + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include "PyImathUtil.h" + +// +// Note: when PyImath from the v2 release of OpenEXR depended on Iex, +// the PY_IMATH_LEAVE/RETURN_PYTHON macros bracketed calls that +// enabled/disabled floating-point exceptions via the MathExcOn +// class. This was a compile-time option based on the setting of +// PYIMATH_ENABLE_EXCEPTIONS. This behavior is now deprecated, hence +// the empty macros. +// + +#define PY_IMATH_LEAVE_PYTHON PyImath::PyReleaseLock pyunlock; +#define PY_IMATH_RETURN_PYTHON + +namespace PyImath { + +namespace { + +// +// Utility classes used for converting array members to boost python objects. +// + +template +struct ReturnReference +{ + static boost::python::object applyReadOnly (const T& val) + { + typename boost::python::copy_const_reference::apply::type converter; + return boost::python::object(boost::python::handle<>(converter(val))); + } + + static boost::python::object applyWritable (T& val) + { + typename boost::python::reference_existing_object::apply::type converter; + return boost::python::object(boost::python::handle<>(converter(val))); + } + + static bool isReferenceWrap () { return true; } +}; + +template +struct ReturnByValue +{ + static boost::python::object applyReadOnly (const T& val) + { + typename boost::python::return_by_value::apply::type converter; + return boost::python::object(boost::python::handle<>(converter(val))); + } + + static boost::python::object applyWritable (T& val) + { + return applyReadOnly (val); + } + + static bool isReferenceWrap () { return false; } +}; + +} // namespace + +// +// Utility class for a runtime-specified fixed length array type in python +// +template +struct FixedArrayDefaultValue +{ + static T value(); +}; + +enum Uninitialized {UNINITIALIZED}; + +template +class FixedArray +{ + T * _ptr; + size_t _length; + size_t _stride; + bool _writable; + + // this handle optionally stores a shared_array to allocated array data + // so that everything is freed properly on exit.
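As a hedged illustration of what the handle is for, a FixedArray can alias externally allocated storage and keep that storage alive by stashing the owning smart pointer in the handle (the buffer name and size below are hypothetical, not part of this header):

    // Wrap an existing buffer; the shared_array stored in the boost::any
    // handle keeps the memory alive for as long as any FixedArray uses it.
    boost::shared_array<float> data (new float[16]);
    for (int i = 0; i < 16; ++i) data[i] = float (i);
    PyImath::FixedArray<float> arr (data.get(), 16, /*stride*/ 1, boost::any (data));
    arr[0] = 42.0f;   // writes through to the shared buffer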
+ boost::any _handle; + + boost::shared_array _indices; // non-NULL iff I'm a masked reference + size_t _unmaskedLength; + + + public: + typedef T BaseType; + + FixedArray(T *ptr, Py_ssize_t length, Py_ssize_t stride = 1, bool writable = true) + : _ptr(ptr), _length(length), _stride(stride), _writable(writable), + _handle(), _unmaskedLength(0) + { + if (length < 0) + { + throw std::domain_error ("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::domain_error ("Fixed array stride must be positive"); + } + // nothing + } + + FixedArray(T *ptr, Py_ssize_t length, Py_ssize_t stride, + boost::any handle, bool writable = true) + : _ptr(ptr), _length(length), _stride(stride), _writable(writable), + _handle(handle), _unmaskedLength(0) + { + if (_length < 0) + { + throw std::domain_error("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::domain_error("Fixed array stride must be positive"); + } + // nothing + } + + FixedArray(const T *ptr, Py_ssize_t length, Py_ssize_t stride = 1) + : _ptr(const_cast(ptr)), _length(length), _stride(stride), + _writable(false), _handle(), _unmaskedLength(0) + { + if (length < 0) + { + throw std::logic_error("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::logic_error("Fixed array stride must be positive"); + } + // nothing + } + + FixedArray(const T *ptr, Py_ssize_t length, Py_ssize_t stride, boost::any handle) + : _ptr(const_cast(ptr)), _length(length), _stride(stride), _writable(false), + _handle(handle), _unmaskedLength(0) + { + if (_length < 0) + { + throw std::logic_error("Fixed array length must be non-negative"); + } + if (stride <= 0) + { + throw std::logic_error("Fixed array stride must be positive"); + } + // nothing + } + + explicit FixedArray(Py_ssize_t length) + : _ptr(0), _length(length), _stride(1), _writable(true), + _handle(), _unmaskedLength(0) + { + if (_length < 0) { + throw std::domain_error("Fixed array length must be non-negative"); + } + boost::shared_array a(new T[length]); + T tmp = FixedArrayDefaultValue::value(); + for (Py_ssize_t i=0; i a(new T[length]); + _handle = a; + _ptr = a.get(); + } + + FixedArray(const T &initialValue, Py_ssize_t length) + : _ptr(0), _length(length), _stride(1), _writable(true), + _handle(), _unmaskedLength(0) + { + if (_length < 0) { + throw std::domain_error("Fixed array length must be non-negative"); + } + boost::shared_array a(new T[length]); + for (Py_ssize_t i=0; i + FixedArray(FixedArray& f, const MaskArrayType& mask) + : _ptr(f._ptr), _stride(f._stride), _writable(f._writable), _handle(f._handle), _unmaskedLength(0) + { + if (f.isMaskedReference()) + { + throw std::invalid_argument("Masking an already-masked FixedArray not supported yet (SQ27000)"); + } + + size_t len = f.match_dimension(mask); + _unmaskedLength = len; + + size_t reduced_len = 0; + for (size_t i = 0; i < len; ++i) + if (mask[i]) + reduced_len++; + + _indices.reset(new size_t[reduced_len]); + + for (size_t i = 0, j = 0; i < len; ++i) + { + if (mask[i]) + { + _indices[j] = i; + j++; + } + } + + _length = reduced_len; + } + + template + FixedArray(const FixedArray& f, const MaskArrayType& mask) + : _ptr(f._ptr), _stride(f._stride), _writable(false), _handle(f._handle), _unmaskedLength(0) + { + if (f.isMaskedReference()) + { + throw std::invalid_argument("Masking an already-masked FixedArray not supported yet (SQ27000)"); + } + + size_t len = f.match_dimension(mask); + _unmaskedLength = len; + + size_t reduced_len = 0; + for (size_t i = 0; i < 
len; ++i) + if (mask[i]) + reduced_len++; + + _indices.reset(new size_t[reduced_len]); + + for (size_t i = 0, j = 0; i < len; ++i) + { + if (mask[i]) + { + _indices[j] = i; + j++; + } + } + + _length = reduced_len; + } + + template + explicit FixedArray(const FixedArray &other) + : _ptr(0), _length(other.len()), _stride(1), _writable(true), + _handle(), _unmaskedLength(other.unmaskedLength()) + { + boost::shared_array a(new T[_length]); + for (size_t i=0; i<_length; ++i) a[i] = T(other[i]); + _handle = a; + _ptr = a.get(); + + if (_unmaskedLength) + { + _indices.reset(new size_t[_length]); + + for (size_t i = 0; i < _length; ++i) + _indices[i] = other.raw_ptr_index(i); + } + } + + FixedArray(const FixedArray &other) + : _ptr(other._ptr), _length(other._length), _stride(other._stride), + _writable(other._writable), + _handle(other._handle), + _indices(other._indices), + _unmaskedLength(other._unmaskedLength) + { + } + + const FixedArray & + operator = (const FixedArray &other) + { + if (&other == this) return *this; + + _ptr = other._ptr; + _length = other._length; + _stride = other._stride; + _writable = other._writable; + _handle = other._handle; + _unmaskedLength = other._unmaskedLength; + _indices = other._indices; + + return *this; + } + + ~FixedArray() + { + // nothing + } + + explicit operator bool() const {return _ptr != nullptr;} + + const boost::any & handle() { return _handle; } + + // + // Make an index suitable for indexing into an array in c++ from + // a python index, which can be negative for indexing relative to + // the end of an array + // + size_t canonical_index(Py_ssize_t index) const + { + if (index < 0) index += len(); + if (index >= len() || index < 0) { + PyErr_SetString(PyExc_IndexError, "Index out of range"); + boost::python::throw_error_already_set(); + } + return index; // still a virtual index if this is a masked reference array + } + + void extract_slice_indices(PyObject *index, size_t &start, size_t &end, Py_ssize_t &step, size_t &slicelength) const + { + if (PySlice_Check(index)) { +#if PY_MAJOR_VERSION > 2 + PyObject *slice = index; +#else + PySliceObject *slice = reinterpret_cast(index); +#endif + Py_ssize_t s,e,sl; + if (PySlice_GetIndicesEx(slice,_length,&s,&e,&step,&sl) == -1) { + boost::python::throw_error_already_set(); + } + // e can be -1 if the iteration is backwards with a negative slice operator [::-n] (n > 0). + if (s < 0 || e < -1 || sl < 0) { + throw std::domain_error("Slice extraction produced invalid start, end, or length indices"); + } + start = s; + end = e; + slicelength = sl; + } else if (PyInt_Check(index)) { + size_t i = canonical_index(PyInt_AsSsize_t(index)); + start = i; end = i+1; step = 1; slicelength = 1; + } else { + PyErr_SetString(PyExc_TypeError, "Object is not a slice"); + boost::python::throw_error_already_set(); + } + } + + // Although this method isn't used directly by this class, + // there are some sub-classes that are using it. + typedef typename boost::mpl::if_, T&,T>::type get_type; + get_type getitem(Py_ssize_t index) { return (*this)[canonical_index(index)]; } + typedef typename boost::mpl::if_,const T&,T>::type get_type_const; + get_type_const getitem(Py_ssize_t index) const { return (*this)[canonical_index(index)]; } + + // We return an internal reference for class-types and a copy of the data + // for non-class types. Returning an internal refeference doesn't seem + // to work with non-class types. 
+ + boost::python::object getobjectTuple (Py_ssize_t index) + { + typedef typename boost::mpl::if_, + ReturnReference, + ReturnByValue >::type convertType; + + boost::python::object retval; + int referenceMode = 0; + + const size_t i = canonical_index(index); + T& val = _ptr[(isMaskedReference() ? raw_ptr_index(i) : i) * _stride]; + + if (_writable) + { + retval = convertType::applyWritable (val); + + if (convertType::isReferenceWrap()) + referenceMode = 0; // Managed reference. + else + referenceMode = 2; // Default policy (return-by-value) + } + else + { + retval = convertType::applyReadOnly (val); + + if (convertType::isReferenceWrap()) + referenceMode = 1; // Copy const reference + else + referenceMode = 2; // Default policy (return-by-value) + } + + return boost::python::make_tuple (referenceMode, retval); + } + + boost::python::object getobjectTuple (Py_ssize_t index) const + { + typedef typename boost::mpl::if_, + ReturnReference, + ReturnByValue >::type convertType; + + boost::python::object retval; + int referenceMode = 1; + + const size_t i = canonical_index(index); + const T& val = _ptr[(isMaskedReference() ? raw_ptr_index(i) : i) * _stride]; + + retval = convertType::applyReadOnly (val); + + if (convertType::isReferenceWrap()) + referenceMode = 1; // Copy const reference + else + referenceMode = 2; // Default policy (return-by-value) + + return boost::python::make_tuple (referenceMode, retval); + } + + FixedArray getslice(::PyObject *index) const + { + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + FixedArray f(slicelength); + + if (isMaskedReference()) + { + for (size_t i=0; i + FixedArray getslice_mask(const MaskArrayType& mask) + { + // 'writable' state is preserved in the returned fixed-array. + FixedArray f(*this, mask); + return f; + } + + void + setitem_scalar(PyObject *index, const T &data) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + + if (isMaskedReference()) + { + for (size_t i=0; i + void + setitem_scalar_mask(const MaskArrayType &mask, const T &data) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + size_t len = match_dimension(mask, false); + + if (isMaskedReference()) + { + for (size_t i = 0; i < len; ++i) + _ptr[raw_ptr_index(i)*_stride] = data; + } + else + { + for (size_t i=0; i + void + setitem_vector(::PyObject *index, const ArrayType &data) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + size_t start=0, end=0, slicelength=0; + Py_ssize_t step; + extract_slice_indices(index,start,end,step,slicelength); + + // we have a valid range of indices + if ((size_t)data.len() != slicelength) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); + boost::python::throw_error_already_set(); + } + + if (isMaskedReference()) + { + for (size_t i=0; i + void + setitem_vector_mask(const MaskArrayType &mask, const ArrayType &data) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + // We could relax this but this restriction if there's a good + // enough reason too. 
+ + if (isMaskedReference()) + { + throw std::invalid_argument("We don't support setting item masks for masked reference arrays."); + } + + size_t len = match_dimension(mask); + if ((size_t)data.len() == len) + { + for (size_t i = 0; i < len; ++i) + if (mask[i]) _ptr[i*_stride] = data[i]; + } + else + { + Py_ssize_t count = 0; + for (size_t i = 0; i < len; ++i) + if (mask[i]) count++; + + if (data.len() != count) { + throw std::invalid_argument("Dimensions of source data do not match destination either masked or unmasked"); + } + + Py_ssize_t dataIndex = 0; + for (size_t i = 0; i < len; ++i) + { + if (mask[i]) + { + _ptr[i*_stride] = data[dataIndex]; + dataIndex++; + } + } + } + } + + // exposed as Py_ssize_t for compatilbity with standard python sequences + Py_ssize_t len() const { return _length; } + size_t stride() const { return _stride; } + bool writable() const { return _writable; } + + // This method is mainly here for use in confidence tests, but there may + // be other use-cases where a writable array needs to be made read-only. + // Note that we do not provide a 'makeWritable' method here, because that + // type of operation shouldn't be allowed. + void makeReadOnly() { _writable = false; } + + // no bounds checking on i! + T& operator [] (size_t i) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + return _ptr[(isMaskedReference() ? raw_ptr_index(i) : i) * _stride]; + } + + // no bounds checking on i! + const T& operator [] (size_t i) const + { + return _ptr[(isMaskedReference() ? raw_ptr_index(i) : i) * _stride]; + } + + // no mask conversion or bounds checking on i! + T& direct_index(size_t i) + { + if (!_writable) + throw std::invalid_argument("Fixed array is read-only."); + + return _ptr[i*_stride]; + } + + // no mask conversion or bounds checking on i! + const T& direct_index (size_t i) const + { + return _ptr[i*_stride]; + } + + // In some cases, an access to the raw data without the 'writable' check + // is needed. Generally in specialized python-wrapping helpers. + T& unchecked_index (size_t i) + { + return _ptr[(isMaskedReference() ? raw_ptr_index(i) : i) * _stride]; + } + + T& unchecked_direct_index (size_t i) + { + return _ptr[i*_stride]; + } + + bool isMaskedReference() const {return _indices.get() != 0;} + size_t unmaskedLength() const {return _unmaskedLength;} + + // Conversion of indices to raw pointer indices. + // This should only be called when this is a masked reference. + // No safety checks done for performance. + size_t raw_ptr_index(size_t i) const + { + assert(isMaskedReference()); + assert(i < _length); + assert(_indices[i] >= 0 && _indices[i] < _unmaskedLength); + return _indices[i]; + } + + static boost::python::class_ > register_(const char *doc) + { + // Depending on the data-type (class or fundamental) and the writable + // state of the array, different forms are returned by the '__getitem__' + // method. If writable and a class, an internal reference to the data + // is returned so that its value can be changed. If not-writable or a + // fundemental data type (float, int, etc.), then a 'copy' of the data + // is returned. 
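For context, a hedged sketch of registering one specialization and what the policy selection described above means in practice (this assumes module-init context and that the float specializations of name() and FixedArrayDefaultValue are provided elsewhere in PyImath):

    // Register FixedArray<float> (PyImath's FloatArray). Because float is not
    // a class type, __getitem__ on the Python side always returns copies;
    // arrays of class-type elements (e.g. V3f) hand back writable references
    // instead, as described in the comment above.
    boost::python::class_<PyImath::FixedArray<float> > cls =
        PyImath::FixedArray<float>::register_ ("Fixed length array of float");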
+ + typename boost::python::object (FixedArray::*nonconst_getobject)(Py_ssize_t) = + &FixedArray::getobjectTuple; + typename boost::python::object (FixedArray:: *const_getobject)(Py_ssize_t) const = + &FixedArray::getobjectTuple; + + boost::python::class_ > c(name(),doc, boost::python::init("construct an array of the specified length initialized to the default value for the type")); + c + .def(boost::python::init &>("construct an array with the same values as the given array")) + .def(boost::python::init("construct an array of the specified length initialized to the specified default value")) + .def("__getitem__", &FixedArray::getslice) + .def("__getitem__", &FixedArray::getslice_mask > ) + .def("__getitem__", const_getobject, + selectable_postcall_policy_from_tuple< + boost::python::with_custodian_and_ward_postcall<0,1>, + boost::python::return_value_policy, + boost::python::default_call_policies>()) + .def("__getitem__", nonconst_getobject, + selectable_postcall_policy_from_tuple< + boost::python::with_custodian_and_ward_postcall<0,1>, + boost::python::return_value_policy, + boost::python::default_call_policies>()) + .def("__setitem__", &FixedArray::setitem_scalar) + .def("__setitem__", &FixedArray::setitem_scalar_mask >) + .def("__setitem__", &FixedArray::setitem_vector >) + .def("__setitem__", &FixedArray::setitem_vector_mask, FixedArray >) + .def("__len__",&FixedArray::len) + .def("writable",&FixedArray::writable) + .def("makeReadOnly", &FixedArray::makeReadOnly) + .def("ifelse",&FixedArray::ifelse_scalar) + .def("ifelse",&FixedArray::ifelse_vector) + ; + return c; + } + + template + size_t match_dimension(const ArrayType &a1, bool strictComparison = true) const + { + if (len() == a1.len()) + return len(); + + bool throwExc = false; + if (strictComparison) + throwExc = true; + else if (isMaskedReference()) + { + if (static_cast(_unmaskedLength) != a1.len()) + throwExc = true; + } + else + throwExc = true; + + if (throwExc) + { + throw std::invalid_argument("Dimensions of source do not match destination"); + } + + return len(); + } + + FixedArray ifelse_vector(const FixedArray &choice, const FixedArray &other) { + size_t len = match_dimension(choice); + match_dimension(other); + FixedArray tmp(len); // should use default construction but V3f doens't initialize + for (size_t i=0; i < len; ++i) tmp[i] = choice[i] ? (*this)[i] : other[i]; + return tmp; + } + + FixedArray ifelse_scalar(const FixedArray &choice, const T &other) { + size_t len = match_dimension(choice); + FixedArray tmp(len); // should use default construction but V3f doens't initialize + for (size_t i=0; i < len; ++i) tmp[i] = choice[i] ? (*this)[i] : other; + return tmp; + } + + // Instantiations of fixed ararys must implement this static member + static const char *name(); + + // Various 'Accessor' classes used in performance-critical areas while also + // managing the writable/read-only state efficiently. + + class ReadOnlyDirectAccess + { + public: + ReadOnlyDirectAccess (const FixedArray& array) + : _ptr (array._ptr), _stride (array._stride) + { + if (array.isMaskedReference()) + throw std::invalid_argument ("Fixed array is masked. 
ReadOnlyDirectAccess not granted."); + } + + ReadOnlyDirectAccess (const ReadOnlyDirectAccess& other) + : _ptr (other._ptr), _stride (other._stride) {} + + const T& operator[] (size_t i) const { return _ptr[i*_stride]; } + + private: + const T* _ptr; + + protected: + const size_t _stride; + }; + + class WritableDirectAccess : public ReadOnlyDirectAccess + { + public: + WritableDirectAccess (FixedArray& array) + : ReadOnlyDirectAccess (array), _ptr (array._ptr) + { + if (!array.writable()) + throw std::invalid_argument ("Fixed array is read-only. WritableDirectAccess not granted."); + } + + WritableDirectAccess (const WritableDirectAccess& other) + : ReadOnlyDirectAccess (other), _ptr (other._ptr) {} + + T& operator[] (size_t i) { return _ptr[i*_stride]; } + + private: + T* _ptr; + + using ReadOnlyDirectAccess::_stride; + }; + + // + + class ReadOnlyMaskedAccess + { + public: + ReadOnlyMaskedAccess (const FixedArray& array) + : _ptr (array._ptr), _stride (array._stride), + _indices (array._indices) + { + if (!array.isMaskedReference()) + throw std::invalid_argument ("Fixed array is not masked. ReadOnlyMaskedAccess not granted."); + } + + ReadOnlyMaskedAccess (const ReadOnlyMaskedAccess& other) + : _ptr (other._ptr), _stride (other._stride), + _indices (other._indices) {} + + // No index-range check here. + const T& operator[] (size_t i) const { return _ptr[_indices[i]*_stride]; } + + private: + const T* _ptr; + + protected: + const size_t _stride; + boost::shared_array _indices; + }; + + class WritableMaskedAccess : public ReadOnlyMaskedAccess + { + public: + WritableMaskedAccess (FixedArray& array) + : ReadOnlyMaskedAccess (array), _ptr (array._ptr) + { + if (!array.writable()) + std::invalid_argument ("Fixed array is read-only. WritableMaskedAccess not granted."); + } + + WritableMaskedAccess (const WritableMaskedAccess& other) + : ReadOnlyMaskedAccess (other), _ptr (other._ptr) {} + + // No index-range check here. + T& operator[] (size_t i) { return _ptr[_indices[i]*_stride]; } + + private: + T* _ptr; + + using ReadOnlyMaskedAccess::_stride; + using ReadOnlyMaskedAccess::_indices; + }; + +}; + +// +// Helper struct for arary indexing with a known compile time length +// +template +struct IndexAccessDefault { + typedef Data & result_type; + static Data & apply(Container &c, size_t i) { return c[i]; } +}; + +template > +struct StaticFixedArray +{ + static Py_ssize_t len(const Container &) { return Length; } + static typename IndexAccess::result_type getitem(Container &c, Py_ssize_t index) { return IndexAccess::apply(c,canonical_index(index)); } + static void setitem(Container &c, Py_ssize_t index, const Data &data) { IndexAccess::apply(c,canonical_index(index)) = data; } + static size_t canonical_index(Py_ssize_t index) + { + if (index < 0) index += Length; + if (index < 0 || index >= Length) { + PyErr_SetString(PyExc_IndexError, "Index out of range"); + boost::python::throw_error_already_set(); + } + return index; + } +}; + +} + +#endif // _PyImathFixedArray_h_ diff --git a/Sources/MetaPy/include/python/PyImath/PyImathFixedArray2D.h b/Sources/MetaPy/include/python/PyImath/PyImathFixedArray2D.h new file mode 100644 index 00000000..4f49eb84 --- /dev/null +++ b/Sources/MetaPy/include/python/PyImath/PyImathFixedArray2D.h @@ -0,0 +1,779 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// Copyright Contributors to the OpenEXR Project. 
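+//
+// FixedArray2D<T> (declared below) mirrors the one-dimensional FixedArray:
+// it wraps a strided pointer with per-axis lengths and strides, optionally
+// keeps the underlying storage alive through a boost::any handle, and exposes
+// (i,j) tuple/slice indexing plus the setitem/ifelse family to Python.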
+// + +// clang-format off + +#ifndef _PyImathFixedArray2D_h_ +#define _PyImathFixedArray2D_h_ + +#define BOOST_BIND_GLOBAL_PLACEHOLDERS +#include +#include +#include +#include +#include +#include +#include "PyImathFixedArray.h" +#include "PyImathOperators.h" + +namespace PyImath { + +template +class FixedArray2D +{ + T * _ptr; + IMATH_NAMESPACE::Vec2 _length; + IMATH_NAMESPACE::Vec2 _stride; + size_t _size; //flattened size of the array + + // this handle optionally stores a shared_array to allocated array data + // so that everything is freed properly on exit. + boost::any _handle; + + public: + + FixedArray2D(T *ptr, Py_ssize_t lengthX, Py_ssize_t lengthY, Py_ssize_t strideX = 1) + : _ptr(ptr), _length(lengthX, lengthY), _stride(strideX, lengthX), _handle() + { + if (lengthX < 0 || lengthY < 0) + throw std::domain_error("Fixed array 2d lengths must be non-negative"); + if (strideX <= 0) + throw std::domain_error("Fixed array 2d strides must be positive"); + initializeSize(); + //std::cout << "fixed array external construct" << std::endl; + // nothing + } + + FixedArray2D(T *ptr, Py_ssize_t lengthX, Py_ssize_t lengthY, Py_ssize_t strideX, Py_ssize_t strideY) + : _ptr(ptr), _length(lengthX, lengthY), _stride(strideX, strideY), _handle() + { + if (lengthX < 0 || lengthY < 0) + throw std::domain_error("Fixed array 2d lengths must be non-negative"); + if (strideX <= 0 || strideY < 0) + throw std::domain_error("Fixed array 2d strides must be positive"); + initializeSize(); + //std::cout << "fixed array external construct" << std::endl; + // nothing + } + + FixedArray2D(T *ptr, Py_ssize_t lengthX, Py_ssize_t lengthY, Py_ssize_t strideX, Py_ssize_t strideY, boost::any handle) + : _ptr(ptr), _length(lengthX, lengthY), _stride(strideX, strideY), _handle(handle) + { + initializeSize(); + //std::cout << "fixed array external construct with handle" << std::endl; + // nothing + } + + explicit FixedArray2D(Py_ssize_t lengthX, Py_ssize_t lengthY) + : _ptr(0), _length(lengthX, lengthY), _stride(1, lengthX), _handle() + { + if (lengthX < 0 || lengthY < 0) + throw std::domain_error("Fixed array 2d lengths must be non-negative"); + initializeSize(); + T tmp = FixedArrayDefaultValue::value(); + boost::shared_array a(new T[_size]); + for (size_t i=0; i<_size; ++i) a[i] = tmp; + _handle = a; + _ptr = a.get(); + } + + explicit FixedArray2D(const IMATH_NAMESPACE::V2i& length) + : _ptr(0), _length(length), _stride(1, length.x), _handle() + { + if (length.x < 0 || length.y < 0) + throw std::domain_error("Fixed array 2d lengths must be non-negative"); + initializeSize(); + T tmp = FixedArrayDefaultValue::value(); + boost::shared_array a(new T[_size]); + for (size_t i=0; i<_size; ++i) a[i] = tmp; + _handle = a; + _ptr = a.get(); + } + + FixedArray2D(const T &initialValue, Py_ssize_t lengthX, Py_ssize_t lengthY) + : _ptr(0), _length(lengthX, lengthY), _stride(1, lengthX), _handle() + { + if (lengthX < 0 || lengthY < 0) + throw std::domain_error("Fixed array 2d lengths must be non-negative"); + initializeSize(); + boost::shared_array a(new T[_size]); + for (size_t i=0; i<_size; ++i) a[i] = initialValue; + _handle = a; + _ptr = a.get(); + } + void initializeSize() + { + _size = _length.x*_length.y; + } + + template + explicit FixedArray2D(const FixedArray2D &other) + : _ptr(0), _length(other.len()), _stride(1, other.len().x), _handle() + { + initializeSize(); + boost::shared_array a(new T[_size]); + size_t z = 0; + for (size_t j = 0; j < _length.y; ++j) + for (size_t i = 0; i < _length.x; ++i) + a[z++] = 
T(other(i,j)); + _handle = a; + _ptr = a.get(); + } + + FixedArray2D(const FixedArray2D &other) + : _ptr(other._ptr), _length(other._length), _stride(other._stride), _size(other._size), _handle(other._handle) + { + //std::cout << "fixed array copy consturct construct" << std::endl; + // nothing + } + + const FixedArray2D & + operator = (const FixedArray2D &other) + { + if (&other == this) return *this; + + //std::cout << "fixed array assign" << std::endl; + + _ptr = other._ptr; + _length = other._length; + _stride = other._stride; + _handle = other._handle; + + _size = _length.x*_length.y; + + return *this; + } + + ~FixedArray2D() + { + //std::cout << "fixed array delete" << std::endl; + } + + const boost::any & handle() { return _handle; } + + size_t canonical_index(Py_ssize_t index, size_t length) const + { + if (index < 0) index += length; + if ((size_t) index >= length || index < 0) { + PyErr_SetString(PyExc_IndexError, "Index out of range"); + boost::python::throw_error_already_set(); + } + return index; + } + + void extract_slice_indices(PyObject *index, size_t length, size_t &start, size_t &end, Py_ssize_t &step, size_t &slicelength) const + { + if (PySlice_Check(index)) { +#if PY_MAJOR_VERSION > 2 + PyObject *slice = index; +#else + PySliceObject *slice = reinterpret_cast(index); +#endif + Py_ssize_t s, e, sl; + if (PySlice_GetIndicesEx(slice,length,&s,&e,&step,&sl) == -1) { + boost::python::throw_error_already_set(); + } + if (s < 0 || e < 0 || sl < 0) { + throw std::domain_error("Slice extraction produced invalid start, end, or length indices"); + } + start = s; + end = e; + slicelength = sl; + } else if (PyInt_Check(index)) { + size_t i = canonical_index(PyInt_AsSsize_t(index), length); + start = i; end = i+1; step = 1; slicelength = 1; + } else { + PyErr_SetString(PyExc_TypeError, "Object is not a slice"); + boost::python::throw_error_already_set(); + } + //std::cout << "Slice indices are " << start << " " << end << " " << step << " " << slicelength << std::endl; + } + + // return_internal_reference doesn't seem to work with non-class types + typedef typename boost::mpl::if_,T&,T>::type get_type; +// get_type getitem(Py_ssize_t index) const { return _ptr[canonical_index(index)*_stride]; } + //FIXME: const does not work here with at least IMATH_NAMESPACE::Color4, why it works for V3fArray? + get_type getitem(Py_ssize_t i, Py_ssize_t j) //const + { + return (*this)(canonical_index(i, _length.x), canonical_index(j, _length.y)); + } + + //FIXME: anyway to seperate 2:3,4:5 from 2,4? 
we'd like to return int for the second one, and also 1d array for 2, 4:5 or 2:3, 4 + FixedArray2D getslice(PyObject *index) const + { + if (PyTuple_Check(index) && PyTuple_Size(index) == 2) + { + size_t startx=0, endx=0, slicelengthx=0; + size_t starty=0, endy=0, slicelengthy=0; + Py_ssize_t stepx=0; + Py_ssize_t stepy=0; + extract_slice_indices(PyTuple_GetItem(index, 0),_length.x,startx,endx,stepx,slicelengthx); + extract_slice_indices(PyTuple_GetItem(index, 1),_length.y,starty,endy,stepy,slicelengthy); + FixedArray2D f(slicelengthx, slicelengthy); + for (size_t j=0,z=0; j &mask) const + { +// size_t len = match_dimension(mask); +// size_t slicelength = 0; +// for (size_t i=0; i len = match_dimension(mask); + FixedArray2D f(len); + for (size_t j=0; j(index[0]); +// Py_ssize_t j = boost::python::extract(index[1]); +// (*this)(i,j) = data; +// } + void + setitem_scalar(PyObject *index, const T &data) + { + if (!PyTuple_Check(index) || PyTuple_Size(index) != 2) + { + PyErr_SetString(PyExc_TypeError, "Slice syntax error"); + boost::python::throw_error_already_set(); + } + + size_t startx=0, endx=0, slicelengthx=0; + size_t starty=0, endy=0, slicelengthy=0; + Py_ssize_t stepx=0; + Py_ssize_t stepy=0; + extract_slice_indices(PyTuple_GetItem(index, 0),_length.x,startx,endx,stepx,slicelengthx); + extract_slice_indices(PyTuple_GetItem(index, 1),_length.y,starty,endy,stepy,slicelengthy); + for (size_t j=0; j &mask, const T &data) + { + IMATH_NAMESPACE::Vec2 len = match_dimension(mask); + for (size_t j = 0; j < len.y; j++) + for (size_t i=0; i(slicelengthx, slicelengthy)) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); + boost::python::throw_error_already_set(); + } + for (size_t i=0; i &mask, const FixedArray2D &data) + { + IMATH_NAMESPACE::Vec2 len = match_dimension(mask); + if (data.len() == len) { + for (size_t j = 0; j < len.y; j++) + for (size_t i=0; i &mask, const FixedArray &data) + { + IMATH_NAMESPACE::Vec2 len = match_dimension(mask); + if ((size_t) data.len() == len.x*len.y) { + for (size_t j = 0, z = 0; j < len.y; j++) + for (size_t i=0; i &data) + { + //TODO:sanity check + size_t startx=0, endx=0, slicelengthx=0; + size_t starty=0, endy=0, slicelengthy=0; + Py_ssize_t stepx=0; + Py_ssize_t stepy=0; + extract_slice_indices(PyTuple_GetItem(index, 0),_length.x,startx,endx,stepx,slicelengthx); + extract_slice_indices(PyTuple_GetItem(index, 1),_length.y,starty,endy,stepy,slicelengthy); + // we have a valid range of indices + if ((size_t) data.len() != slicelengthx*slicelengthy) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source data do not match destination"); + boost::python::throw_error_already_set(); + } + for (size_t j=0, z=0; j len() const { return _length; } + IMATH_NAMESPACE::Vec2 stride() const { return _stride; } + T & operator () (size_t i, size_t j) { return _ptr[_stride.x*(j*_stride.y + i)]; } + const T & operator () (size_t i, size_t j) const { return _ptr[_stride.x*(j*_stride.y + i)]; } + size_t totalLen() const { return _size; } + boost::python::tuple size() const + { + return boost::python::make_tuple(_length.x, _length.y); + } + + static boost::python::class_ > register_(const char *name, const char *doc) + { + // a little tricky, but here we go - class types return internal references + // but fundemental types just get copied. this typedef sets up the appropriate + // call policy for each type. 
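+        // Concretely, the boost::is_class<T> test below selects
+        // return_internal_reference<> for class element types (so 'item(i,j)'
+        // hands Python a reference into the array), while fundamental types
+        // fall back to default_call_policies and are returned by value.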
+ typedef typename boost::mpl::if_< + boost::is_class, + boost::python::return_internal_reference<>, + boost::python::default_call_policies>::type call_policy; + + boost::python::class_ > c(name,doc, boost::python::init( + "construct an array of the specified length initialized to the default value for the type")); + c + .def(boost::python::init &>("construct an array with the same values as the given array")) + .def(boost::python::init("construct an array of the specified length initialized to the specified default value")) + .def("__getitem__", &FixedArray2D::getslice) + .def("__getitem__", &FixedArray2D::getslice_mask) +// .def("__getitem__", &FixedArray2D::getitem, call_policy()) + .def("item", &FixedArray2D::getitem, call_policy()) +// .def("__setitem__", &FixedArray2D::setitem) + .def("__setitem__", &FixedArray2D::setitem_scalar) + .def("__setitem__", &FixedArray2D::setitem_scalar_mask) + .def("__setitem__", &FixedArray2D::setitem_vector) + .def("__setitem__", &FixedArray2D::setitem_vector_mask) + .def("__setitem__", &FixedArray2D::setitem_array1d) + .def("__setitem__", &FixedArray2D::setitem_array1d_mask) + .def("__len__",&FixedArray2D::totalLen) + .def("size",&FixedArray2D::size) + .def("ifelse",&FixedArray2D::ifelse_scalar) + .def("ifelse",&FixedArray2D::ifelse_vector) + ; + return c; + } + +// template +// size_t match_dimension(const FixedArray &a1) const +// { +// if (_length.x != a1.len()) { +// PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); +// boost::python::throw_error_already_set(); +// } +// return _length.x; +// } + + template + IMATH_NAMESPACE::Vec2 match_dimension(const FixedArray2D &a1) const + { + if (len() != a1.len()) { + PyErr_SetString(PyExc_IndexError, "Dimensions of source do not match destination"); + boost::python::throw_error_already_set(); + } + return len(); + } + + FixedArray2D ifelse_vector(const FixedArray2D &choice, const FixedArray2D &other) { + IMATH_NAMESPACE::Vec2 len = match_dimension(choice); + match_dimension(other); + FixedArray2D tmp(len); // should use default construction but V3f doens't initialize + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + tmp(i,j) = choice(i,j) ? (*this)(i,j) : other(i,j); + return tmp; + } + + FixedArray2D ifelse_scalar(const FixedArray2D &choice, const T &other) { + IMATH_NAMESPACE::Vec2 len = match_dimension(choice); + FixedArray2D tmp(len); // should use default construction but V3f doens't initialize + for (size_t j = 0; j < len.y; ++j) + for (size_t i = 0; i < len.x; ++i) + tmp(i,j) = choice(i,j) ? (*this)(i,j) : other; + return tmp; + } + +}; + +// unary operation application +template