This should help reduce warning spew when building with newer compilers.
The pybind11::module type has been renamed pybind11::module_ to avoid
conflicts with c++20 modules, according to the pybind11 changelog, so
this CL also updates gem5 source to use the new type. There is
supposedly an alias pybind11::module which is for compatibility, but we
still get linker errors without changing to pybind11::module_.
Change-Id: I0acb36215b33e3a713866baec43f5af630c356ee
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/40255
Maintainer: Bobby R. Bruce <bbruce@ucdavis.edu>
Reviewed-by: Bobby R. Bruce <bbruce@ucdavis.edu>
Tested-by: kokoro <noreply+kokoro@google.com>
version: 1.0.{build}
image:
-- Visual Studio 2017
- Visual Studio 2015
test: off
skip_branch_with_pr: true
build:
parallel: true
platform:
-- x64
- x86
environment:
matrix:
- PYTHON: 36
- CPP: 14
CONFIG: Debug
- PYTHON: 27
- CPP: 14
CONFIG: Debug
- - CONDA: 36
- CPP: latest
- CONFIG: Release
-matrix:
- exclude:
- - image: Visual Studio 2015
- platform: x86
- - image: Visual Studio 2015
- CPP: latest
- - image: Visual Studio 2017
- CPP: latest
- platform: x86
install:
- ps: |
- if ($env:PLATFORM -eq "x64") { $env:CMAKE_ARCH = "x64" }
- if ($env:APPVEYOR_JOB_NAME -like "*Visual Studio 2017*") {
- $env:CMAKE_GENERATOR = "Visual Studio 15 2017"
- $env:CMAKE_INCLUDE_PATH = "C:\Libraries\boost_1_64_0"
- $env:CXXFLAGS = "-permissive-"
- } else {
- $env:CMAKE_GENERATOR = "Visual Studio 14 2015"
- }
- if ($env:PYTHON) {
- if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" }
- $env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH"
- python -W ignore -m pip install --upgrade pip wheel
- python -W ignore -m pip install pytest numpy --no-warn-script-location
- } elseif ($env:CONDA) {
- if ($env:CONDA -eq "27") { $env:CONDA = "" }
- if ($env:PLATFORM -eq "x64") { $env:CONDA = "$env:CONDA-x64" }
- $env:PATH = "C:\Miniconda$env:CONDA\;C:\Miniconda$env:CONDA\Scripts\;$env:PATH"
- $env:PYTHONHOME = "C:\Miniconda$env:CONDA"
- conda --version
- conda install -y -q pytest numpy scipy
- }
+ $env:CMAKE_GENERATOR = "Visual Studio 14 2015"
+ if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" }
+ $env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH"
+ python -W ignore -m pip install --upgrade pip wheel
+ python -W ignore -m pip install pytest numpy --no-warn-script-location pytest-timeout
- ps: |
- Start-FileDownload 'http://bitbucket.org/eigen/eigen/get/3.3.3.zip'
- 7z x 3.3.3.zip -y > $null
- $env:CMAKE_INCLUDE_PATH = "eigen-eigen-67e894c6cd8f;$env:CMAKE_INCLUDE_PATH"
+ Start-FileDownload 'https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.zip'
+ 7z x eigen-3.3.7.zip -y > $null
+ $env:CMAKE_INCLUDE_PATH = "eigen-3.3.7;$env:CMAKE_INCLUDE_PATH"
build_script:
- cmake -G "%CMAKE_GENERATOR%" -A "%CMAKE_ARCH%"
- -DPYBIND11_CPP_STANDARD=/std:c++%CPP%
+ -DCMAKE_CXX_STANDARD=14
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_SUPPRESS_REGENERATION=1
- set MSBuildLogger="C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll"
- cmake --build . --config %CONFIG% --target pytest -- /m /v:m /logger:%MSBuildLogger%
- cmake --build . --config %CONFIG% --target cpptest -- /m /v:m /logger:%MSBuildLogger%
-- if "%CPP%"=="latest" (cmake --build . --config %CONFIG% --target test_cmake_build -- /m /v:m /logger:%MSBuildLogger%)
on_failure: if exist "tests\test_cmake_build" type tests\test_cmake_build\*.log*
--- /dev/null
+---
+# See all possible options and defaults with:
+# clang-format --style=llvm --dump-config
+BasedOnStyle: LLVM
+AccessModifierOffset: -4
+AlignConsecutiveAssignments: true
+AlwaysBreakTemplateDeclarations: Yes
+BinPackArguments: false
+BinPackParameters: false
+BreakBeforeBinaryOperators: All
+BreakConstructorInitializers: BeforeColon
+ColumnLimit: 99
+IndentCaseLabels: true
+IndentPPDirectives: AfterHash
+IndentWidth: 4
+Language: Cpp
+SpaceAfterCStyleCast: true
+# SpaceInEmptyBlock: true # too new
+Standard: Cpp11
+TabWidth: 4
+...
--- /dev/null
+FormatStyle: file
+
+Checks: '
+llvm-namespace-comment,
+modernize-use-override,
+readability-container-size-empty,
+modernize-use-using,
+modernize-use-equals-default,
+modernize-use-auto,
+modernize-use-emplace,
+'
+
+HeaderFilterRegex: 'pybind11/.*h'
--- /dev/null
+parse:
+ additional_commands:
+ pybind11_add_module:
+ flags:
+ - THIN_LTO
+ - MODULE
+ - SHARED
+ - NO_EXTRAS
+ - EXCLUDE_FROM_ALL
+ - SYSTEM
+
+format:
+ line_width: 99
+ tab_size: 2
+
+ # If an argument group contains more than this many sub-groups
+ # (parg or kwarg groups) then force it to a vertical layout.
+ max_subgroups_hwrap: 2
+
+ # If a positional argument group contains more than this many
+ # arguments, then force it to a vertical layout.
+ max_pargs_hwrap: 6
+
+ # If a cmdline positional group consumes more than this many
+ # lines without nesting, then invalidate the layout (and nest)
+ max_rows_cmdline: 2
+ separate_ctrl_name_with_space: false
+ separate_fn_name_with_space: false
+ dangle_parens: false
+
+  # If the trailing parenthesis must be 'dangled' on its own
+  # line, then align it to this reference: `prefix`: the start
+  # of the statement, `prefix-indent`: the start of the
+  # statement, plus one indentation level, `child`: align to
+  # the column of the arguments
+ dangle_align: prefix
+ # If the statement spelling length (including space and
+ # parenthesis) is smaller than this amount, then force reject
+ # nested layouts.
+ min_prefix_chars: 4
+
+ # If the statement spelling length (including space and
+ # parenthesis) is larger than the tab width by more than this
+ # amount, then force reject un-nested layouts.
+ max_prefix_chars: 10
+
+ # If a candidate layout is wrapped horizontally but it exceeds
+ # this many lines, then reject the layout.
+ max_lines_hwrap: 2
+
+ line_ending: unix
+
+ # Format command names consistently as 'lower' or 'upper' case
+ command_case: canonical
+
+ # Format keywords consistently as 'lower' or 'upper' case
+ # unchanged is valid too
+ keyword_case: 'upper'
+
+ # A list of command names which should always be wrapped
+ always_wrap: []
+
+ # If true, the argument lists which are known to be sortable
+ # will be sorted lexicographically
+ enable_sort: true
+
+ # If true, the parsers may infer whether or not an argument
+ # list is sortable (without annotation).
+ autosort: false
+
+# Causes a few issues - can be solved later, possibly.
+markup:
+ enable_markup: false
--- /dev/null
+Thank you for your interest in this project! Please refer to the following
+sections on how to contribute code and bug reports.
+
+### Reporting bugs
+
+Before submitting a question or bug report, please take a moment of your time
+and ensure that your issue isn't already discussed in the project documentation
+provided at [pybind11.readthedocs.org][] or in the [issue tracker][]. You can
+also check [gitter][] to see if it came up before.
+
+Assuming that you have identified a previously unknown problem or an important
+question, it's essential that you submit a self-contained and minimal piece of
+code that reproduces the problem. In other words: no external dependencies,
+isolate the function(s) that cause breakage, submit matched and complete C++
+and Python snippets that can be easily compiled and run in isolation; or
+ideally make a small PR with a failing test case that can be used as a starting
+point.
+
+## Pull requests
+
+Contributions are submitted, reviewed, and accepted using GitHub pull requests.
+Please refer to [this article][using pull requests] for details and adhere to
+the following rules to make the process as smooth as possible:
+
+* Make a new branch for every feature you're working on.
+* Make small and clean pull requests that are easy to review but make sure they
+ do add value by themselves.
+* Add tests for any new functionality and run the test suite (`cmake --build
+ build --target pytest`) to ensure that no existing features break.
+* Please run [`pre-commit`][pre-commit] to check your code matches the
+ project style. (Note that `gawk` is required.) Use `pre-commit run
+ --all-files` before committing (or use installed-mode, check pre-commit docs)
+ to verify your code passes before pushing to save time.
+* This project has a strong focus on providing general solutions using a
+ minimal amount of code, thus small pull requests are greatly preferred.
+
+### Licensing of contributions
+
+pybind11 is provided under a BSD-style license that can be found in the
+``LICENSE`` file. By using, distributing, or contributing to this project, you
+agree to the terms and conditions of this license.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+("Enhancements") to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to the author of this software, without
+imposing a separate written license agreement for such Enhancements, then you
+hereby grant the following license: a non-exclusive, royalty-free perpetual
+license to install, use, modify, prepare derivative works, incorporate into
+other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+
+## Development of pybind11
+
+To set up an ideal development environment, run the following commands on a
+system with CMake 3.14+:
+
+```bash
+python3 -m venv venv
+source venv/bin/activate
+pip install -r tests/requirements.txt
+cmake -S . -B build -DDOWNLOAD_CATCH=ON -DDOWNLOAD_EIGEN=ON
+cmake --build build -j4
+```
+
+Tips:
+
+* You can use `virtualenv` (from PyPI) instead of `venv` (which is Python 3
+ only).
+* You can select any name for your environment folder; if it contains "env" it
+ will be ignored by git.
+* If you don’t have CMake 3.14+, just add “cmake” to the pip install command.
+* You can use `-DPYBIND11_FINDPYTHON=ON` to use FindPython on CMake 3.12+
+* In classic mode, you may need to set `-DPYTHON_EXECUTABLE=/path/to/python`.
+ FindPython uses `-DPython_ROOT_DIR=/path/to` or
+ `-DPython_EXECUTABLE=/path/to/python`.
+
+### Configuration options
+
+In CMake, configuration options are given with “-D”. Options are stored in the
+build directory, in the `CMakeCache.txt` file, so they are remembered for each
+build directory. Two selections are special - the generator, given with `-G`,
+and the compiler, which is selected based on environment variables `CXX` and
+similar, or `-DCMAKE_CXX_COMPILER=`. Unlike the others, these cannot be changed
+after the initial run.
+
+The valid options are:
+
+* `-DCMAKE_BUILD_TYPE`: Release, Debug, MinSizeRel, RelWithDebInfo
+* `-DPYBIND11_FINDPYTHON=ON`: Use CMake 3.12+’s FindPython instead of the
+ classic, deprecated, custom FindPythonLibs
+* `-DPYBIND11_NOPYTHON=ON`: Disable all Python searching (disables tests)
+* `-DBUILD_TESTING=ON`: Enable the tests
+* `-DDOWNLOAD_CATCH=ON`: Download catch to build the C++ tests
+* `-DDOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests
+* `-DPYBIND11_INSTALL=ON/OFF`: Enable the install target (on by default for the
+ master project)
+* `-DUSE_PYTHON_INSTALL_DIR=ON`: Try to install into the python dir
+
+
+<details><summary>A few standard CMake tricks: (click to expand)</summary><p>
+
+* Use `cmake --build build -v` to see the commands used to build the files.
+* Use `cmake build -LH` to list the CMake options with help.
+* Use `ccmake` if available to see a curses (terminal) gui, or `cmake-gui` for
+ a completely graphical interface (not present in the PyPI package).
+* Use `cmake --build build -j12` to build with 12 cores (for example).
+* Use `-G` and the name of a generator to use something different. `cmake
+ --help` lists the generators available.
+   - On Unix, setting `CMAKE_GENERATOR=Ninja` in your environment will give
+     you automatic multithreading on all your CMake projects!
+* Open the `CMakeLists.txt` with QtCreator to generate for that IDE.
+* You can use `-DCMAKE_EXPORT_COMPILE_COMMANDS=ON` to generate the `.json` file
+ that some tools expect.
+
+</p></details>
+
+
+To run the tests, you can "build" the check target:
+
+```bash
+cmake --build build --target check
+```
+
+`--target` can be spelled `-t` in CMake 3.15+. You can also run individual
+tests with these targets:
+
+* `pytest`: Python tests only, using the
+[pytest](https://docs.pytest.org/en/stable/) framework
+* `cpptest`: C++ tests only
+* `test_cmake_build`: Install / subdirectory tests
+
+If you want to build just a subset of tests, use
+`-DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp"`. If this is
+empty, all tests will be built.
+
+You may also pass flags to the `pytest` target by editing `tests/pytest.ini` or
+by using the `PYTEST_ADDOPTS` environment variable
+(see [`pytest` docs](https://docs.pytest.org/en/2.7.3/customize.html#adding-default-options)). As an example:
+
+```bash
+env PYTEST_ADDOPTS="--capture=no --exitfirst" \
+ cmake --build build --target pytest
+# Or using abbreviated flags
+env PYTEST_ADDOPTS="-s -x" cmake --build build --target pytest
+```
+
+### Formatting
+
+All formatting is handled by pre-commit.
+
+Install with brew (macOS) or pip (any OS):
+
+```bash
+# Any OS
+python3 -m pip install pre-commit
+
+# OR macOS with homebrew:
+brew install pre-commit
+```
+
+Then, you can run it on the items you've added to your staging area, or all
+files:
+
+```bash
+pre-commit run
+# OR
+pre-commit run --all-files
+```
+
+And, if you want to always use it, you can install it as a git hook (hence the
+name, pre-commit):
+
+```bash
+pre-commit install
+```
+
+### Clang-Format
+
+As of v2.6.2, pybind11 ships with a [`clang-format`][clang-format]
+configuration file at the top level of the repo (the filename is
+`.clang-format`). Currently, formatting is NOT applied automatically, but
+manually using `clang-format` for newly developed files is highly encouraged.
+To check if a file needs formatting:
+
+```bash
+clang-format -style=file --dry-run some.cpp
+```
+
+The output will show things to be fixed, if any. To actually format the file:
+
+```bash
+clang-format -style=file -i some.cpp
+```
+
+Note that the `-style=file` option searches the parent directories for the
+`.clang-format` file, i.e. the commands above can be run in any subdirectory
+of the pybind11 repo.
+
+### Clang-Tidy
+
+[`clang-tidy`][clang-tidy] performs deeper static code analyses and is
+more complex to run, compared to `clang-format`, but support for `clang-tidy`
+is built into the pybind11 CMake configuration. To run `clang-tidy`, the
+following recipe should work. Files will be modified in place, so you can
+use git to monitor the changes.
+
+```bash
+docker run --rm -v $PWD:/pybind11 -it silkeh/clang:10
+apt-get update && apt-get install python3-dev python3-pytest
+cmake -S pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);-fix"
+cmake --build build
+```
+
+### Include what you use
+
+To run include what you use, install (`brew install include-what-you-use` on
+macOS), then run:
+
+```bash
+cmake -S . -B build-iwyu -DCMAKE_CXX_INCLUDE_WHAT_YOU_USE=$(which include-what-you-use)
+cmake --build build
+```
+
+The report is sent to stderr; you can pipe it into a file if you wish.
+
+### Build recipes
+
+This builds with the Intel compiler (assuming it is in your path, along with a
+recent CMake and Python 3):
+
+```bash
+python3 -m venv venv
+. venv/bin/activate
+pip install pytest
+cmake -S . -B build-intel -DCMAKE_CXX_COMPILER=$(which icpc) -DDOWNLOAD_CATCH=ON -DDOWNLOAD_EIGEN=ON -DPYBIND11_WERROR=ON
+```
+
+This will test the PGI compilers:
+
+```bash
+docker run --rm -it -v $PWD:/pybind11 nvcr.io/hpc/pgi-compilers:ce
+apt-get update && apt-get install -y python3-dev python3-pip python3-pytest
+wget -qO- "https://cmake.org/files/v3.18/cmake-3.18.2-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
+cmake -S pybind11/ -B build
+cmake --build build
+```
+
+### Explanation of the SDist/wheel building design
+
+> These details below are _only_ for packaging the Python sources from git. The
+> SDists and wheels created do not have any extra requirements at all and are
+> completely normal.
+
+The main objective of the packaging system is to create SDists (Python's source
+distribution packages) and wheels (Python's binary distribution packages) that
+include everything that is needed to work with pybind11, and which can be
+installed without any additional dependencies. This is more complex than it
+appears: in order to support CMake as a first class language even when using
+the PyPI package, they must include the _generated_ CMake files (so as not to
+require CMake when installing the `pybind11` package itself). They should also
+provide the option to install to the "standard" location
+(`<ENVROOT>/include/pybind11` and `<ENVROOT>/share/cmake/pybind11`) so they are
+easy to find with CMake, but this can cause problems if you are not in an
+environment or using ``pyproject.toml`` requirements. This was solved by having
+two packages; the "nice" pybind11 package that stores the includes and CMake
+files inside the package, that you get access to via functions in the package,
+and a `pybind11-global` package that can be included via `pybind11[global]` if
+you want the more invasive but discoverable file locations.
+
+If you want to install or package the GitHub source, it is best to have Pip 10
+or newer on Windows, macOS, or Linux (manylinux1 compatible, includes most
+distributions). You can then build the SDists, or run any procedure that makes
+SDists internally, like making wheels or installing.
+
+
+```bash
+# Editable development install example
+python3 -m pip install -e .
+```
+
+Since Pip itself does not have an `sdist` command (it does have `wheel` and
+`install`), you may want to use the upcoming `build` package:
+
+```bash
+python3 -m pip install build
+
+# Normal package
+python3 -m build -s .
+
+# Global extra
+PYBIND11_GLOBAL_SDIST=1 python3 -m build -s .
+```
+
+If you want to use the classic "direct" usage of `python setup.py`, you will
+need CMake 3.15+ and either `make` or `ninja` preinstalled (possibly via `pip
+install cmake ninja`), since directly running Python on `setup.py` cannot pick
+up and install `pyproject.toml` requirements. As long as you have those two
+things, though, everything works the way you would expect:
+
+```bash
+# Normal package
+python3 setup.py sdist
+
+# Global extra
+PYBIND11_GLOBAL_SDIST=1 python3 setup.py sdist
+```
+
+A detailed explanation of the build procedure design for developers wanting to
+work on or maintain the packaging system is as follows:
+
+#### 1. Building from the source directory
+
+When you invoke any `setup.py` command from the source directory, including
+`pip wheel .` and `pip install .`, you will activate a full source build. This
+is made of the following steps:
+
+1. If the tool is PEP 518 compliant, like Pip 10+, it will create a temporary
+ virtual environment and install the build requirements (mostly CMake) into
+ it. (if you are not on Windows, macOS, or a manylinux compliant system, you
+ can disable this with `--no-build-isolation` as long as you have CMake 3.15+
+ installed)
+2. The environment variable `PYBIND11_GLOBAL_SDIST` is checked - if it is set
+   and truthy, this will make the accessory `pybind11-global` package,
+ instead of the normal `pybind11` package. This package is used for
+ installing the files directly to your environment root directory, using
+ `pybind11[global]`.
+3. `setup.py` reads the version from `pybind11/_version.py` and verifies it
+   matches `include/pybind11/detail/common.h`.
+4. CMake is run with `-DCMAKE_INSTALL_PREFIX=pybind11`. Since the CMake install
+   procedure uses only relative paths and is identical on all platforms, these
+   files are valid as long as they stay in the correct relative position to the
+   includes. `pybind11/share/cmake/pybind11` has the CMake files, and
+   `pybind11/include` has the includes. The build directory is discarded.
+5. Simpler files are placed in the SDist: `tools/setup_*.py.in`,
+   `tools/pyproject.toml` (`main` or `global`)
+6. The package is created by running the setup function in the
+   `tools/setup_*.py`. `setup_main.py` fills in Python packages, and
+   `setup_global.py` fills in only the data/header slots.
+7. A context manager cleans up the temporary CMake install directory (even if
+   an error is thrown).
+
#### 2. Building from SDist
+
+Since the SDist has the rendered template files in `tools` along with the
+includes and CMake files in the correct locations, the builds are completely
+trivial and simple. No extra requirements are required. You can even use Pip 9
+if you really want to.
+
+
+[pre-commit]: https://pre-commit.com
+[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
+[clang-tidy]: https://clang.llvm.org/extra/clang-tidy/
+[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/latest
+[issue tracker]: https://github.com/pybind/pybind11/issues
+[gitter]: https://gitter.im/pybind/Lobby
+[using pull requests]: https://help.github.com/articles/using-pull-requests
--- /dev/null
+---
+name: Bug Report
+about: File an issue about a bug
+title: "[BUG] "
+---
+
+
+Make sure you've completed the following steps before submitting your issue -- thank you!
+
+1. Make sure you've read the [documentation][]. Your issue may be addressed there.
+2. Search the [issue tracker][] to verify that this hasn't already been reported. +1 or comment there if it has.
+3. Consider asking first in the [Gitter chat room][].
+4. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.
+ a. If possible, make a PR with a new, failing test to give us a starting point to work on!
+
+[documentation]: https://pybind11.readthedocs.io
+[issue tracker]: https://github.com/pybind/pybind11/issues
+[Gitter chat room]: https://gitter.im/pybind/Lobby
+
+*After reading, remove this checklist and the template text in parentheses below.*
+
+## Issue description
+
+(Provide a short description, state the expected behavior and what actually happens.)
+
+## Reproducible example code
+
+(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)
--- /dev/null
+blank_issues_enabled: false
+contact_links:
+ - name: Gitter room
+ url: https://gitter.im/pybind/Lobby
+ about: A room for discussing pybind11 with an active community
--- /dev/null
+---
+name: Feature Request
+about: File an issue about adding a feature
+title: "[FEAT] "
+---
+
+
+Make sure you've completed the following steps before submitting your issue -- thank you!
+
+1. Check if your feature has already been mentioned / rejected / planned in other issues.
+2. If those resources didn't help, consider asking in the [Gitter chat room][] to see if this is interesting / useful to a larger audience and possible to implement reasonably.
+3. If you have a useful feature that passes the previous items (or not suitable for chat), please fill in the details below.
+
+[Gitter chat room]: https://gitter.im/pybind/Lobby
+
+*After reading, remove this checklist.*
--- /dev/null
+---
+name: Question
+about: File an issue about unexplained behavior
+title: "[QUESTION] "
+---
+
+If you have a question, please check the following first:
+
+1. Check if your question has already been answered in the [FAQ][] section.
+2. Make sure you've read the [documentation][]. Your issue may be addressed there.
+3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][]
+4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer.
+5. If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
+6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible.
+
+[FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html
+[documentation]: https://pybind11.readthedocs.io
+[issue tracker]: https://github.com/pybind/pybind11/issues
+[Gitter chat room]: https://gitter.im/pybind/Lobby
+
+*After reading, remove this checklist.*
--- /dev/null
+version: 2
+updates:
+ # Maintain dependencies for GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ ignore:
+ # Official actions have moving tags like v1
+ # that are used, so they don't need updates here
+ - dependency-name: "actions/checkout"
+ - dependency-name: "actions/setup-python"
+ - dependency-name: "actions/cache"
+ - dependency-name: "actions/upload-artifact"
+ - dependency-name: "actions/download-artifact"
+ - dependency-name: "actions/labeler"
--- /dev/null
+docs:
+- any:
+ - 'docs/**/*.rst'
+ - '!docs/changelog.rst'
+ - '!docs/upgrade.rst'
+
+ci:
+- '.github/workflows/*.yml'
--- /dev/null
+needs changelog:
+- all:
+ - '!docs/changelog.rst'
--- /dev/null
+## Description
+
+<!-- Include relevant issues or PRs here, describe what changed and why -->
+
+
+## Suggested changelog entry:
+
+<!-- Fill in the below block with the expected RestructuredText entry. Delete if no entry needed;
+ but do not delete header or rst block if an entry is needed! Will be collected via a script. -->
+
+```rst
+
+```
+
+<!-- If the upgrade guide needs updating, note that here too -->
--- /dev/null
+name: CI
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - master
+ - stable
+ - v*
+
+jobs:
+ # This is the "main" test suite, which tests a large number of different
+ # versions of default compilers and Python versions in GitHub Actions.
+ standard:
+ strategy:
+ fail-fast: false
+ matrix:
+ runs-on: [ubuntu-latest, windows-latest, macos-latest]
+ python:
+ - 2.7
+ - 3.5
+ - 3.6
+ - 3.9
+ # - 3.10-dev # Re-enable once 3.10.0a5 is released
+ - pypy2
+ - pypy3
+
+ # Items in here will either be added to the build matrix (if not
+ # present), or add new keys to an existing matrix element if all the
+ # existing keys match.
+ #
+ # We support three optional keys: args (both build), args1 (first
+ # build), and args2 (second build).
+ include:
+ # Just add a key
+ - runs-on: ubuntu-latest
+ python: 3.6
+ args: >
+ -DPYBIND11_FINDPYTHON=ON
+ - runs-on: windows-latest
+ python: 3.6
+ args: >
+ -DPYBIND11_FINDPYTHON=ON
+
+ # These items will be removed from the build matrix, keys must match.
+ exclude:
+ # Currently 32bit only, and we build 64bit
+ - runs-on: windows-latest
+ python: pypy2
+ - runs-on: windows-latest
+ python: pypy3
+
+ # TODO: PyPy2 7.3.3 segfaults, while 7.3.2 was fine.
+ - runs-on: ubuntu-latest
+ python: pypy2
+
+ name: "🐍 ${{ matrix.python }} • ${{ matrix.runs-on }} • x64 ${{ matrix.args }}"
+ runs-on: ${{ matrix.runs-on }}
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup Python ${{ matrix.python }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Setup Boost (Windows / Linux latest)
+ shell: bash
+ run: echo "BOOST_ROOT=$BOOST_ROOT_1_72_0" >> $GITHUB_ENV
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Cache wheels
+ if: runner.os == 'macOS'
+ uses: actions/cache@v2
+ with:
+ # This path is specific to macOS - we really only need it for PyPy NumPy wheels
+ # See https://github.com/actions/cache/blob/master/examples.md#python---pip
+ # for ways to do this more generally
+ path: ~/Library/Caches/pip
+ # Look to see if there is a cache hit for the corresponding requirements file
+ key: ${{ runner.os }}-pip-${{ matrix.python }}-x64-${{ hashFiles('tests/requirements.txt') }}
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+ - name: Setup annotations on Linux
+ if: runner.os == 'Linux'
+ run: python -m pip install pytest-github-actions-annotate-failures
+
+ # First build - C++11 mode and inplace
+ - name: Configure C++11 ${{ matrix.args }}
+ run: >
+ cmake -S . -B .
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=11
+ ${{ matrix.args }}
+
+ - name: Build C++11
+ run: cmake --build . -j 2
+
+ - name: Python tests C++11
+ run: cmake --build . --target pytest -j 2
+
+ - name: C++11 tests
+ # TODO: Figure out how to load the DLL on Python 3.8+
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10-dev'))"
+ run: cmake --build . --target cpptest -j 2
+
+ - name: Interface test C++11
+ run: cmake --build . --target test_cmake_build
+
+ - name: Clean directory
+ run: git clean -fdx
+
+ # Second build - C++17 mode and in a build directory
+ - name: Configure ${{ matrix.args2 }}
+ run: >
+ cmake -S . -B build2
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=17
+ ${{ matrix.args }}
+ ${{ matrix.args2 }}
+
+ - name: Build
+ run: cmake --build build2 -j 2
+
+ - name: Python tests
+ run: cmake --build build2 --target pytest
+
+ - name: C++ tests
+ # TODO: Figure out how to load the DLL on Python 3.8+
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10-dev'))"
+ run: cmake --build build2 --target cpptest
+
+ - name: Interface test
+ run: cmake --build build2 --target test_cmake_build
+
+ # Eventually Microsoft might have an action for setting up
+ # MSVC, but for now, this action works:
+ - name: Prepare compiler environment for Windows 🐍 2.7
+ if: matrix.python == 2.7 && runner.os == 'Windows'
+ uses: ilammy/msvc-dev-cmd@v1
+ with:
+ arch: x64
+
+ # This makes two environment variables available in the following step(s)
+ - name: Set Windows 🐍 2.7 environment variables
+ if: matrix.python == 2.7 && runner.os == 'Windows'
+ shell: bash
+ run: |
+ echo "DISTUTILS_USE_SDK=1" >> $GITHUB_ENV
+ echo "MSSdk=1" >> $GITHUB_ENV
+
+ # This makes sure the setup_helpers module can build packages using
+ # setuptools
+ - name: Setuptools helpers test
+ run: pytest tests/extra_setuptools
+
+
+ deadsnakes:
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - python-version: 3.9
+ python-debug: true
+ valgrind: true
+ - python-version: 3.10-dev
+ python-debug: false
+
+ name: "🐍 ${{ matrix.python-version }}${{ matrix.python-debug && '-dbg' || '' }} (deadsnakes)${{ matrix.valgrind && ' • Valgrind' || '' }} • x64"
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup Python ${{ matrix.python-version }} (deadsnakes)
+ uses: deadsnakes/action@v2.1.1
+ with:
+ python-version: ${{ matrix.python-version }}
+ debug: ${{ matrix.python-debug }}
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Valgrind cache
+ if: matrix.valgrind
+ uses: actions/cache@v2
+ id: cache-valgrind
+ with:
+ path: valgrind
+ key: 3.16.1 # Valgrind version
+
+ - name: Compile Valgrind
+ if: matrix.valgrind && steps.cache-valgrind.outputs.cache-hit != 'true'
+ run: |
+ VALGRIND_VERSION=3.16.1
+ curl https://sourceware.org/pub/valgrind/valgrind-$VALGRIND_VERSION.tar.bz2 -o - | tar xj
+ mv valgrind-$VALGRIND_VERSION valgrind
+ cd valgrind
+ ./configure
+ make -j 2 > /dev/null
+
+ - name: Install Valgrind
+ if: matrix.valgrind
+ working-directory: valgrind
+ run: |
+ sudo make install
+ sudo apt-get update
+ sudo apt-get install libc6-dbg # Needed by Valgrind
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+ - name: Configure
+ run: >
+ cmake -S . -B build
+ -DCMAKE_BUILD_TYPE=Debug
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=17
+
+ - name: Build
+ run: cmake --build build -j 2
+
+ - name: Python tests
+ run: cmake --build build --target pytest
+
+ - name: C++ tests
+ run: cmake --build build --target cpptest
+
+ - name: Run Valgrind on Python tests
+ if: matrix.valgrind
+ run: cmake --build build --target memcheck
+
+
+ # Testing on clang using the excellent silkeh clang docker images
+ clang:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ clang:
+ - 3.6
+ - 3.7
+ - 3.9
+ - 7
+ - 9
+ - dev
+ std:
+ - 11
+ include:
+ - clang: 5
+ std: 14
+ - clang: 10
+ std: 20
+ - clang: 10
+ std: 17
+
+ name: "🐍 3 • Clang ${{ matrix.clang }} • C++${{ matrix.std }} • x64"
+ container: "silkeh/clang:${{ matrix.clang }}"
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Add wget and python3
+ run: apt-get update && apt-get install -y python3-dev python3-numpy python3-pytest libeigen3-dev
+
+ - name: Configure
+ shell: bash
+ run: >
+ cmake -S . -B build
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DCMAKE_CXX_STANDARD=${{ matrix.std }}
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build
+ run: cmake --build build -j 2
+
+ - name: Python tests
+ run: cmake --build build --target pytest
+
+ - name: C++ tests
+ run: cmake --build build --target cpptest
+
+ - name: Interface test
+ run: cmake --build build --target test_cmake_build
+
+
+ # Testing NVCC; forces sources to behave like .cu files
+ cuda:
+ runs-on: ubuntu-latest
+ name: "🐍 3.8 • CUDA 11 • Ubuntu 20.04"
+ container: nvidia/cuda:11.0-devel-ubuntu20.04
+
+ steps:
+ - uses: actions/checkout@v2
+
+ # tzdata will try to ask for the timezone, so set the DEBIAN_FRONTEND
+ - name: Install 🐍 3
+ run: apt-get update && DEBIAN_FRONTEND="noninteractive" apt-get install -y cmake git python3-dev python3-pytest python3-numpy
+
+ - name: Configure
+ run: cmake -S . -B build -DPYBIND11_CUDA_TESTS=ON -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON
+
+ - name: Build
+ run: cmake --build build -j2 --verbose
+
+ - name: Python tests
+ run: cmake --build build --target pytest
+
+
+# TODO: Internal compiler error - report to NVidia
+# # Testing CentOS 8 + PGI compilers
+# centos-nvhpc8:
+# runs-on: ubuntu-latest
+# name: "🐍 3 • CentOS8 / PGI 20.11 • x64"
+# container: centos:8
+#
+# steps:
+# - uses: actions/checkout@v2
+#
+# - name: Add Python 3 and a few requirements
+# run: yum update -y && yum install -y git python3-devel python3-numpy python3-pytest make environment-modules
+#
+# - name: Install CMake with pip
+# run: |
+# python3 -m pip install --upgrade pip
+# python3 -m pip install cmake --prefer-binary
+#
+# - name: Install NVidia HPC SDK
+# run: >
+# yum -y install
+# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-20-11-20.11-1.x86_64.rpm
+# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-2020-20.11-1.x86_64.rpm
+#
+# - name: Configure
+# shell: bash
+# run: |
+# source /etc/profile.d/modules.sh
+# module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.11
+# cmake -S . -B build -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=14 -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+#
+# - name: Build
+# run: cmake --build build -j 2 --verbose
+#
+# - name: Python tests
+# run: cmake --build build --target pytest
+#
+# - name: C++ tests
+# run: cmake --build build --target cpptest
+#
+# - name: Interface test
+# run: cmake --build build --target test_cmake_build
+
+
+ # Testing on CentOS 7 + PGI compilers, which seems to require more workarounds
+ centos-nvhpc7:
+ runs-on: ubuntu-latest
+ name: "🐍 3 • CentOS7 / PGI 20.9 • x64"
+ container: centos:7
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Add Python 3 and a few requirements
+ run: yum update -y && yum install -y epel-release && yum install -y git python3-devel make environment-modules cmake3
+
+ - name: Install NVidia HPC SDK
+ run: yum -y install https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-20-9-20.9-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-2020-20.9-1.x86_64.rpm
+
+ # On CentOS 7, we have to filter a few tests (compiler internal error)
+      # and allow deeper template recursion (not needed on CentOS 8 with a newer
+      # standard library). On some systems, you may need further workarounds:
+ # https://github.com/pybind/pybind11/pull/2475
+ - name: Configure
+ shell: bash
+ run: |
+ source /etc/profile.d/modules.sh
+ module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.9
+ cmake3 -S . -B build -DDOWNLOAD_CATCH=ON \
+ -DCMAKE_CXX_STANDARD=11 \
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") \
+ -DCMAKE_CXX_FLAGS="-Wc,--pending_instantiations=0" \
+ -DPYBIND11_TEST_FILTER="test_smart_ptr.cpp;test_virtual_functions.cpp"
+
+ # Building before installing Pip should produce a warning but not an error
+ - name: Build
+ run: cmake3 --build build -j 2 --verbose
+
+ - name: Install CMake with pip
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install pytest
+
+ - name: Python tests
+ run: cmake3 --build build --target pytest
+
+ - name: C++ tests
+ run: cmake3 --build build --target cpptest
+
+ - name: Interface test
+ run: cmake3 --build build --target test_cmake_build
+
+
+ # Testing on GCC using the GCC docker images (only recent images supported)
+ gcc:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ gcc:
+ - 7
+ - latest
+ std:
+ - 11
+ include:
+ - gcc: 10
+ std: 20
+
+    name: "🐍 3 • GCC ${{ matrix.gcc }} • C++${{ matrix.std }} • x64"
+ container: "gcc:${{ matrix.gcc }}"
+
+ steps:
+ - uses: actions/checkout@v1
+
+ - name: Add Python 3
+ run: apt-get update; apt-get install -y python3-dev python3-numpy python3-pytest python3-pip libeigen3-dev
+
+ - name: Update pip
+ run: python3 -m pip install --upgrade pip
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Configure
+ shell: bash
+ run: >
+ cmake -S . -B build
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DCMAKE_CXX_STANDARD=${{ matrix.std }}
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build
+ run: cmake --build build -j 2
+
+ - name: Python tests
+ run: cmake --build build --target pytest
+
+ - name: C++ tests
+ run: cmake --build build --target cpptest
+
+ - name: Interface test
+ run: cmake --build build --target test_cmake_build
+
+
+ # Testing on ICC using the oneAPI apt repo
+ icc:
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+
+ name: "🐍 3 • ICC latest • x64"
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Add apt repo
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y wget build-essential pkg-config cmake ca-certificates gnupg
+ wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
+ sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
+ echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
+
+ - name: Add ICC & Python 3
+ run: sudo apt-get update; sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic cmake python3-dev python3-numpy python3-pytest python3-pip
+
+ - name: Update pip
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ python3 -m pip install --upgrade pip
+
+ - name: Install dependencies
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ python3 -m pip install -r tests/requirements.txt --prefer-binary
+
+ - name: Configure C++11
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake -S . -B build-11 \
+ -DPYBIND11_WERROR=ON \
+ -DDOWNLOAD_CATCH=ON \
+ -DDOWNLOAD_EIGEN=OFF \
+ -DCMAKE_CXX_STANDARD=11 \
+ -DCMAKE_CXX_COMPILER=$(which icpc) \
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build C++11
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-11 -j 2 -v
+
+ - name: Python tests C++11
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ sudo service apport stop
+ cmake --build build-11 --target check
+
+ - name: C++ tests C++11
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-11 --target cpptest
+
+ - name: Interface test C++11
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-11 --target test_cmake_build
+
+ - name: Configure C++17
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake -S . -B build-17 \
+ -DPYBIND11_WERROR=ON \
+ -DDOWNLOAD_CATCH=ON \
+ -DDOWNLOAD_EIGEN=OFF \
+ -DCMAKE_CXX_STANDARD=17 \
+ -DCMAKE_CXX_COMPILER=$(which icpc) \
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build C++17
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-17 -j 2 -v
+
+ - name: Python tests C++17
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ sudo service apport stop
+ cmake --build build-17 --target check
+
+ - name: C++ tests C++17
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-17 --target cpptest
+
+ - name: Interface test C++17
+ run: |
+ set +e; source /opt/intel/oneapi/setvars.sh; set -e
+ cmake --build build-17 --target test_cmake_build
+
+
+ # Testing on CentOS (manylinux uses a centos base, and this is an easy way
+ # to get GCC 4.8, which is the manylinux1 compiler).
+ centos:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ centos:
+ - 7 # GCC 4.8
+ - 8
+
+ name: "🐍 3 • CentOS ${{ matrix.centos }} • x64"
+ container: "centos:${{ matrix.centos }}"
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Add Python 3
+ run: yum update -y && yum install -y python3-devel gcc-c++ make git
+
+ - name: Update pip
+ run: python3 -m pip install --upgrade pip
+
+ - name: Install dependencies
+ run: python3 -m pip install cmake -r tests/requirements.txt --prefer-binary
+
+ - name: Configure
+ shell: bash
+ run: >
+ cmake -S . -B build
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=11
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build
+ run: cmake --build build -j 2
+
+ - name: Python tests
+ run: cmake --build build --target pytest
+
+ - name: C++ tests
+ run: cmake --build build --target cpptest
+
+ - name: Interface test
+ run: cmake --build build --target test_cmake_build
+
+
+ # This tests an "install" with the CMake tools
+ install-classic:
+ name: "🐍 3.5 • Debian • x86 • Install"
+ runs-on: ubuntu-latest
+ container: i386/debian:stretch
+
+ steps:
+ - uses: actions/checkout@v1
+
+ - name: Install requirements
+ run: |
+ apt-get update
+ apt-get install -y git make cmake g++ libeigen3-dev python3-dev python3-pip
+ pip3 install "pytest==3.1.*"
+
+ - name: Configure for install
+ run: >
+ cmake .
+ -DPYBIND11_INSTALL=1 -DPYBIND11_TEST=0
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Make and install
+ run: make install
+
+ - name: Copy tests to new directory
+ run: cp -a tests /pybind11-tests
+
+ - name: Make a new test directory
+ run: mkdir /build-tests
+
+ - name: Configure tests
+ run: >
+ cmake ../pybind11-tests
+ -DDOWNLOAD_CATCH=ON
+ -DPYBIND11_WERROR=ON
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+ working-directory: /build-tests
+
+ - name: Run tests
+ run: make pytest -j 2
+ working-directory: /build-tests
+
+
+ # This verifies that the documentation is not horribly broken, and does a
+ # basic sanity check on the SDist.
+ doxygen:
+ name: "Documentation build test"
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/setup-python@v2
+
+ - name: Install Doxygen
+ run: sudo apt-get install -y doxygen librsvg2-bin # Changed to rsvg-convert in 20.04
+
+ - name: Install docs & setup requirements
+ run: python3 -m pip install -r docs/requirements.txt
+
+ - name: Build docs
+ run: python3 -m sphinx -W -b html docs docs/.build
+
+ - name: Make SDist
+ run: python3 setup.py sdist
+
+ - run: git status --ignored
+
+ - name: Check local include dir
+ run: >
+ ls pybind11;
+ python3 -c "import pybind11, pathlib; assert (a := pybind11.get_include()) == (b := str(pathlib.Path('include').resolve())), f'{a} != {b}'"
+
+ - name: Compare Dists (headers only)
+ working-directory: include
+ run: |
+ python3 -m pip install --user -U ../dist/*
+ installed=$(python3 -c "import pybind11; print(pybind11.get_include() + '/pybind11')")
+ diff -rq $installed ./pybind11
+
+ win32:
+ strategy:
+ fail-fast: false
+ matrix:
+ python:
+ - 3.5
+ - 3.6
+ - 3.7
+ - 3.8
+ - 3.9
+ - pypy3
+ # TODO: fix hang on pypy2
+
+ include:
+ - python: 3.9
+ args: -DCMAKE_CXX_STANDARD=20 -DDOWNLOAD_EIGEN=OFF
+ - python: 3.8
+ args: -DCMAKE_CXX_STANDARD=17
+
+ name: "🐍 ${{ matrix.python }} • MSVC 2019 • x86 ${{ matrix.args }}"
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup Python ${{ matrix.python }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ architecture: x86
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Prepare MSVC
+ uses: ilammy/msvc-dev-cmd@v1
+ with:
+ arch: x86
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+ # First build - C++11 mode and inplace
+ - name: Configure ${{ matrix.args }}
+ run: >
+ cmake -S . -B build
+ -G "Visual Studio 16 2019" -A Win32
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ ${{ matrix.args }}
+ - name: Build C++11
+ run: cmake --build build -j 2
+
+ - name: Run tests
+ run: cmake --build build -t pytest
+
+ win32-msvc2015:
+ name: "🐍 ${{ matrix.python }} • MSVC 2015 • x64"
+ runs-on: windows-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python:
+ - 2.7
+ - 3.6
+ - 3.7
+ # todo: check/cpptest does not support 3.8+ yet
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup 🐍 ${{ matrix.python }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Prepare MSVC
+ uses: ilammy/msvc-dev-cmd@v1
+ with:
+ toolset: 14.0
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+      # First build - C++14 mode and inplace
+ - name: Configure
+ run: >
+ cmake -S . -B build
+ -G "Visual Studio 14 2015" -A x64
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+
+ - name: Build C++14
+ run: cmake --build build -j 2
+
+ - name: Run all checks
+ run: cmake --build build -t check
+
+
+ win32-msvc2017:
+ name: "🐍 ${{ matrix.python }} • MSVC 2017 • x64"
+ runs-on: windows-2016
+ strategy:
+ fail-fast: false
+ matrix:
+ python:
+ - 2.7
+ - 3.5
+ - 3.7
+ std:
+ - 14
+
+ include:
+ - python: 2.7
+ std: 17
+ args: >
+ -DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup 🐍 ${{ matrix.python }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.7
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+      # First build - C++${{ matrix.std }} mode and inplace
+ - name: Configure
+ run: >
+ cmake -S . -B build
+ -G "Visual Studio 15 2017" -A x64
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=${{ matrix.std }}
+ ${{ matrix.args }}
+
+ - name: Build ${{ matrix.std }}
+ run: cmake --build build -j 2
+
+ - name: Run all checks
+ run: cmake --build build -t check
--- /dev/null
+name: Config
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - master
+ - stable
+ - v*
+
+jobs:
+ # This tests various versions of CMake in various combinations, to make sure
+ # the configure step passes.
+ cmake:
+ strategy:
+ fail-fast: false
+ matrix:
+ runs-on: [ubuntu-latest, macos-latest, windows-latest]
+ arch: [x64]
+ cmake: [3.18]
+
+ include:
+ - runs-on: ubuntu-latest
+ arch: x64
+ cmake: 3.4
+
+ - runs-on: macos-latest
+ arch: x64
+ cmake: 3.7
+
+ - runs-on: windows-2016
+ arch: x86
+ cmake: 3.8
+
+ - runs-on: windows-2016
+ arch: x86
+ cmake: 3.18
+
+ name: 🐍 3.7 • CMake ${{ matrix.cmake }} • ${{ matrix.runs-on }}
+ runs-on: ${{ matrix.runs-on }}
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup Python 3.7
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.7
+ architecture: ${{ matrix.arch }}
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt
+
+ # An action for adding a specific version of CMake:
+ # https://github.com/jwlawson/actions-setup-cmake
+ - name: Setup CMake ${{ matrix.cmake }}
+ uses: jwlawson/actions-setup-cmake@v1.7
+ with:
+ cmake-version: ${{ matrix.cmake }}
+
+ # These steps use a directory with a space in it intentionally
+ - name: Make build directories
+ run: mkdir "build dir"
+
+ - name: Configure
+ working-directory: build dir
+ shell: bash
+ run: >
+ cmake ..
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DPYTHON_EXECUTABLE=$(python -c "import sys; print(sys.executable)")
+
+ # Only build and test if this was manually triggered in the GitHub UI
+ - name: Build
+ working-directory: build dir
+ if: github.event_name == 'workflow_dispatch'
+ run: cmake --build . --config Release
+
+ - name: Test
+ working-directory: build dir
+ if: github.event_name == 'workflow_dispatch'
+ run: cmake --build . --config Release --target check
--- /dev/null
+# This is a format job. Pre-commit has a first-party GitHub action, so we use
+# that: https://github.com/pre-commit/action
+
+name: Format
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - master
+ - stable
+ - "v*"
+
+jobs:
+ pre-commit:
+ name: Format
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - uses: pre-commit/action@v2.0.0
+ with:
+ # Slow hooks are marked with manual - slow is okay here, run them too
+ extra_args: --hook-stage manual --all-files
+
+ clang-tidy:
+ name: Clang-Tidy
+ runs-on: ubuntu-latest
+ container: silkeh/clang:10
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Install requirements
+ run: apt-get update && apt-get install -y python3-dev python3-pytest
+
+ - name: Configure
+ run: >
+ cmake -S . -B build
+ -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);--warnings-as-errors=*"
+ -DDOWNLOAD_EIGEN=ON
+ -DDOWNLOAD_CATCH=ON
+ -DCMAKE_CXX_STANDARD=17
+
+ - name: Build
+ run: cmake --build build -j 2
--- /dev/null
+name: Labeler
+on:
+ pull_request_target:
+ types: [closed]
+
+jobs:
+ label:
+ name: Labeler
+ runs-on: ubuntu-latest
+ steps:
+
+ - uses: actions/labeler@main
+ if: github.event.pull_request.merged == true
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ configuration-path: .github/labeler_merged.yml
--- /dev/null
+name: Pip
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - master
+ - stable
+ - v*
+ release:
+ types:
+ - published
+
+jobs:
+ # This builds the sdists and wheels and makes sure the files are exactly as
+ # expected. Using Windows and Python 2.7, since that is often the most
+ # challenging matrix element.
+ test-packaging:
+ name: 🐍 2.7 • 📦 tests • windows-latest
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup 🐍 2.7
+ uses: actions/setup-python@v2
+ with:
+ python-version: 2.7
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt --prefer-binary
+
+ - name: Python Packaging tests
+ run: pytest tests/extra_python_package/
+
+
+ # This runs the packaging tests and also builds and saves the packages as
+ # artifacts.
+ packaging:
+ name: 🐍 3.8 • 📦 & 📦 tests • ubuntu-latest
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup 🐍 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Prepare env
+ run: python -m pip install -r tests/requirements.txt build twine --prefer-binary
+
+ - name: Python Packaging tests
+ run: pytest tests/extra_python_package/
+
+ - name: Build SDist and wheels
+ run: |
+ python -m build
+ PYBIND11_GLOBAL_SDIST=1 python -m build
+
+ - name: Check metadata
+ run: twine check dist/*
+
+ - name: Save standard package
+ uses: actions/upload-artifact@v2
+ with:
+ name: standard
+ path: dist/pybind11-*
+
+ - name: Save global package
+ uses: actions/upload-artifact@v2
+ with:
+ name: global
+ path: dist/pybind11_global-*
+
+
+
+ # When a GitHub release is made, upload the artifacts to PyPI
+ upload:
+ name: Upload to PyPI
+ runs-on: ubuntu-latest
+ if: github.event_name == 'release' && github.event.action == 'published'
+ needs: [packaging]
+
+ steps:
+ - uses: actions/setup-python@v2
+
+ # Downloads all to directories matching the artifact names
+ - uses: actions/download-artifact@v2
+
+ - name: Publish standard package
+ uses: pypa/gh-action-pypi-publish@v1.4.1
+ with:
+ password: ${{ secrets.pypi_password }}
+ packages_dir: standard/
+
+ - name: Publish global package
+ uses: pypa/gh-action-pypi-publish@v1.4.1
+ with:
+ password: ${{ secrets.pypi_password_global }}
+ packages_dir: global/
CMakeFiles
Makefile
cmake_install.cmake
+cmake_uninstall.cmake
.DS_Store
*.so
*.pyd
*.sdf
*.opensdf
*.vcxproj
+*.vcxproj.user
*.filters
example.dir
Win32
.*.swp
.DS_Store
/dist
-/build
-/cmake/
+/*build*
.cache/
sosize-*.txt
pybind11Config*.cmake
pybind11Targets.cmake
+/*env*
+/.vscode
+/pybind11/include/*
+/pybind11/share/*
+++ /dev/null
-[submodule "tools/clang"]
- path = tools/clang
- url = ../../wjakob/clang-cindex-python3
--- /dev/null
+# To use:
+#
+# pre-commit run -a
+#
+# Or:
+#
+# pre-commit install # (runs every time you commit in git)
+#
+# To update this file:
+#
+# pre-commit autoupdate
+#
+# See https://github.com/pre-commit/pre-commit
+
+repos:
+# Standard hooks
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.4.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-yaml
+ - id: debug-statements
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ - id: requirements-txt-fixer
+ - id: trailing-whitespace
+ - id: fix-encoding-pragma
+
+# Black, the code formatter, natively supports pre-commit
+- repo: https://github.com/psf/black
+ rev: 20.8b1
+ hooks:
+ - id: black
+ # By default, this ignores pyi files, though black supports them
+ types: [text]
+ files: \.pyi?$
+
+# Changes tabs to spaces
+- repo: https://github.com/Lucas-C/pre-commit-hooks
+ rev: v1.1.9
+ hooks:
+ - id: remove-tabs
+
+# Flake8 also supports pre-commit natively (same author)
+- repo: https://gitlab.com/pycqa/flake8
+ rev: 3.8.4
+ hooks:
+ - id: flake8
+ additional_dependencies: [flake8-bugbear, pep8-naming]
+ exclude: ^(docs/.*|tools/.*)$
+
+# CMake formatting
+- repo: https://github.com/cheshirekow/cmake-format-precommit
+ rev: v0.6.13
+ hooks:
+ - id: cmake-format
+ additional_dependencies: [pyyaml]
+ types: [file]
+ files: (\.cmake|CMakeLists.txt)(.in)?$
+
+# Check static types with mypy
+- repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v0.800
+ hooks:
+ - id: mypy
+ # The default Python type ignores .pyi files, so let's rerun if detected
+ types: [text]
+ files: ^pybind11.*\.pyi?$
+ # Running per-file misbehaves a bit, so just run on all files, it's fast
+ pass_filenames: false
+
+# Checks the manifest for missing files (native support)
+- repo: https://github.com/mgedmin/check-manifest
+ rev: "0.46"
+ hooks:
+ - id: check-manifest
+ # This is a slow hook, so only run this if --hook-stage manual is passed
+ stages: [manual]
+ additional_dependencies: [cmake, ninja]
+
+# The original pybind11 checks for a few C++ style items
+- repo: local
+ hooks:
+ - id: disallow-caps
+ name: Disallow improper capitalization
+ language: pygrep
+ entry: PyBind|Numpy|Cmake|CCache
+ exclude: .pre-commit-config.yaml
+
+- repo: local
+ hooks:
+ - id: check-style
+ name: Classic check-style
+ language: system
+ types:
+ - c++
+ entry: ./tools/check-style.sh
+++ /dev/null
-language: cpp
-matrix:
- include:
- # This config does a few things:
- # - Checks C++ and Python code styles (check-style.sh and flake8).
- # - Makes sure sphinx can build the docs without any errors or warnings.
- # - Tests setup.py sdist and install (all header files should be present).
- # - Makes sure that everything still works without optional deps (numpy/scipy/eigen) and
- # also tests the automatic discovery functions in CMake (Python version, C++ standard).
- - os: linux
- dist: xenial # Necessary to run doxygen 1.8.15
- name: Style, docs, and pip
- cache: false
- before_install:
- - pyenv global $(pyenv whence 2to3) # activate all python versions
- - PY_CMD=python3
- - $PY_CMD -m pip install --user --upgrade pip wheel setuptools
- install:
- - $PY_CMD -m pip install --user --upgrade sphinx sphinx_rtd_theme breathe flake8 pep8-naming pytest
- - curl -fsSL https://sourceforge.net/projects/doxygen/files/rel-1.8.15/doxygen-1.8.15.linux.bin.tar.gz/download | tar xz
- - export PATH="$PWD/doxygen-1.8.15/bin:$PATH"
- script:
- - tools/check-style.sh
- - flake8
- - $PY_CMD -m sphinx -W -b html docs docs/.build
- - |
- # Make sure setup.py distributes and installs all the headers
- $PY_CMD setup.py sdist
- $PY_CMD -m pip install --user -U ./dist/*
- installed=$($PY_CMD -c "import pybind11; print(pybind11.get_include(True) + '/pybind11')")
- diff -rq $installed ./include/pybind11
- - |
- # Barebones build
- cmake -DCMAKE_BUILD_TYPE=Debug -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON -DPYTHON_EXECUTABLE=$(which $PY_CMD) .
- make pytest -j 2
- make cpptest -j 2
- # The following are regular test configurations, including optional dependencies.
- # With regard to each other they differ in Python version, C++ standard and compiler.
- - os: linux
- dist: trusty
- name: Python 2.7, c++11, gcc 4.8
- env: PYTHON=2.7 CPP=11 GCC=4.8
- addons:
- apt:
- packages:
- - cmake=2.\*
- - cmake-data=2.\*
- - os: linux
- dist: trusty
- name: Python 3.6, c++11, gcc 4.8
- env: PYTHON=3.6 CPP=11 GCC=4.8
- addons:
- apt:
- sources:
- - deadsnakes
- packages:
- - python3.6-dev
- - python3.6-venv
- - cmake=2.\*
- - cmake-data=2.\*
- - os: linux
- dist: trusty
- env: PYTHON=2.7 CPP=14 GCC=6 CMAKE=1
- name: Python 2.7, c++14, gcc 4.8, CMake test
- addons:
- apt:
- sources:
- - ubuntu-toolchain-r-test
- packages:
- - g++-6
- - os: linux
- dist: trusty
- name: Python 3.5, c++14, gcc 6, Debug build
- # N.B. `ensurepip` could be installed transitively by `python3.5-venv`, but
- # seems to have apt conflicts (at least for Trusty). Use Docker instead.
- services: docker
- env: DOCKER=debian:stretch PYTHON=3.5 CPP=14 GCC=6 DEBUG=1
- - os: linux
- dist: xenial
- env: PYTHON=3.6 CPP=17 GCC=7
- name: Python 3.6, c++17, gcc 7
- addons:
- apt:
- sources:
- - deadsnakes
- - ubuntu-toolchain-r-test
- packages:
- - g++-7
- - python3.6-dev
- - python3.6-venv
- - os: linux
- dist: xenial
- env: PYTHON=3.6 CPP=17 CLANG=7
- name: Python 3.6, c++17, Clang 7
- addons:
- apt:
- sources:
- - deadsnakes
- - llvm-toolchain-xenial-7
- packages:
- - python3.6-dev
- - python3.6-venv
- - clang-7
- - libclang-7-dev
- - llvm-7-dev
- - lld-7
- - libc++-7-dev
- - libc++abi-7-dev # Why is this necessary???
- - os: osx
- name: Python 2.7, c++14, AppleClang 7.3, CMake test
- osx_image: xcode7.3
- env: PYTHON=2.7 CPP=14 CLANG CMAKE=1
- - os: osx
- name: Python 3.7, c++14, AppleClang 9, Debug build
- osx_image: xcode9
- env: PYTHON=3.7 CPP=14 CLANG DEBUG=1
- # Test a PyPy 2.7 build
- - os: linux
- dist: trusty
- env: PYPY=5.8 PYTHON=2.7 CPP=11 GCC=4.8
- name: PyPy 5.8, Python 2.7, c++11, gcc 4.8
- addons:
- apt:
- packages:
- - libblas-dev
- - liblapack-dev
- - gfortran
- # Build in 32-bit mode and tests against the CMake-installed version
- - os: linux
- dist: trusty
- services: docker
- env: DOCKER=i386/debian:stretch PYTHON=3.5 CPP=14 GCC=6 INSTALL=1
- name: Python 3.4, c++14, gcc 6, 32-bit
- script:
- - |
- # Consolidated 32-bit Docker Build + Install
- set -ex
- $SCRIPT_RUN_PREFIX sh -c "
- set -ex
- cmake ${CMAKE_EXTRA_ARGS} -DPYBIND11_INSTALL=1 -DPYBIND11_TEST=0 .
- make install
- cp -a tests /pybind11-tests
- mkdir /build-tests && cd /build-tests
- cmake ../pybind11-tests ${CMAKE_EXTRA_ARGS} -DPYBIND11_WERROR=ON
- make pytest -j 2"
- set +ex
-cache:
- directories:
- - $HOME/.local/bin
- - $HOME/.local/lib
- - $HOME/.local/include
- - $HOME/Library/Python
-before_install:
-- |
- # Configure build variables
- set -ex
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
- if [ -n "$CLANG" ]; then
- export CXX=clang++-$CLANG CC=clang-$CLANG
- EXTRA_PACKAGES+=" clang-$CLANG llvm-$CLANG-dev"
- else
- if [ -z "$GCC" ]; then GCC=4.8
- else EXTRA_PACKAGES+=" g++-$GCC"
- fi
- export CXX=g++-$GCC CC=gcc-$GCC
- fi
- elif [ "$TRAVIS_OS_NAME" = "osx" ]; then
- export CXX=clang++ CC=clang;
- fi
- if [ -n "$CPP" ]; then CPP=-std=c++$CPP; fi
- if [ "${PYTHON:0:1}" = "3" ]; then PY=3; fi
- if [ -n "$DEBUG" ]; then CMAKE_EXTRA_ARGS+=" -DCMAKE_BUILD_TYPE=Debug"; fi
- set +ex
-- |
- # Initialize environment
- set -ex
- if [ -n "$DOCKER" ]; then
- docker pull $DOCKER
-
- containerid=$(docker run --detach --tty \
- --volume="$PWD":/pybind11 --workdir=/pybind11 \
- --env="CC=$CC" --env="CXX=$CXX" --env="DEBIAN_FRONTEND=$DEBIAN_FRONTEND" \
- --env=GCC_COLORS=\ \
- $DOCKER)
- SCRIPT_RUN_PREFIX="docker exec --tty $containerid"
- $SCRIPT_RUN_PREFIX sh -c 'for s in 0 15; do sleep $s; apt-get update && apt-get -qy dist-upgrade && break; done'
- else
- if [ "$PYPY" = "5.8" ]; then
- curl -fSL https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.8.0-linux64.tar.bz2 | tar xj
- PY_CMD=$(echo `pwd`/pypy2-v5.8.0-linux64/bin/pypy)
- CMAKE_EXTRA_ARGS+=" -DPYTHON_EXECUTABLE:FILEPATH=$PY_CMD"
- else
- PY_CMD=python$PYTHON
- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
- if [ "$PY" = "3" ]; then
- brew update && brew upgrade python
- else
- curl -fsSL https://bootstrap.pypa.io/get-pip.py | $PY_CMD - --user
- fi
- fi
- fi
- if [ "$PY" = 3 ] || [ -n "$PYPY" ]; then
- $PY_CMD -m ensurepip --user
- fi
- $PY_CMD --version
- $PY_CMD -m pip install --user --upgrade pip wheel
- fi
- set +ex
-install:
-- |
- # Install dependencies
- set -ex
- cmake --version
- if [ -n "$DOCKER" ]; then
- if [ -n "$DEBUG" ]; then
- PY_DEBUG="python$PYTHON-dbg python$PY-scipy-dbg"
- CMAKE_EXTRA_ARGS+=" -DPYTHON_EXECUTABLE=/usr/bin/python${PYTHON}dm"
- fi
- $SCRIPT_RUN_PREFIX sh -c "for s in 0 15; do sleep \$s; \
- apt-get -qy --no-install-recommends install \
- $PY_DEBUG python$PYTHON-dev python$PY-pytest python$PY-scipy \
- libeigen3-dev libboost-dev cmake make ${EXTRA_PACKAGES} && break; done"
- else
-
- if [ "$CLANG" = "7" ]; then
- export CXXFLAGS="-stdlib=libc++"
- fi
-
- export NPY_NUM_BUILD_JOBS=2
- echo "Installing pytest, numpy, scipy..."
- local PIP_CMD=""
- if [ -n $PYPY ]; then
- # For expediency, install only versions that are available on the extra index.
- travis_wait 30 \
- $PY_CMD -m pip install --user --upgrade --extra-index-url https://imaginary.ca/trusty-pypi \
- pytest numpy==1.15.4 scipy==1.2.0
- else
- $PY_CMD -m pip install --user --upgrade pytest numpy scipy
- fi
- echo "done."
-
- mkdir eigen
- curl -fsSL https://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 | \
- tar --extract -j --directory=eigen --strip-components=1
- export CMAKE_INCLUDE_PATH="${CMAKE_INCLUDE_PATH:+$CMAKE_INCLUDE_PATH:}$PWD/eigen"
- fi
- set +ex
-script:
-- |
- # CMake Configuration
- set -ex
- $SCRIPT_RUN_PREFIX cmake ${CMAKE_EXTRA_ARGS} \
- -DPYBIND11_PYTHON_VERSION=$PYTHON \
- -DPYBIND11_CPP_STANDARD=$CPP \
- -DPYBIND11_WERROR=${WERROR:-ON} \
- -DDOWNLOAD_CATCH=${DOWNLOAD_CATCH:-ON} \
- .
- set +ex
-- |
- # pytest
- set -ex
- $SCRIPT_RUN_PREFIX make pytest -j 2 VERBOSE=1
- set +ex
-- |
- # cpptest
- set -ex
- $SCRIPT_RUN_PREFIX make cpptest -j 2
- set +ex
-- |
- # CMake Build Interface
- set -ex
- if [ -n "$CMAKE" ]; then $SCRIPT_RUN_PREFIX make test_cmake_build; fi
- set +ex
-after_failure: cat tests/test_cmake_build/*.log*
-after_script:
-- |
- # Cleanup (Docker)
- set -ex
- if [ -n "$DOCKER" ]; then docker stop "$containerid"; docker rm "$containerid"; fi
- set +ex
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.4)
-if (POLICY CMP0048)
- # cmake warns if loaded from a min-3.0-required parent dir, so silence the warning:
- cmake_policy(SET CMP0048 NEW)
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
endif()
-# CMake versions < 3.4.0 do not support try_compile/pthread checks without C as active language.
-if(CMAKE_VERSION VERSION_LESS 3.4.0)
- project(pybind11)
-else()
- project(pybind11 CXX)
+# Extract project version from source
+file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/pybind11/detail/common.h"
+ pybind11_version_defines REGEX "#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) ")
+
+foreach(ver ${pybind11_version_defines})
+ if(ver MATCHES [[#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$]])
+ set(PYBIND11_VERSION_${CMAKE_MATCH_1} "${CMAKE_MATCH_2}")
+ endif()
+endforeach()
+
+if(PYBIND11_VERSION_PATCH MATCHES [[\.([a-zA-Z0-9]+)$]])
+ set(pybind11_VERSION_TYPE "${CMAKE_MATCH_1}")
+endif()
+string(REGEX MATCH "^[0-9]+" PYBIND11_VERSION_PATCH "${PYBIND11_VERSION_PATCH}")
+
+project(
+ pybind11
+ LANGUAGES CXX
+ VERSION "${PYBIND11_VERSION_MAJOR}.${PYBIND11_VERSION_MINOR}.${PYBIND11_VERSION_PATCH}")
+
+# Standard includes
+include(GNUInstallDirs)
+include(CMakePackageConfigHelpers)
+include(CMakeDependentOption)
+
+if(NOT pybind11_FIND_QUIETLY)
+ message(STATUS "pybind11 v${pybind11_VERSION} ${pybind11_VERSION_TYPE}")
+endif()
+
+# Avoid infinite recursion if tests include this as a subdirectory
+if(DEFINED PYBIND11_MASTER_PROJECT)
+ set(PYBIND11_TEST OFF)
endif()
# Check if pybind11 is being used directly or via add_subdirectory
-set(PYBIND11_MASTER_PROJECT OFF)
-if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR AND NOT DEFINED PYBIND11_MASTER_PROJECT)
+  ### Warn if this is not an out-of-source build
+ if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+ set(lines
+ "You are building in-place. If that is not what you intended to "
+ "do, you can clean the source directory with:\n"
+ "rm -r CMakeCache.txt CMakeFiles/ cmake_uninstall.cmake pybind11Config.cmake "
+ "pybind11ConfigVersion.cmake tests/CMakeFiles/\n")
+ message(AUTHOR_WARNING ${lines})
+ endif()
+
set(PYBIND11_MASTER_PROJECT ON)
+
+ if(OSX AND CMAKE_VERSION VERSION_LESS 3.7)
+ # Bug in macOS CMake < 3.7 is unable to download catch
+ message(WARNING "CMAKE 3.7+ needed on macOS to download catch, and newer HIGHLY recommended")
+ elseif(WINDOWS AND CMAKE_VERSION VERSION_LESS 3.8)
+ # Only tested with 3.8+ in CI.
+ message(WARNING "CMAKE 3.8+ tested on Windows, previous versions untested")
+ endif()
+
+ message(STATUS "CMake ${CMAKE_VERSION}")
+
+ if(CMAKE_CXX_STANDARD)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ endif()
+
+ set(pybind11_system "")
+else()
+ set(PYBIND11_MASTER_PROJECT OFF)
+ set(pybind11_system SYSTEM)
endif()
+# Options
option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT})
-option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
+option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
+option(PYBIND11_NOPYTHON "Disable search for Python" OFF)
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/tools")
+cmake_dependent_option(
+ USE_PYTHON_INCLUDE_DIR
+ "Install pybind11 headers in Python include directory instead of default installation prefix"
+ OFF "PYBIND11_INSTALL" OFF)
-include(pybind11Tools)
-
-# Cache variables so pybind11_add_module can be used in parent projects
-set(PYBIND11_INCLUDE_DIR "${CMAKE_CURRENT_LIST_DIR}/include" CACHE INTERNAL "")
-set(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} CACHE INTERNAL "")
-set(PYTHON_LIBRARIES ${PYTHON_LIBRARIES} CACHE INTERNAL "")
-set(PYTHON_MODULE_PREFIX ${PYTHON_MODULE_PREFIX} CACHE INTERNAL "")
-set(PYTHON_MODULE_EXTENSION ${PYTHON_MODULE_EXTENSION} CACHE INTERNAL "")
-set(PYTHON_VERSION_MAJOR ${PYTHON_VERSION_MAJOR} CACHE INTERNAL "")
-set(PYTHON_VERSION_MINOR ${PYTHON_VERSION_MINOR} CACHE INTERNAL "")
+cmake_dependent_option(PYBIND11_FINDPYTHON "Force new FindPython" OFF
+ "NOT CMAKE_VERSION VERSION_LESS 3.12" OFF)
# NB: when adding a header don't forget to also add it to setup.py
set(PYBIND11_HEADERS
- include/pybind11/detail/class.h
- include/pybind11/detail/common.h
- include/pybind11/detail/descr.h
- include/pybind11/detail/init.h
- include/pybind11/detail/internals.h
- include/pybind11/detail/typeid.h
- include/pybind11/attr.h
- include/pybind11/buffer_info.h
- include/pybind11/cast.h
- include/pybind11/chrono.h
- include/pybind11/common.h
- include/pybind11/complex.h
- include/pybind11/options.h
- include/pybind11/eigen.h
- include/pybind11/embed.h
- include/pybind11/eval.h
- include/pybind11/functional.h
- include/pybind11/numpy.h
- include/pybind11/operators.h
- include/pybind11/pybind11.h
- include/pybind11/pytypes.h
- include/pybind11/stl.h
- include/pybind11/stl_bind.h
-)
-string(REPLACE "include/" "${CMAKE_CURRENT_SOURCE_DIR}/include/"
- PYBIND11_HEADERS "${PYBIND11_HEADERS}")
-
-if (PYBIND11_TEST)
- add_subdirectory(tests)
+ include/pybind11/detail/class.h
+ include/pybind11/detail/common.h
+ include/pybind11/detail/descr.h
+ include/pybind11/detail/init.h
+ include/pybind11/detail/internals.h
+ include/pybind11/detail/typeid.h
+ include/pybind11/attr.h
+ include/pybind11/buffer_info.h
+ include/pybind11/cast.h
+ include/pybind11/chrono.h
+ include/pybind11/common.h
+ include/pybind11/complex.h
+ include/pybind11/options.h
+ include/pybind11/eigen.h
+ include/pybind11/embed.h
+ include/pybind11/eval.h
+ include/pybind11/iostream.h
+ include/pybind11/functional.h
+ include/pybind11/numpy.h
+ include/pybind11/operators.h
+ include/pybind11/pybind11.h
+ include/pybind11/pytypes.h
+ include/pybind11/stl.h
+ include/pybind11/stl_bind.h)
+
+# Compare with grep and warn if mismatched
+if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12)
+ file(
+ GLOB_RECURSE _pybind11_header_check
+ LIST_DIRECTORIES false
+ RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
+ CONFIGURE_DEPENDS "include/pybind11/*.h")
+ set(_pybind11_here_only ${PYBIND11_HEADERS})
+ set(_pybind11_disk_only ${_pybind11_header_check})
+ list(REMOVE_ITEM _pybind11_here_only ${_pybind11_header_check})
+ list(REMOVE_ITEM _pybind11_disk_only ${PYBIND11_HEADERS})
+ if(_pybind11_here_only)
+ message(AUTHOR_WARNING "PYBIND11_HEADERS has extra files:" ${_pybind11_here_only})
+ endif()
+ if(_pybind11_disk_only)
+ message(AUTHOR_WARNING "PYBIND11_HEADERS is missing files:" ${_pybind11_disk_only})
+ endif()
endif()
-include(GNUInstallDirs)
-include(CMakePackageConfigHelpers)
+# CMake 3.12 added list(TRANSFORM <list> PREPEND
+# But we can't use it yet
+string(REPLACE "include/" "${CMAKE_CURRENT_SOURCE_DIR}/include/" PYBIND11_HEADERS
+ "${PYBIND11_HEADERS}")
-# extract project version from source
-file(STRINGS "${PYBIND11_INCLUDE_DIR}/pybind11/detail/common.h" pybind11_version_defines
- REGEX "#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) ")
-foreach(ver ${pybind11_version_defines})
- if (ver MATCHES "#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$")
- set(PYBIND11_VERSION_${CMAKE_MATCH_1} "${CMAKE_MATCH_2}" CACHE INTERNAL "")
- endif()
-endforeach()
-set(${PROJECT_NAME}_VERSION ${PYBIND11_VERSION_MAJOR}.${PYBIND11_VERSION_MINOR}.${PYBIND11_VERSION_PATCH})
-message(STATUS "pybind11 v${${PROJECT_NAME}_VERSION}")
+# Cache variable so this can be used in parent projects
+set(pybind11_INCLUDE_DIR
+ "${CMAKE_CURRENT_LIST_DIR}/include"
+ CACHE INTERNAL "Directory where pybind11 headers are located")
+
+# Backward compatible variable for add_subdirectory mode
+if(NOT PYBIND11_MASTER_PROJECT)
+ set(PYBIND11_INCLUDE_DIR
+ "${pybind11_INCLUDE_DIR}"
+ CACHE INTERNAL "")
+endif()
+
+# Note: when creating targets, you cannot use if statements at configure time -
+# you need generator expressions, because those will be placed in the target file.
+# You can also place ifs *in* the Config.in, but not here.
-option (USE_PYTHON_INCLUDE_DIR "Install pybind11 headers in Python include directory instead of default installation prefix" OFF)
-if (USE_PYTHON_INCLUDE_DIR)
- file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${PYTHON_INCLUDE_DIRS})
+# This section builds targets, but does *not* touch Python
+# Non-IMPORT targets cannot be defined twice
+if(NOT TARGET pybind11_headers)
+ # Build the headers-only target (no Python included):
+ # (long name used here to keep this from clashing in subdirectory mode)
+ add_library(pybind11_headers INTERFACE)
+ add_library(pybind11::pybind11_headers ALIAS pybind11_headers) # to match exported target
+ add_library(pybind11::headers ALIAS pybind11_headers) # easier to use/remember
+
+ target_include_directories(
+ pybind11_headers ${pybind11_system} INTERFACE $<BUILD_INTERFACE:${pybind11_INCLUDE_DIR}>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
+
+ target_compile_features(pybind11_headers INTERFACE cxx_inheriting_constructors cxx_user_literals
+ cxx_right_angle_brackets)
+else()
+ # It is invalid to install a target twice, too.
+ set(PYBIND11_INSTALL OFF)
endif()
-if(NOT (CMAKE_VERSION VERSION_LESS 3.0)) # CMake >= 3.0
- # Build an interface library target:
- add_library(pybind11 INTERFACE)
- add_library(pybind11::pybind11 ALIAS pybind11) # to match exported target
- target_include_directories(pybind11 INTERFACE $<BUILD_INTERFACE:${PYBIND11_INCLUDE_DIR}>
- $<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>
- $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
- target_compile_options(pybind11 INTERFACE $<BUILD_INTERFACE:${PYBIND11_CPP_STANDARD}>)
-
- add_library(module INTERFACE)
- add_library(pybind11::module ALIAS module)
- if(NOT MSVC)
- target_compile_options(module INTERFACE -fvisibility=hidden)
+include("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11Common.cmake")
+
+# Relative directory setting
+if(USE_PYTHON_INCLUDE_DIR AND DEFINED Python_INCLUDE_DIRS)
+ file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${Python_INCLUDE_DIRS})
+elseif(USE_PYTHON_INCLUDE_DIR AND DEFINED PYTHON_INCLUDE_DIR)
+ file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${PYTHON_INCLUDE_DIRS})
+endif()
+
+if(PYBIND11_INSTALL)
+ install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ set(PYBIND11_CMAKECONFIG_INSTALL_DIR
+ "${CMAKE_INSTALL_DATAROOTDIR}/cmake/${PROJECT_NAME}"
+ CACHE STRING "install path for pybind11Config.cmake")
+
+ configure_package_config_file(
+ tools/${PROJECT_NAME}Config.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
+ INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
+
+ if(CMAKE_VERSION VERSION_LESS 3.14)
+ # Remove CMAKE_SIZEOF_VOID_P from ConfigVersion.cmake since the library does
+ # not depend on architecture specific settings or libraries.
+ set(_PYBIND11_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
+ unset(CMAKE_SIZEOF_VOID_P)
+
+ write_basic_package_version_file(
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+ VERSION ${PROJECT_VERSION}
+ COMPATIBILITY AnyNewerVersion)
+
+ set(CMAKE_SIZEOF_VOID_P ${_PYBIND11_CMAKE_SIZEOF_VOID_P})
+ else()
+ # CMake 3.14+ natively supports header-only libraries
+ write_basic_package_version_file(
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+ VERSION ${PROJECT_VERSION}
+ COMPATIBILITY AnyNewerVersion ARCH_INDEPENDENT)
endif()
- target_link_libraries(module INTERFACE pybind11::pybind11)
- if(WIN32 OR CYGWIN)
- target_link_libraries(module INTERFACE $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)
- elseif(APPLE)
- target_link_libraries(module INTERFACE "-undefined dynamic_lookup")
+
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+ tools/FindPythonLibsNew.cmake
+ tools/pybind11Common.cmake
+ tools/pybind11Tools.cmake
+ tools/pybind11NewTools.cmake
+ DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
+
+ if(NOT PYBIND11_EXPORT_NAME)
+ set(PYBIND11_EXPORT_NAME "${PROJECT_NAME}Targets")
endif()
- add_library(embed INTERFACE)
- add_library(pybind11::embed ALIAS embed)
- target_link_libraries(embed INTERFACE pybind11::pybind11 $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)
+ install(TARGETS pybind11_headers EXPORT "${PYBIND11_EXPORT_NAME}")
+
+ install(
+ EXPORT "${PYBIND11_EXPORT_NAME}"
+ NAMESPACE "pybind11::"
+ DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
+
+ # Uninstall target
+ if(PYBIND11_MASTER_PROJECT)
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/cmake_uninstall.cmake.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY)
+
+ add_custom_target(uninstall COMMAND ${CMAKE_COMMAND} -P
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
+ endif()
endif()
-if (PYBIND11_INSTALL)
- install(DIRECTORY ${PYBIND11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
- # GNUInstallDirs "DATADIR" wrong here; CMake search path wants "share".
- set(PYBIND11_CMAKECONFIG_INSTALL_DIR "share/cmake/${PROJECT_NAME}" CACHE STRING "install path for pybind11Config.cmake")
-
- configure_package_config_file(tools/${PROJECT_NAME}Config.cmake.in
- "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
- INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
- # Remove CMAKE_SIZEOF_VOID_P from ConfigVersion.cmake since the library does
- # not depend on architecture specific settings or libraries.
- set(_PYBIND11_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
- unset(CMAKE_SIZEOF_VOID_P)
- write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
- VERSION ${${PROJECT_NAME}_VERSION}
- COMPATIBILITY AnyNewerVersion)
- set(CMAKE_SIZEOF_VOID_P ${_PYBIND11_CMAKE_SIZEOF_VOID_P})
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
- ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
- tools/FindPythonLibsNew.cmake
- tools/pybind11Tools.cmake
- DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
-
- if(NOT (CMAKE_VERSION VERSION_LESS 3.0))
- if(NOT PYBIND11_EXPORT_NAME)
- set(PYBIND11_EXPORT_NAME "${PROJECT_NAME}Targets")
+# BUILD_TESTING takes priority, but only if this is the master project
+if(PYBIND11_MASTER_PROJECT AND DEFINED BUILD_TESTING)
+ if(BUILD_TESTING)
+ if(_pybind11_nopython)
+ message(FATAL_ERROR "Cannot activate tests in NOPYTHON mode")
+ else()
+ add_subdirectory(tests)
endif()
-
- install(TARGETS pybind11 module embed
- EXPORT "${PYBIND11_EXPORT_NAME}")
- if(PYBIND11_MASTER_PROJECT)
- install(EXPORT "${PYBIND11_EXPORT_NAME}"
- NAMESPACE "${PROJECT_NAME}::"
- DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
+ endif()
+else()
+ if(PYBIND11_TEST)
+ if(_pybind11_nopython)
+ message(FATAL_ERROR "Cannot activate tests in NOPYTHON mode")
+ else()
+ add_subdirectory(tests)
endif()
endif()
endif()
+
+# Better symmetry with find_package(pybind11 CONFIG) mode.
+if(NOT PYBIND11_MASTER_PROJECT)
+ set(pybind11_FOUND
+ TRUE
+ CACHE INTERNAL "True if pybind11 and all required components found on the system")
+endif()
+++ /dev/null
-Thank you for your interest in this project! Please refer to the following
-sections on how to contribute code and bug reports.
-
-### Reporting bugs
-
-At the moment, this project is run in the spare time of a single person
-([Wenzel Jakob](http://rgl.epfl.ch/people/wjakob)) with very limited resources
-for issue tracker tickets. Thus, before submitting a question or bug report,
-please take a moment of your time and ensure that your issue isn't already
-discussed in the project documentation provided at
-[http://pybind11.readthedocs.org/en/latest](http://pybind11.readthedocs.org/en/latest).
-
-Assuming that you have identified a previously unknown problem or an important
-question, it's essential that you submit a self-contained and minimal piece of
-code that reproduces the problem. In other words: no external dependencies,
-isolate the function(s) that cause breakage, submit matched and complete C++
-and Python snippets that can be easily compiled and run on my end.
-
-## Pull requests
-Contributions are submitted, reviewed, and accepted using Github pull requests.
-Please refer to [this
-article](https://help.github.com/articles/using-pull-requests) for details and
-adhere to the following rules to make the process as smooth as possible:
-
-* Make a new branch for every feature you're working on.
-* Make small and clean pull requests that are easy to review but make sure they
- do add value by themselves.
-* Add tests for any new functionality and run the test suite (``make pytest``)
- to ensure that no existing features break.
-* Please run ``flake8`` and ``tools/check-style.sh`` to check your code matches
- the project style. (Note that ``check-style.sh`` requires ``gawk``.)
-* This project has a strong focus on providing general solutions using a
- minimal amount of code, thus small pull requests are greatly preferred.
-
-### Licensing of contributions
-
-pybind11 is provided under a BSD-style license that can be found in the
-``LICENSE`` file. By using, distributing, or contributing to this project, you
-agree to the terms and conditions of this license.
-
-You are under no obligation whatsoever to provide any bug fixes, patches, or
-upgrades to the features, functionality or performance of the source code
-("Enhancements") to anyone; however, if you choose to make your Enhancements
-available either publicly, or directly to the author of this software, without
-imposing a separate written license agreement for such Enhancements, then you
-hereby grant the following license: a non-exclusive, royalty-free perpetual
-license to install, use, modify, prepare derivative works, incorporate into
-other computer software, distribute, and sublicense such enhancements or
-derivative works thereof, in binary and source code form.
+++ /dev/null
-Make sure you've completed the following steps before submitting your issue -- thank you!
-
-1. Check if your question has already been answered in the [FAQ](http://pybind11.readthedocs.io/en/latest/faq.html) section.
-2. Make sure you've read the [documentation](http://pybind11.readthedocs.io/en/latest/). Your issue may be addressed there.
-3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room](https://gitter.im/pybind/Lobby).
-4. If you have a genuine bug report or a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
-5. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.
-
-*After reading, remove this checklist and the template text in parentheses below.*
-
-## Issue description
-
-(Provide a short description, state the expected behavior and what actually happens.)
-
-## Reproducible example code
-
-(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Please also refer to the file CONTRIBUTING.md, which clarifies licensing of
+Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of
external contributions to this project including patches, pull requests, etc.
-recursive-include include/pybind11 *.h
-include LICENSE README.md CONTRIBUTING.md
+recursive-include pybind11/include/pybind11 *.h
+recursive-include pybind11 *.py
+recursive-include pybind11 py.typed
+recursive-include pybind11 *.pyi
+include pybind11/share/cmake/pybind11/*.cmake
+include LICENSE README.rst pyproject.toml setup.py setup.cfg
+++ /dev/null
-![pybind11 logo](https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png)
-
-# pybind11 — Seamless operability between C++11 and Python
-
-[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=master)](http://pybind11.readthedocs.org/en/master/?badge=master)
-[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=stable)](http://pybind11.readthedocs.org/en/stable/?badge=stable)
-[![Gitter chat](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/pybind/Lobby)
-[![Build Status](https://travis-ci.org/pybind/pybind11.svg?branch=master)](https://travis-ci.org/pybind/pybind11)
-[![Build status](https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true)](https://ci.appveyor.com/project/wjakob/pybind11)
-
-**pybind11** is a lightweight header-only library that exposes C++ types in Python
-and vice versa, mainly to create Python bindings of existing C++ code. Its
-goals and syntax are similar to the excellent
-[Boost.Python](http://www.boost.org/doc/libs/1_58_0/libs/python/doc/) library
-by David Abrahams: to minimize boilerplate code in traditional extension
-modules by inferring type information using compile-time introspection.
-
-The main issue with Boost.Python—and the reason for creating such a similar
-project—is Boost. Boost is an enormously large and complex suite of utility
-libraries that works with almost every C++ compiler in existence. This
-compatibility has its cost: arcane template tricks and workarounds are
-necessary to support the oldest and buggiest of compiler specimens. Now that
-C++11-compatible compilers are widely available, this heavy machinery has
-become an excessively large and unnecessary dependency.
-
-Think of this library as a tiny self-contained version of Boost.Python with
-everything stripped away that isn't relevant for binding generation. Without
-comments, the core header files only require ~4K lines of code and depend on
-Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
-compact implementation was possible thanks to some of the new C++11 language
-features (specifically: tuples, lambda functions and variadic templates). Since
-its creation, this library has grown beyond Boost.Python in many ways, leading
-to dramatically simpler binding code in many common situations.
-
-Tutorial and reference documentation is provided at
-[http://pybind11.readthedocs.org/en/master](http://pybind11.readthedocs.org/en/master).
-A PDF version of the manual is available
-[here](https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf).
-
-## Core features
-pybind11 can map the following core C++ features to Python
-
-- Functions accepting and returning custom data structures per value, reference, or pointer
-- Instance methods and static methods
-- Overloaded functions
-- Instance attributes and static attributes
-- Arbitrary exception types
-- Enumerations
-- Callbacks
-- Iterators and ranges
-- Custom operators
-- Single and multiple inheritance
-- STL data structures
-- Smart pointers with reference counting like ``std::shared_ptr``
-- Internal references with correct reference counting
-- C++ classes with virtual (and pure virtual) methods can be extended in Python
-
-## Goodies
-In addition to the core functionality, pybind11 provides some extra goodies:
-
-- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an
- implementation-agnostic interface.
-
-- It is possible to bind C++11 lambda functions with captured variables. The
- lambda capture data is stored inside the resulting Python function object.
-
-- pybind11 uses C++11 move constructors and move assignment operators whenever
- possible to efficiently transfer custom data types.
-
-- It's easy to expose the internal storage of custom data types through
- Pythons' buffer protocols. This is handy e.g. for fast conversion between
- C++ matrix classes like Eigen and NumPy without expensive copy operations.
-
-- pybind11 can automatically vectorize functions so that they are transparently
- applied to all entries of one or more NumPy array arguments.
-
-- Python's slice-based access and assignment operations can be supported with
- just a few lines of code.
-
-- Everything is contained in just a few header files; there is no need to link
- against any additional libraries.
-
-- Binaries are generally smaller by a factor of at least 2 compared to
- equivalent bindings generated by Boost.Python. A recent pybind11 conversion
- of PyRosetta, an enormous Boost.Python binding project,
- [reported](http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf) a binary
- size reduction of **5.4x** and compile time reduction by **5.8x**.
-
-- Function signatures are precomputed at compile time (using ``constexpr``),
- leading to smaller binaries.
-
-- With little extra effort, C++ types can be pickled and unpickled similar to
- regular Python objects.
-
-## Supported compilers
-
-1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)
-2. GCC 4.8 or newer
-3. Microsoft Visual Studio 2015 Update 3 or newer
-4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11 v2.0 and a [workaround](https://github.com/pybind/pybind11/issues/276))
-5. Cygwin/GCC (tested on 2.5.1)
-
-## About
-
-This project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob).
-Significant features and/or improvements to the code were contributed by
-Jonas Adler,
-Lori A. Burns,
-Sylvain Corlay,
-Trent Houliston,
-Axel Huebl,
-@hulucc,
-Sergey Lyskov
-Johan Mabille,
-Tomasz Miąsko,
-Dean Moldovan,
-Ben Pritchard,
-Jason Rhinelander,
-Boris Schäling,
-Pim Schellart,
-Henry Schreiner,
-Ivan Smirnov, and
-Patrick Stewart.
-
-### License
-
-pybind11 is provided under a BSD-style license that can be found in the
-``LICENSE`` file. By using, distributing, or contributing to this project,
-you agree to the terms and conditions of this license.
--- /dev/null
+.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
+ :alt: pybind11 logo
+
+**pybind11 — Seamless operability between C++11 and Python**
+
+|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |CI| |Build status|
+
+|Repology| |PyPI package| |Conda-forge| |Python Versions|
+
+`Setuptools example <https://github.com/pybind/python_example>`_
+• `Scikit-build example <https://github.com/pybind/scikit_build_example>`_
+• `CMake example <https://github.com/pybind/cmake_example>`_
+
+.. start
+
+.. warning::
+
+ Combining older versions of pybind11 (< 2.6.0) with Python 3.9.0 will
+ trigger undefined behavior that typically manifests as crashes during
+ interpreter shutdown (but could also destroy your data. **You have been
+ warned.**)
+
+ We recommend that you update to the latest patch release of Python (3.9.1),
+ which includes a `fix <https://github.com/python/cpython/pull/22670>`_
+ that resolves this problem. If you do use Python 3.9.0, please update to
+ the latest version of pybind11 (2.6.0 or newer), which includes a temporary
+ workaround specifically when Python 3.9.0 is detected at runtime.
+
+
+**pybind11** is a lightweight header-only library that exposes C++ types
+in Python and vice versa, mainly to create Python bindings of existing
+C++ code. Its goals and syntax are similar to the excellent
+`Boost.Python <http://www.boost.org/doc/libs/1_58_0/libs/python/doc/>`_
+library by David Abrahams: to minimize boilerplate code in traditional
+extension modules by inferring type information using compile-time
+introspection.
+
+The main issue with Boost.Python—and the reason for creating such a
+similar project—is Boost. Boost is an enormously large and complex suite
+of utility libraries that works with almost every C++ compiler in
+existence. This compatibility has its cost: arcane template tricks and
+workarounds are necessary to support the oldest and buggiest of compiler
+specimens. Now that C++11-compatible compilers are widely available,
+this heavy machinery has become an excessively large and unnecessary
+dependency.
+
+Think of this library as a tiny self-contained version of Boost.Python
+with everything stripped away that isn’t relevant for binding
+generation. Without comments, the core header files only require ~4K
+lines of code and depend on Python (2.7 or 3.5+, or PyPy) and the C++
+standard library. This compact implementation was possible thanks to
+some of the new C++11 language features (specifically: tuples, lambda
+functions and variadic templates). Since its creation, this library has
+grown beyond Boost.Python in many ways, leading to dramatically simpler
+binding code in many common situations.
+
+Tutorial and reference documentation is provided at
+`pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_.
+A PDF version of the manual is available
+`here <https://pybind11.readthedocs.io/_/downloads/en/latest/pdf/>`_.
+And the source code is always available at
+`github.com/pybind/pybind11 <https://github.com/pybind/pybind11>`_.
+
+
+Core features
+-------------
+
+
+pybind11 can map the following core C++ features to Python:
+
+- Functions accepting and returning custom data structures per value,
+ reference, or pointer
+- Instance methods and static methods
+- Overloaded functions
+- Instance attributes and static attributes
+- Arbitrary exception types
+- Enumerations
+- Callbacks
+- Iterators and ranges
+- Custom operators
+- Single and multiple inheritance
+- STL data structures
+- Smart pointers with reference counting like ``std::shared_ptr``
+- Internal references with correct reference counting
+- C++ classes with virtual (and pure virtual) methods can be extended
+ in Python
+
+Goodies
+-------
+
+In addition to the core functionality, pybind11 provides some extra
+goodies:
+
+- Python 2.7, 3.5+, and PyPy/PyPy3 7.3 are supported with an
+ implementation-agnostic interface.
+
+- It is possible to bind C++11 lambda functions with captured
+ variables. The lambda capture data is stored inside the resulting
+ Python function object.
+
+- pybind11 uses C++11 move constructors and move assignment operators
+ whenever possible to efficiently transfer custom data types.
+
+- It’s easy to expose the internal storage of custom data types through
+ Pythons’ buffer protocols. This is handy e.g. for fast conversion
+ between C++ matrix classes like Eigen and NumPy without expensive
+ copy operations.
+
+- pybind11 can automatically vectorize functions so that they are
+ transparently applied to all entries of one or more NumPy array
+ arguments.
+
+- Python’s slice-based access and assignment operations can be
+ supported with just a few lines of code.
+
+- Everything is contained in just a few header files; there is no need
+ to link against any additional libraries.
+
+- Binaries are generally smaller by a factor of at least 2 compared to
+ equivalent bindings generated by Boost.Python. A recent pybind11
+ conversion of PyRosetta, an enormous Boost.Python binding project,
+ `reported <http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf>`_
+ a binary size reduction of **5.4x** and compile time reduction by
+ **5.8x**.
+
+- Function signatures are precomputed at compile time (using
+ ``constexpr``), leading to smaller binaries.
+
+- With little extra effort, C++ types can be pickled and unpickled
+ similar to regular Python objects.
+
+Supported compilers
+-------------------
+
+1. Clang/LLVM 3.3 or newer (for Apple Xcode’s clang, this is 5.0.0 or
+ newer)
+2. GCC 4.8 or newer
+3. Microsoft Visual Studio 2015 Update 3 or newer
+4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
+5. Cygwin/GCC (previously tested on 2.5.1)
+6. NVCC (CUDA 11.0 tested in CI)
+7. NVIDIA PGI (20.9 tested in CI)
+
+About
+-----
+
+This project was created by `Wenzel
+Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or
+improvements to the code were contributed by Jonas Adler, Lori A. Burns,
+Sylvain Corlay, Eric Cousineau, Ralf Grosse-Kunstleve, Trent Houliston, Axel
+Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko,
+Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
+Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
+
+We thank Google for a generous financial contribution to the continuous
+integration infrastructure used by this project.
+
+
+Contributing
+~~~~~~~~~~~~
+
+See the `contributing
+guide <https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md>`_
+for information on building and contributing to pybind11.
+
+License
+~~~~~~~
+
+pybind11 is provided under a BSD-style license that can be found in the
+`LICENSE <https://github.com/pybind/pybind11/blob/master/LICENSE>`_
+file. By using, distributing, or contributing to this project, you agree
+to the terms and conditions of this license.
+
+.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest
+ :target: http://pybind11.readthedocs.org/en/latest
+.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg
+ :target: http://pybind11.readthedocs.org/en/stable
+.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg
+ :target: https://gitter.im/pybind/Lobby
+.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg
+ :target: https://github.com/pybind/pybind11/actions
+.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true
+ :target: https://ci.appveyor.com/project/wjakob/pybind11
+.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg
+ :target: https://pypi.org/project/pybind11/
+.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg
+ :target: https://github.com/conda-forge/pybind11-feedstock
+.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg
+ :target: https://repology.org/project/python:pybind11/versions
+.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg
+ :target: https://pypi.org/project/pybind11/
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = NO
+PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS \
+ PY_MAJOR_VERSION=3 \
+ PYBIND11_NOINLINE
from example import print
print(A())
-To register the necessary conversion routines, it is necessary to add
-a partial overload to the ``pybind11::detail::type_caster<T>`` template.
-Although this is an implementation detail, adding partial overloads to this
+To register the necessary conversion routines, it is necessary to add an
+instantiation of the ``pybind11::detail::type_caster<T>`` template.
+Although this is an implementation detail, adding an instantiation of this
type is explicitly allowed.
.. code-block:: cpp
Eigen and numpy have fundamentally different notions of a vector. In Eigen, a
vector is simply a matrix with the number of columns or rows set to 1 at
-compile time (for a column vector or row vector, respectively). Numpy, in
+compile time (for a column vector or row vector, respectively). NumPy, in
contrast, has comparable 2-dimensional 1xN and Nx1 arrays, but *also* has
1-dimensional arrays of size N.
+.. _type-conversions:
+
Type conversions
################
chrono
eigen
custom
-
====================
When including the additional header file :file:`pybind11/stl.h`, conversions
-between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``,
+between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``/``std::valarray<>``,
``std::set<>``/``std::unordered_set<>``, and
``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and
``dict`` data structures are automatically enabled. The types ``std::pair<>``
a ``name::visit()`` function. For any other function name, the specialization must be
included to tell pybind11 how to visit the variant.
+.. warning::
+
+ When converting a ``variant`` type, pybind11 follows the same rules as when
+ determining which function overload to call (:ref:`overload_resolution`), and
+ so the same caveats hold. In particular, the order in which the ``variant``'s
+ alternatives are listed is important, since pybind11 will try conversions in
+ this order. This means that, for example, when converting ``variant<int, bool>``,
+ the ``bool`` variant will never be selected, as any Python ``bool`` is already
+ an ``int`` and is convertible to a C++ ``int``. Changing the order of alternatives
+ (and using ``variant<bool, int>``, in this example) provides a solution.
+
.. note::
pybind11 only supports the modern implementation of ``boost::variant``
before any binding code (e.g. invocations to ``class_::def()``, etc.). This
macro must be specified at the top level (and outside of any namespaces), since
-it instantiates a partial template overload. If your binding code consists of
+it adds a template instantiation of ``type_caster``. If your binding code consists of
multiple compilation units, it must be present in every file (typically via a
common header) preceding any usage of ``std::vector<int>``. Opaque types must
also have a corresponding ``class_`` declaration to associate them with a name
/* Trampoline (need one for each virtual function) */
std::string go(int n_times) override {
- PYBIND11_OVERLOAD_PURE(
+ PYBIND11_OVERRIDE_PURE(
std::string, /* Return type */
Animal, /* Parent class */
go, /* Name of function in C++ (must match Python name) */
}
};
-The macro :c:macro:`PYBIND11_OVERLOAD_PURE` should be used for pure virtual
-functions, and :c:macro:`PYBIND11_OVERLOAD` should be used for functions which have
-a default implementation. There are also two alternate macros
-:c:macro:`PYBIND11_OVERLOAD_PURE_NAME` and :c:macro:`PYBIND11_OVERLOAD_NAME` which
+The macro :c:macro:`PYBIND11_OVERRIDE_PURE` should be used for pure virtual
+functions, and :c:macro:`PYBIND11_OVERRIDE` should be used for functions which have
+a default implementation. There are also two alternate macros
+:c:macro:`PYBIND11_OVERRIDE_PURE_NAME` and :c:macro:`PYBIND11_OVERRIDE_NAME` which
take a string-valued name argument between the *Parent class* and *Name of the
function* slots, which defines the name of function in Python. This is required
when the C++ and Python versions of the
Note, however, that the above is sufficient for allowing python classes to
extend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the
-necessary steps required to providing proper overload support for inherited
+necessary steps required to provide proper overriding support for inherited
classes.
The Python session below shows how to override ``Animal::go`` and invoke it via
will generally leave the C++ instance in an invalid state and cause undefined
behavior if the C++ instance is subsequently used.
+.. versionchanged:: 2.6
+ The default pybind11 metaclass will throw a ``TypeError`` when it detects
+ that ``__init__`` was not called by a derived class.
+
Here is an example:
.. code-block:: python
class Dachshund(Dog):
def __init__(self, name):
- Dog.__init__(self) # Without this, undefined behavior may occur if the C++ portions are referenced.
+ Dog.__init__(self) # Without this, a TypeError is raised.
self.name = name
def bark(self):
return "yap!"
- because in these cases there is no C++ variable to reference (the value
is stored in the referenced Python variable), pybind11 provides one in
- the PYBIND11_OVERLOAD macros (when needed) with static storage duration.
- Note that this means that invoking the overloaded method on *any*
+ the PYBIND11_OVERRIDE macros (when needed) with static storage duration.
+ Note that this means that invoking the overridden method on *any*
instance will change the referenced value stored in *all* instances of
that type.
- Attempts to modify a non-const reference will not have the desired
effect: it will change only the static cache variable, but this change
will not propagate to underlying Python instance, and the change will be
- replaced the next time the overload is invoked.
+ replaced the next time the override is invoked.
+
+.. warning::
+
+ The :c:macro:`PYBIND11_OVERRIDE` and accompanying macros used to be called
+ ``PYBIND11_OVERLOAD`` up until pybind11 v2.5.0, and :func:`get_override`
+ used to be called ``get_overload``. This naming was corrected and the older
+ macro and function names may soon be deprecated, in order to reduce
+ confusion with overloaded functions and methods and ``py::overload_cast``
+ (see :ref:`classes`).
.. seealso::
class PyAnimal : public Animal {
public:
using Animal::Animal; // Inherit constructors
- std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Animal, go, n_times); }
- std::string name() override { PYBIND11_OVERLOAD(std::string, Animal, name, ); }
+ std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Animal, go, n_times); }
+ std::string name() override { PYBIND11_OVERRIDE(std::string, Animal, name, ); }
};
class PyDog : public Dog {
public:
using Dog::Dog; // Inherit constructors
- std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, Dog, go, n_times); }
- std::string name() override { PYBIND11_OVERLOAD(std::string, Dog, name, ); }
- std::string bark() override { PYBIND11_OVERLOAD(std::string, Dog, bark, ); }
+ std::string go(int n_times) override { PYBIND11_OVERRIDE(std::string, Dog, go, n_times); }
+ std::string name() override { PYBIND11_OVERRIDE(std::string, Dog, name, ); }
+ std::string bark() override { PYBIND11_OVERRIDE(std::string, Dog, bark, ); }
};
.. note::
- Note the trailing commas in the ``PYBIND11_OVERLOAD`` calls to ``name()``
+ Note the trailing commas in the ``PYBIND11_OVERRIDE`` calls to ``name()``
and ``bark()``. These are needed to portably implement a trampoline for a
function that does not take any arguments. For functions that take
a nonzero number of arguments, the trailing comma must be omitted.
class PyHusky : public Husky {
public:
using Husky::Husky; // Inherit constructors
- std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, Husky, go, n_times); }
- std::string name() override { PYBIND11_OVERLOAD(std::string, Husky, name, ); }
- std::string bark() override { PYBIND11_OVERLOAD(std::string, Husky, bark, ); }
+ std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, Husky, go, n_times); }
+ std::string name() override { PYBIND11_OVERRIDE(std::string, Husky, name, ); }
+ std::string bark() override { PYBIND11_OVERRIDE(std::string, Husky, bark, ); }
};
There is, however, a technique that can be used to avoid this duplication
template <class AnimalBase = Animal> class PyAnimal : public AnimalBase {
public:
using AnimalBase::AnimalBase; // Inherit constructors
- std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, AnimalBase, go, n_times); }
- std::string name() override { PYBIND11_OVERLOAD(std::string, AnimalBase, name, ); }
+ std::string go(int n_times) override { PYBIND11_OVERRIDE_PURE(std::string, AnimalBase, go, n_times); }
+ std::string name() override { PYBIND11_OVERRIDE(std::string, AnimalBase, name, ); }
};
template <class DogBase = Dog> class PyDog : public PyAnimal<DogBase> {
public:
using PyAnimal<DogBase>::PyAnimal; // Inherit constructors
// Override PyAnimal's pure virtual go() with a non-pure one:
- std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, DogBase, go, n_times); }
- std::string bark() override { PYBIND11_OVERLOAD(std::string, DogBase, bark, ); }
+ std::string go(int n_times) override { PYBIND11_OVERRIDE(std::string, DogBase, go, n_times); }
+ std::string bark() override { PYBIND11_OVERRIDE(std::string, DogBase, bark, ); }
};
This technique has the advantage of requiring just one trampoline method to be
.. code-block:: cpp
py::class_<Animal, PyAnimal<>> animal(m, "Animal");
- py::class_<Dog, PyDog<>> dog(m, "Dog");
- py::class_<Husky, PyDog<Husky>> husky(m, "Husky");
+ py::class_<Dog, Animal, PyDog<>> dog(m, "Dog");
+ py::class_<Husky, Dog, PyDog<Husky>> husky(m, "Husky");
// ... add animal, dog, husky definitions
Note that ``Husky`` did not require a dedicated trampoline template class at
for performance reasons: when the trampoline class is not needed for anything
except virtual method dispatching, not initializing the trampoline class
improves performance by avoiding needing to do a run-time check to see if the
-inheriting python instance has an overloaded method.
+inheriting python instance has an overridden method.
Sometimes, however, it is useful to always initialize a trampoline class as an
intermediate class that does more than just handle virtual method dispatching.
this is to use the method body of the trampoline class to do conversions to the
input and return of the Python method.
-The main building block to do so is the :func:`get_overload`, this function
+The main building block to do so is the :func:`get_override`, this function
allows retrieving a method implemented in Python from within the trampoline's
methods. Consider for example a C++ method which has the signature
``bool myMethod(int32_t& value)``, where the return indicates whether
bool MyClass::myMethod(int32_t& value)
{
pybind11::gil_scoped_acquire gil; // Acquire the GIL while in this scope.
- // Try to look up the overloaded method on the Python side.
- pybind11::function overload = pybind11::get_overload(this, "myMethod");
- if (overload) { // method is found
- auto obj = overload(value); // Call the Python function.
+ // Try to look up the overridden method on the Python side.
+ pybind11::function override = pybind11::get_override(this, "myMethod");
+ if (override) { // method is found
+ auto obj = override(value); // Call the Python function.
if (py::isinstance<py::int_>(obj)) { // check if it returned a Python integer type
value = obj.cast<int32_t>(); // Cast it and assign it to the value.
return true; // Return true; value should be used.
py::class_<MyClass, std::unique_ptr<MyClass, py::nodelete>>(m, "MyClass")
.def(py::init<>())
+.. _destructors_that_call_python:
+
+Destructors that call Python
+============================
+
+If a Python function is invoked from a C++ destructor, an exception may be thrown
+of type :class:`error_already_set`. If this error is thrown out of a class destructor,
+``std::terminate()`` will be called, terminating the process. Class destructors
+must catch all exceptions of type :class:`error_already_set` to discard the Python
+exception using :func:`error_already_set::discard_as_unraisable`.
+
+Every Python function should be treated as *possibly throwing*. When a Python generator
+stops yielding items, Python will throw a ``StopIteration`` exception, which can pass
+through C++ destructors if the generator's stack frame holds the last reference to C++
+objects.
+
+For more information, see :ref:`the documentation on exceptions <unraisable_exceptions>`.
+
+.. code-block:: cpp
+
+ class MyClass {
+ public:
+ ~MyClass() {
+ try {
+ py::print("Even printing is dangerous in a destructor");
+ py::exec("raise ValueError('This is an unraisable exception')");
+ } catch (py::error_already_set &e) {
+ // error_context should be information about where/why the error occurred,
+ // e.g. use __func__ to get the name of the current function
+ e.discard_as_unraisable(__func__);
+ }
+ }
+ };
+
+.. note::
+
+ pybind11 does not support C++ destructors marked ``noexcept(false)``.
+
+.. versionadded:: 2.6
+
.. _implicit_conversions:
Implicit conversions
p.setExtra(15)
data = pickle.dumps(p, 2)
-Note that only the cPickle module is supported on Python 2.7. The second
-argument to ``dumps`` is also crucial: it selects the pickle protocol version
-2, since the older version 1 is not supported. Newer versions are also fine—for
-instance, specify ``-1`` to always use the latest available version. Beware:
-failure to follow these instructions will cause important pybind11 memory
-allocation routines to be skipped during unpickling, which will likely lead to
-memory corruption and/or segmentation faults.
+
+.. note::
+ Note that only the cPickle module is supported on Python 2.7.
+
+ The second argument to ``dumps`` is also crucial: it selects the pickle
+ protocol version 2, since the older version 1 is not supported. Newer
+ versions are also fine—for instance, specify ``-1`` to always use the
+ latest available version. Beware: failure to follow these instructions
+ will cause important pybind11 memory allocation routines to be skipped
+ during unpickling, which will likely lead to memory corruption and/or
+ segmentation faults.
.. seealso::
.. [#f3] http://docs.python.org/3/library/pickle.html#pickling-class-instances
+Deepcopy support
+================
+
+Python normally uses references in assignments. Sometimes a real copy is needed
+to prevent changing all copies. The ``copy`` module [#f5]_ provides these
+capabilities.
+
+On Python 3, a class with pickle support is automatically also (deep)copy
+compatible. However, performance can be improved by adding custom
+``__copy__`` and ``__deepcopy__`` methods. With Python 2.7, these custom methods
+are mandatory for (deep)copy compatibility, because pybind11 only supports
+cPickle.
+
+For simple classes (deep)copy can be enabled by using the copy constructor,
+which should look as follows:
+
+.. code-block:: cpp
+
+ py::class_<Copyable>(m, "Copyable")
+ .def("__copy__", [](const Copyable &self) {
+ return Copyable(self);
+ })
+ .def("__deepcopy__", [](const Copyable &self, py::dict) {
+ return Copyable(self);
+ }, "memo"_a);
+
+.. note::
+
+ Dynamic attributes will not be copied in this example.
+
+.. [#f5] https://docs.python.org/3/library/copy.html
+
Multiple Inheritance
====================
class Trampoline : public A {
public:
- int foo() const override { PYBIND11_OVERLOAD(int, A, foo, ); }
+ int foo() const override { PYBIND11_OVERRIDE(int, A, foo, ); }
};
class Publicist : public A {
``.def("foo", static_cast<int (A::*)() const>(&Publicist::foo));``
where ``int (A::*)() const`` is the type of ``A::foo``.
+Binding final classes
+=====================
+
+Some classes may not be appropriate to inherit from. In C++11, classes can
+use the ``final`` specifier to ensure that a class cannot be inherited from.
+The ``py::is_final`` attribute can be used to ensure that Python classes
+cannot inherit from a specified type. The underlying C++ type does not need
+to be declared final.
+
+.. code-block:: cpp
+
+ class IsFinal final {};
+
+ py::class_<IsFinal>(m, "IsFinal", py::is_final());
+
+When you try to inherit from such a class in Python, you will now get this
+error:
+
+.. code-block:: pycon
+
+ >>> class PyFinalChild(IsFinal):
+ ... pass
+ TypeError: type 'IsFinal' is not an acceptable base type
+
+.. note:: This attribute is currently ignored on PyPy
+
+.. versionadded:: 2.6
+
Custom automatic downcasters
============================
more complete example, including a demonstration of how to provide
automatic downcasting for an entire class hierarchy without
writing one get() function for each class.
+
+Accessing the type object
+=========================
+
+You can get the type object from a C++ class that has already been registered using:
+
+.. code-block:: cpp
+
+ py::type T_py = py::type::of<T>();
+
+You can directly use ``py::type::of(ob)`` to get the type object from any python
+object, just like ``type(ob)`` in Python.
+
+.. note::
+
+ Other types, like ``py::type::of<int>()``, do not work, see :ref:`type-conversions`.
+
+.. versionadded:: 2.6
.. code-block:: cmake
- cmake_minimum_required(VERSION 3.0)
+ cmake_minimum_required(VERSION 3.4)
project(example)
find_package(pybind11 REQUIRED) # or `add_subdirectory(pybind11)`
Importing modules
=================
-Python modules can be imported using `module::import()`:
+Python modules can be imported using `module_::import()`:
.. code-block:: cpp
- py::module sys = py::module::import("sys");
+ py::module_ sys = py::module_::import("sys");
py::print(sys.attr("path"));
For convenience, the current working directory is included in ``sys.path`` when
.. code-block:: cpp
- py::module calc = py::module::import("calc");
+ py::module_ calc = py::module_::import("calc");
py::object result = calc.attr("add")(1, 2);
int n = result.cast<int>();
assert(n == 3);
-Modules can be reloaded using `module::reload()` if the source is modified e.g.
+Modules can be reloaded using `module_::reload()` if the source is modified e.g.
by an external process. This can be useful in scenarios where the application
imports a user defined data processing script which needs to be updated after
changes by the user. Note that this function does not reload modules recursively.
namespace py = pybind11;
PYBIND11_EMBEDDED_MODULE(fast_calc, m) {
- // `m` is a `py::module` which is used to bind functions and classes
+ // `m` is a `py::module_` which is used to bind functions and classes
m.def("add", [](int i, int j) {
return i + j;
});
int main() {
py::scoped_interpreter guard{};
- auto fast_calc = py::module::import("fast_calc");
+ auto fast_calc = py::module_::import("fast_calc");
auto result = fast_calc.attr("add")(1, 2).cast<int>();
assert(result == 3);
}
int main() {
py::scoped_interpreter guard{};
- auto py_module = py::module::import("py_module");
+ auto py_module = py::module_::import("py_module");
auto locals = py::dict("fmt"_a="{} + {} = {}", **py_module.attr("__dict__"));
assert(locals["a"].cast<int>() == 1);
Exceptions
##########
-Built-in exception translation
-==============================
+Built-in C++ to Python exception translation
+============================================
+
+When Python calls C++ code through pybind11, pybind11 provides a C++ exception handler
+that will trap C++ exceptions, translate them to the corresponding Python exception,
+and raise them so that Python code can handle them.
-When C++ code invoked from Python throws an ``std::exception``, it is
-automatically converted into a Python ``Exception``. pybind11 defines multiple
-special exception classes that will map to different types of Python
-exceptions:
+pybind11 defines translations for ``std::exception`` and its standard
+subclasses, and several special exception classes that translate to specific
+Python exceptions. Note that these are not actually Python exceptions, so they
+cannot be examined using the Python C API. Instead, they are pure C++ objects
+that pybind11 will translate to the corresponding Python exception when they arrive
+at its exception handler.
.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|
+--------------------------------------+--------------------------------------+
-| C++ exception type | Python exception type |
+| Exception thrown by C++ | Translated to Python exception type |
+======================================+======================================+
| :class:`std::exception` | ``RuntimeError`` |
+--------------------------------------+--------------------------------------+
+--------------------------------------+--------------------------------------+
| :class:`std::range_error` | ``ValueError`` |
+--------------------------------------+--------------------------------------+
+| :class:`std::overflow_error` | ``OverflowError`` |
++--------------------------------------+--------------------------------------+
| :class:`pybind11::stop_iteration` | ``StopIteration`` (used to implement |
| | custom iterators) |
+--------------------------------------+--------------------------------------+
| | of bounds access in ``__getitem__``, |
| | ``__setitem__``, etc.) |
+--------------------------------------+--------------------------------------+
-| :class:`pybind11::value_error` | ``ValueError`` (used to indicate |
-| | wrong value passed in |
-| | ``container.remove(...)``) |
-+--------------------------------------+--------------------------------------+
| :class:`pybind11::key_error` | ``KeyError`` (used to indicate out |
| | of bounds access in ``__getitem__``, |
| | ``__setitem__`` in dict-like |
| | objects, etc.) |
+--------------------------------------+--------------------------------------+
-| :class:`pybind11::error_already_set` | Indicates that the Python exception |
-| | flag has already been set via Python |
-| | API calls from C++ code; this C++ |
-| | exception is used to propagate such |
-| | a Python exception back to Python. |
+| :class:`pybind11::value_error` | ``ValueError`` (used to indicate |
+| | wrong value passed in |
+| | ``container.remove(...)``) |
++--------------------------------------+--------------------------------------+
+| :class:`pybind11::type_error` | ``TypeError`` |
++--------------------------------------+--------------------------------------+
+| :class:`pybind11::buffer_error` | ``BufferError`` |
++--------------------------------------+--------------------------------------+
+| :class:`pybind11::import_error` | ``ImportError``  |
++--------------------------------------+--------------------------------------+
+| Any other exception | ``RuntimeError`` |
+--------------------------------------+--------------------------------------+
-When a Python function invoked from C++ throws an exception, it is converted
-into a C++ exception of type :class:`error_already_set` whose string payload
-contains a textual summary.
+Exception translation is not bidirectional. That is, *catching* the C++
+exceptions defined above will not trap exceptions that originate from
+Python. For that, catch :class:`pybind11::error_already_set`. See :ref:`below
+<handling_python_exceptions_cpp>` for further details.
There is also a special exception :class:`cast_error` that is thrown by
:func:`handle::call` when the input arguments cannot be converted to Python
module and automatically converts any encountered exceptions of type ``CppExp``
into Python exceptions of type ``PyExp``.
+It is possible to specify base class for the exception using the third
+parameter, a `handle`:
+
+.. code-block:: cpp
+
+ py::register_exception<CppExp>(module, "PyExp", PyExc_RuntimeError);
+
+Then `PyExp` can be caught both as `PyExp` and `RuntimeError`.
+
+The class objects of the built-in Python exceptions are listed in the Python
+documentation on `Standard Exceptions <https://docs.python.org/3/c-api/exceptions.html#standard-exceptions>`_.
+The default base class is `PyExc_Exception`.
+
When more advanced exception translation is needed, the function
``py::register_exception_translator(translator)`` can be used to register
functions that can translate arbitrary exception types (and which may include
to make this a static declaration when using it inside a lambda expression
without requiring capturing).
-
The following example demonstrates this for a hypothetical exception classes
``MyCustomException`` and ``OtherException``: the first is translated to a
custom python exception ``MyCustomError``, while the second is translated to a
.. note::
- You must call either ``PyErr_SetString`` or a custom exception's call
+ Call either ``PyErr_SetString`` or a custom exception's call
operator (``exc(string)``) for every exception caught in a custom exception
translator. Failure to do so will cause Python to crash with ``SystemError:
error return without exception set``.
Exceptions that you do not plan to handle should simply not be caught, or
may be explicitly (re-)thrown to delegate it to the other,
previously-declared existing exception translators.
+
+.. _handling_python_exceptions_cpp:
+
+Handling exceptions from Python in C++
+======================================
+
+When C++ calls Python functions, such as in a callback function or when
+manipulating Python objects, and Python raises an ``Exception``, pybind11
+converts the Python exception into a C++ exception of type
+:class:`pybind11::error_already_set` whose payload contains a C++ string textual
+summary and the actual Python exception. ``error_already_set`` is used to
+propagate Python exceptions back to Python (or possibly, handle them in C++).
+
+.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|
+
++--------------------------------------+--------------------------------------+
+| Exception raised in Python | Thrown as C++ exception type |
++======================================+======================================+
+| Any Python ``Exception`` | :class:`pybind11::error_already_set` |
++--------------------------------------+--------------------------------------+
+
+For example:
+
+.. code-block:: cpp
+
+ try {
+ // open("missing.txt", "r")
+ auto file = py::module_::import("io").attr("open")("missing.txt", "r");
+ auto text = file.attr("read")();
+ file.attr("close")();
+ } catch (py::error_already_set &e) {
+ if (e.matches(PyExc_FileNotFoundError)) {
+ py::print("missing.txt not found");
+ } else if (e.matches(PyExc_PermissionError)) {
+ py::print("missing.txt found but not accessible");
+ } else {
+ throw;
+ }
+ }
+
+Note that C++ to Python exception translation does not apply here, since that is
+a method for translating C++ exceptions to Python, not vice versa. The error raised
+from Python is always ``error_already_set``.
+
+This example illustrates this behavior:
+
+.. code-block:: cpp
+
+ try {
+ py::eval("raise ValueError('The Ring')");
+ } catch (py::value_error &boromir) {
+ // Boromir never gets the ring
+ assert(false);
+ } catch (py::error_already_set &frodo) {
+ // Frodo gets the ring
+ py::print("I will take the ring");
+ }
+
+ try {
+ // py::value_error is a request for pybind11 to raise a Python exception
+ throw py::value_error("The ball");
+ } catch (py::error_already_set &cat) {
+ // cat won't catch the ball since
+ // py::value_error is not a Python exception
+ assert(false);
+ } catch (py::value_error &dog) {
+ // dog will catch the ball
+ py::print("Run Spot run");
+ throw; // Throw it again (pybind11 will raise ValueError)
+ }
+
+Handling errors from the Python C API
+=====================================
+
+Where possible, use :ref:`pybind11 wrappers <wrappers>` instead of calling
+the Python C API directly. When calling the Python C API directly, in
+addition to manually managing reference counts, one must follow the pybind11
+error protocol, which is outlined here.
+
+After calling the Python C API, if Python returns an error,
+``throw py::error_already_set();``, which allows pybind11 to deal with the
+exception and pass it back to the Python interpreter. This includes calls to
+the error setting functions such as ``PyErr_SetString``.
+
+.. code-block:: cpp
+
+ PyErr_SetString(PyExc_TypeError, "C API type error demo");
+ throw py::error_already_set();
+
+ // But it would be easier to simply...
+ throw py::type_error("pybind11 wrapper type error");
+
+Alternately, to ignore the error, call `PyErr_Clear
+<https://docs.python.org/3/c-api/exceptions.html#c.PyErr_Clear>`_.
+
+Any Python error must be thrown or cleared, or Python/pybind11 will be left in
+an invalid state.
+
+.. _unraisable_exceptions:
+
+Handling unraisable exceptions
+==============================
+
+If a Python function invoked from a C++ destructor or any function marked
+``noexcept(true)`` (collectively, "noexcept functions") throws an exception, there
+is no way to propagate the exception, as such functions may not throw.
+Should they throw or fail to catch any exceptions in their call graph,
+the C++ runtime calls ``std::terminate()`` to abort immediately.
+
+Similarly, Python exceptions raised in a class's ``__del__`` method do not
+propagate, but are logged by Python as an unraisable error. In Python 3.8+, a
+`system hook is triggered
+<https://docs.python.org/3/library/sys.html#sys.unraisablehook>`_
+and an auditing event is logged.
+
+Any noexcept function should have a try-catch block that traps
+:class:`error_already_set` (or any other exception that can occur). Note that
+pybind11 wrappers around Python exceptions such as
+:class:`pybind11::value_error` are *not* Python exceptions; they are C++
+exceptions that pybind11 catches and converts to Python exceptions. Noexcept
+functions cannot propagate these exceptions either. A useful approach is to
+convert them to Python exceptions and then ``discard_as_unraisable`` as shown
+below.
+
+.. code-block:: cpp
+
+ void nonthrowing_func() noexcept(true) {
+ try {
+ // ...
+ } catch (py::error_already_set &eas) {
+ // Discard the Python error using Python APIs, using the C++ magic
+ // variable __func__. Python already knows the type and value of the
+ // exception object.
+ eas.discard_as_unraisable(__func__);
+ } catch (const std::exception &e) {
+ // Log and discard C++ exceptions.
+ third_party::log(e);
+ }
+ }
+
+.. versionadded:: 2.6
type information, it is not clear whether Python should take charge of the
returned value and eventually free its resources, or if this is handled on the
C++ side. For this reason, pybind11 provides a several *return value policy*
-annotations that can be passed to the :func:`module::def` and
+annotations that can be passed to the :func:`module_::def` and
:func:`class_::def` functions. The default policy is
:enum:`return_value_policy::automatic`.
.. code-block:: cpp
py::class_<MyClass>("MyClass")
- .def("myFunction", py::arg("arg") = (SomeType *) nullptr);
+ .def("myFunction", py::arg("arg") = static_cast<SomeType *>(nullptr));
+
+Keyword-only arguments
+======================
+
+Python 3 introduced keyword-only arguments by specifying an unnamed ``*``
+argument in a function definition:
+
+.. code-block:: python
+
+ def f(a, *, b): # a can be positional or via keyword; b must be via keyword
+ pass
+
+ f(a=1, b=2) # good
+ f(b=2, a=1) # good
+ f(1, b=2) # good
+ f(1, 2) # TypeError: f() takes 1 positional argument but 2 were given
+
+Pybind11 provides a ``py::kw_only`` object that allows you to implement
+the same behaviour by specifying the object between positional and keyword-only
+argument annotations when registering the function:
+
+.. code-block:: cpp
+
+ m.def("f", [](int a, int b) { /* ... */ },
+ py::arg("a"), py::kw_only(), py::arg("b"));
+
+Note that you currently cannot combine this with a ``py::args`` argument. This
+feature does *not* require Python 3 to work.
+
+.. versionadded:: 2.6
+
+Positional-only arguments
+=========================
+
+Python 3.8 introduced a new positional-only argument syntax, using ``/`` in the
+function definition (note that this has been a convention for CPython
+positional arguments, such as in ``pow()``, since Python 2). You can
+do the same thing in any version of Python using ``py::pos_only()``:
+
+.. code-block:: cpp
+
+ m.def("f", [](int a, int b) { /* ... */ },
+ py::arg("a"), py::pos_only(), py::arg("b"));
+
+You now cannot give argument ``a`` by keyword. This can be combined with
+keyword-only arguments, as well.
+
+.. versionadded:: 2.6
.. _nonconverting_arguments:
not allow ``None`` as argument. To pass optional argument of these copied types consider
using ``std::optional<T>``
+.. _overload_resolution:
+
Overload resolution order
=========================
If the second pass also fails a ``TypeError`` is raised.
Within each pass, overloads are tried in the order they were registered with
-pybind11.
+pybind11. If the ``py::prepend()`` tag is added to the definition, a function
+can be placed at the beginning of the overload sequence instead, allowing user
+overloads to precede built-in functions.
What this means in practice is that pybind11 will prefer any overload that does
-not require conversion of arguments to an overload that does, but otherwise prefers
-earlier-defined overloads to later-defined ones.
+not require conversion of arguments to an overload that does, but otherwise
+prefers earlier-defined overloads to later-defined ones.
.. note::
requiring one conversion over one requiring three, but only prioritizes
overloads requiring no conversion at all to overloads that require
conversion of at least one argument.
+
+.. versionadded:: 2.6
+
+ The ``py::prepend()`` tag.
==========================================
pybind11 provides a few convenience macros such as
-:func:`PYBIND11_DECLARE_HOLDER_TYPE` and ``PYBIND11_OVERLOAD_*``. Since these
+:func:`PYBIND11_DECLARE_HOLDER_TYPE` and ``PYBIND11_OVERRIDE_*``. Since these
are "just" macros that are evaluated in the preprocessor (which has no concept
of types), they *will* get confused by commas in a template argument; for
example, consider:
.. code-block:: cpp
- PYBIND11_OVERLOAD(MyReturnType<T1, T2>, Class<T3, T4>, func)
+ PYBIND11_OVERRIDE(MyReturnType<T1, T2>, Class<T3, T4>, func)
The C preprocessor interprets this as five arguments (with new
arguments beginning after each comma) rather than three. To get around this,
// Version 1: using a type alias
using ReturnType = MyReturnType<T1, T2>;
using ClassType = Class<T3, T4>;
- PYBIND11_OVERLOAD(ReturnType, ClassType, func);
+ PYBIND11_OVERRIDE(ReturnType, ClassType, func);
// Version 2: using the PYBIND11_TYPE macro:
- PYBIND11_OVERLOAD(PYBIND11_TYPE(MyReturnType<T1, T2>),
+ PYBIND11_OVERRIDE(PYBIND11_TYPE(MyReturnType<T1, T2>),
PYBIND11_TYPE(Class<T3, T4>), func)
The ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds.
/* Acquire GIL before calling Python code */
py::gil_scoped_acquire acquire;
- PYBIND11_OVERLOAD_PURE(
+ PYBIND11_OVERRIDE_PURE(
std::string, /* Return type */
Animal, /* Parent class */
go, /* Name of function */
.. code-block:: cpp
- py::object pet = (py::object) py::module::import("basic").attr("Pet");
+ py::object pet = (py::object) py::module_::import("basic").attr("Pet");
py::class_<Dog>(m, "Dog", pet)
.def(py::init<const std::string &>())
.. code-block:: cpp
- py::module::import("basic");
+ py::module_::import("basic");
py::class_<Dog, Pet>(m, "Dog")
.def(py::init<const std::string &>())
.. code-block:: cpp
- auto data = (MyData *) py::get_shared_data("mydata");
+ auto data = reinterpret_cast<MyData *>(py::get_shared_data("mydata"));
if (!data)
- data = (MyData *) py::set_shared_data("mydata", new MyData(42));
+ data = static_cast<MyData *>(py::set_shared_data("mydata", new MyData(42)));
If the above snippet was used in several separately compiled extension modules,
the first one to be imported would create a ``MyData`` instance and associate
Both approaches also expose a potentially dangerous ``_cleanup`` attribute in
Python, which may be undesirable from an API standpoint (a premature explicit
-call from Python might lead to undefined behavior). Yet another approach that
+call from Python might lead to undefined behavior). Yet another approach that
avoids this issue involves weak reference with a cleanup callback:
.. code-block:: cpp
- // Register a callback function that is invoked when the BaseClass object is colelcted
+ // Register a callback function that is invoked when the BaseClass object is collected
py::cpp_function cleanup_callback(
[](py::handle weakref) {
// perform cleanup here -- this function is called with the GIL held
.. note::
- PyPy (at least version 5.9) does not garbage collect objects when the
- interpreter exits. An alternative approach (which also works on CPython) is to use
- the :py:mod:`atexit` module [#f7]_, for example:
+ PyPy does not garbage collect objects when the interpreter exits. An alternative
+ approach (which also works on CPython) is to use the :py:mod:`atexit` module [#f7]_,
+ for example:
.. code-block:: cpp
- auto atexit = py::module::import("atexit");
+ auto atexit = py::module_::import("atexit");
atexit.attr("register")(py::cpp_function([]() {
// perform cleanup here -- this function is called with the GIL held
}));
----------
)mydelimiter");
-By default, pybind11 automatically generates and prepends a signature to the docstring of a function
-registered with ``module::def()`` and ``class_::def()``. Sometimes this
-behavior is not desirable, because you want to provide your own signature or remove
+By default, pybind11 automatically generates and prepends a signature to the docstring of a function
+registered with ``module_::def()`` and ``class_::def()``. Sometimes this
+behavior is not desirable, because you want to provide your own signature or remove
the docstring completely to exclude the function from the Sphinx documentation.
The class ``options`` allows you to selectively suppress auto-generated signatures:
m.def("add", [](int a, int b) { return a + b; }, "A function which adds two numbers");
}
-Note that changes to the settings affect only function bindings created during the
-lifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function,
+Note that changes to the settings affect only function bindings created during the
+lifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function,
the default settings are restored to prevent unwanted side effects.
.. [#f4] http://www.sphinx-doc.org
.. [#f5] http://github.com/pybind/python_example
+
+.. _avoiding-cpp-types-in-docstrings:
+
+Avoiding C++ types in docstrings
+================================
+
+Docstrings are generated at the time of the declaration, e.g. when ``.def(...)`` is called.
+At this point parameter and return types should be known to pybind11.
+If a custom type is not exposed yet through a ``py::class_`` constructor or a custom type caster,
+its C++ type name will be used instead to generate the signature in the docstring:
+
+.. code-block:: text
+
+ | __init__(...)
+ | __init__(self: example.Foo, arg0: ns::Bar) -> None
+ ^^^^^^^
+
+
+This limitation can be circumvented by ensuring that C++ classes are registered with pybind11
+before they are used as a parameter or return type of a function:
+
+.. code-block:: cpp
+
+ PYBIND11_MODULE(example, m) {
+
+ auto pyFoo = py::class_<ns::Foo>(m, "Foo");
+ auto pyBar = py::class_<ns::Bar>(m, "Bar");
+
+ pyFoo.def(py::init<const ns::Bar&>());
+ pyBar.def(py::init<const ns::Foo&>());
+ }
struct buffer_info {
void *ptr;
- ssize_t itemsize;
+ py::ssize_t itemsize;
std::string format;
- ssize_t ndim;
- std::vector<ssize_t> shape;
- std::vector<ssize_t> strides;
+ py::ssize_t ndim;
+ std::vector<py::ssize_t> shape;
+ std::vector<py::ssize_t> strides;
};
To create a C++ function that can take a Python buffer object as an argument,
simply use the type ``py::buffer`` as one of its arguments. Buffers can exist
in a great variety of configurations, hence some safety checks are usually
-necessary in the function body. Below, you can see an basic example on how to
+necessary in the function body. Below, you can see a basic example on how to
define a custom constructor for the Eigen double precision matrix
(``Eigen::MatrixXd``) type, which supports initialization from compatible
buffer objects (e.g. a NumPy matrix).
constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
- .def("__init__", [](Matrix &m, py::buffer b) {
+ .def(py::init([](py::buffer b) {
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;
/* Request a buffer descriptor from Python */
auto map = Eigen::Map<Matrix, 0, Strides>(
static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);
- new (&m) Matrix(map);
- });
+ return Matrix(map);
+ }));
For reference, the ``def_buffer()`` call for this Eigen data type should look
as follows:
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
-of the requested type. Note that this feature requires the
-:file:`pybind11/numpy.h` header to be included.
+of the requested type. This feature requires the :file:`pybind11/numpy.h`
+header to be included. Note that :file:`pybind11/numpy.h` does not depend on
+the NumPy headers, and thus can be used without declaring a build-time
+dependency on NumPy; NumPy>=1.7.0 is a runtime dependency.
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
py::buffer_info buf3 = result.request();
- double *ptr1 = (double *) buf1.ptr,
- *ptr2 = (double *) buf2.ptr,
- *ptr3 = (double *) buf3.ptr;
+ double *ptr1 = static_cast<double *>(buf1.ptr);
+ double *ptr2 = static_cast<double *>(buf2.ptr);
+ double *ptr3 = static_cast<double *>(buf3.ptr);
for (size_t idx = 0; idx < buf1.shape[0]; idx++)
ptr3[idx] = ptr1[idx] + ptr2[idx];
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
- for (ssize_t k = 0; k < r.shape(2); k++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
- for (ssize_t k = 0; k < r.shape(2); k++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
Python 3 provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
+In Python 2, the syntactic sugar ``...`` is not available, but the singleton
+``Ellipsis`` (of type ``ellipsis``) can still be used directly.
.. code-block:: python
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
+
+.. versionchanged:: 2.6
+ ``py::ellipsis()`` is now also available in Python 2.
+
+Memory view
+===========
+
+For a case when we simply want to provide a direct accessor to C/C++ buffer
+without a concrete class object, we can return a ``memoryview`` object. Suppose
+we wish to expose a ``memoryview`` for a 2x4 uint8_t array, we can do the
+following:
+
+.. code-block:: cpp
+
+ const uint8_t buffer[] = {
+ 0, 1, 2, 3,
+ 4, 5, 6, 7
+ };
+ m.def("get_memoryview2d", []() {
+ return py::memoryview::from_buffer(
+ buffer, // buffer pointer
+ { 2, 4 }, // shape (rows, cols)
+ { sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
+ );
+ })
+
+This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
+managed by Python. The user is responsible for managing the lifetime of the
+buffer. Using a ``memoryview`` created in this way after deleting the buffer in
+C++ side results in undefined behavior.
+
+We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
+
+.. code-block:: cpp
+
+ m.def("get_memoryview1d", []() {
+ return py::memoryview::from_memory(
+ buffer, // buffer pointer
+ sizeof(uint8_t) * 8 // buffer size
+ );
+ })
+
+.. note::
+
+ ``memoryview::from_memory`` is not available in Python 2.
+
+.. versionchanged:: 2.6
+ ``memoryview::from_memory`` added.
Python types
############
+.. _wrappers:
+
Available wrappers
==================
:class:`iterable`, :class:`iterator`, :class:`function`, :class:`buffer`,
:class:`array`, and :class:`array_t`.
+.. warning::
+
+ Be sure to review the :ref:`pytypes_gotchas` before using this heavily in
+ your C++ API.
+
+.. _casting_back_and_forth:
+
Casting back and forth
======================
.. code-block:: cpp
// Equivalent to "from decimal import Decimal"
- py::object Decimal = py::module::import("decimal").attr("Decimal");
+ py::object Decimal = py::module_::import("decimal").attr("Decimal");
.. code-block:: cpp
// Try to import scipy
- py::object scipy = py::module::import("scipy");
+ py::object scipy = py::module_::import("scipy");
return scipy.attr("__version__");
+
.. _calling_python_functions:
Calling Python functions
========================
-It is also possible to call Python classes, functions and methods
+It is also possible to call Python classes, functions and methods
via ``operator()``.
.. code-block:: cpp
.. code-block:: cpp
// Use Python to make our directories
- py::object os = py::module::import("os");
+ py::object os = py::module_::import("os");
py::object makedirs = os.attr("makedirs");
makedirs("/tmp/path/to/somewhere");
-One can convert the result obtained from Python to a pure C++ version
+One can convert the result obtained from Python to a pure C++ version
if a ``py::class_`` or type conversion is defined.
.. code-block:: cpp
py::print(py::str(exp_pi));
In the example above ``pi.attr("exp")`` is a *bound method*: it will always call
-the method for that same instance of the class. Alternately one can create an
-*unbound method* via the Python class (instead of instance) and pass the ``self``
+the method for that same instance of the class. Alternately one can create an
+*unbound method* via the Python class (instead of instance) and pass the ``self``
object explicitly, followed by other arguments.
.. code-block:: cpp
Python functions from C++, including keywords arguments and unpacking.
.. _PEP448: https://www.python.org/dev/peps/pep-0448/
+
+.. _implicit_casting:
+
+Implicit casting
+================
+
+When using the C++ interface for Python types, or calling Python functions,
+objects of type :class:`object` are returned. It is possible to invoke implicit
+conversions to subclasses like :class:`dict`. The same holds for the proxy objects
+returned by ``operator[]`` or ``obj.attr()``.
+Casting to subtypes improves code readability and allows values to be passed to
+C++ functions that require a specific subtype rather than a generic :class:`object`.
+
+.. code-block:: cpp
+
+ #include <pybind11/numpy.h>
+ using namespace pybind11::literals;
+
+ py::module_ os = py::module_::import("os");
+ py::module_ path = py::module_::import("os.path"); // like 'import os.path as path'
+ py::module_ np = py::module_::import("numpy"); // like 'import numpy as np'
+
+ py::str curdir_abs = path.attr("abspath")(path.attr("curdir"));
+ py::print(py::str("Current directory: ") + curdir_abs);
+ py::dict environ = os.attr("environ");
+ py::print(environ["HOME"]);
+ py::array_t<float> arr = np.attr("ones")(3, "dtype"_a="float32");
+ py::print(py::repr(arr + py::int_(1)));
+
+These implicit conversions are available for subclasses of :class:`object`; there
+is no need to call ``obj.cast()`` explicitly as for custom classes, see
+:ref:`casting_back_and_forth`.
+
+.. note::
+ If a trivial conversion via move constructor is not possible, both implicit and
+ explicit casting (calling ``obj.cast()``) will attempt a "rich" conversion.
+ For instance, ``py::list env = os.attr("environ");`` will succeed and is
+ equivalent to the Python code ``env = list(os.environ)`` that produces a
+ list of the dict keys.
+
+.. TODO: Adapt text once PR #2349 has landed
+
+Handling exceptions
+===================
+
+Python exceptions from wrapper classes will be thrown as a ``py::error_already_set``.
+See :ref:`Handling exceptions from Python in C++
+<handling_python_exceptions_cpp>` for more information on handling exceptions
+raised when calling C++ wrapper classes.
+
+.. _pytypes_gotchas:
+
+Gotchas
+=======
+
+Default-Constructed Wrappers
+----------------------------
+
+When a wrapper type is default-constructed, it is **not** a valid Python object (i.e. it is not ``py::none()``). It is simply the same as
+a ``PyObject*`` null pointer. To check for this, use
+``static_cast<bool>(my_wrapper)``.
+
+Assigning py::none() to wrappers
+--------------------------------
+
+You may be tempted to use types like ``py::str`` and ``py::dict`` in C++
+signatures (either pure C++, or in bound signatures), and assign them default
+values of ``py::none()``. However, in a best case scenario, it will fail fast
+because ``None`` is not convertible to that type (e.g. ``py::dict``), or in a
+worse case scenario, it will silently work but corrupt the types you want to
+work with (e.g. ``py::str(py::none())`` will yield ``"None"`` in Python).
m.def("noisy_func", []() {
py::scoped_ostream_redirect stream(
std::cout, // std::ostream&
- py::module::import("sys").attr("stdout") // Python output
+ py::module_::import("sys").attr("stdout") // Python output
);
call_noisy_func();
});
...
// Evaluate in scope of main module
- py::object scope = py::module::import("__main__").attr("__dict__");
+ py::object scope = py::module_::import("__main__").attr("__dict__");
// Evaluate an isolated expression
int result = py::eval("my_variable + 10", scope).cast<int>();
Compiling the test cases
========================
-Linux/MacOS
+Linux/macOS
-----------
On Linux you'll need to install the **python-dev** or **python3-dev** packages as
-well as **cmake**. On Mac OS, the included python version works out of the box,
+well as **cmake**. On macOS, the included python version works out of the box,
but **cmake** must still be installed.
After installing the prerequisites, run
On Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies
on various C++11 language features that break older versions of Visual Studio.
+.. Note::
+
+ To use C++17 in Visual Studio 2017 (MSVC 14.1), pybind11 requires the flag
+ ``/permissive-`` to be passed to the compiler `to enforce standard conformance`_. When
+ building with Visual Studio 2019, this is not strictly necessary, but still advised.
+
+.. _`to enforce standard conformance`: https://docs.microsoft.com/en-us/cpp/build/reference/permissive-standards-conformance?view=vs-2017
+
To compile and run the tests:
.. code-block:: batch
The :func:`PYBIND11_MODULE` macro creates a function that will be called when an
``import`` statement is issued from within Python. The module name (``example``)
is given as the first macro argument (it should not be in quotes). The second
-argument (``m``) defines a variable of type :class:`py::module <module>` which
-is the main interface for creating bindings. The method :func:`module::def`
+argument (``m``) defines a variable of type :class:`py::module_ <module>` which
+is the main interface for creating bindings. The method :func:`module_::def`
generates binding code that exposes the ``add()`` function to Python.
.. note::
.. code-block:: bash
- $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
+ $ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
+
+.. note::
+
+ If you used :ref:`include_as_a_submodule` to get the pybind11 source, then
+ use ``$(python3-config --includes) -Iextern/pybind11/include`` instead of
+ ``$(python3 -m pybind11 --includes)`` in the above compilation, as
+ explained in :ref:`building_manually`.
-For more details on the required compiler flags on Linux and MacOS, see
+For more details on the required compiler flags on Linux and macOS, see
:ref:`building_manually`. For complete cross-platform compilation instructions,
refer to the :ref:`compiling` page.
Keyword arguments
=================
-With a simple modification code, it is possible to inform Python about the
+With a simple code modification, it is possible to inform Python about the
names of the arguments ("i" and "j" in this case).
.. code-block:: cpp
py::arg("i"), py::arg("j"));
:class:`arg` is one of several special tag classes which can be used to pass
-metadata into :func:`module::def`. With this modified binding code, we can now
+metadata into :func:`module_::def`. With this modified binding code, we can now
call the function using keyword arguments, which is a more readable alternative
particularly for functions taking many parameters:
+# -*- coding: utf-8 -*-
import random
import os
import time
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
- decl += '\n'
+ decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
- params = [random.randint(0, nclasses - 1) for i in range(nargs)]
+ params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
- bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % \
- (fn, cl, fn)
+ bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % (fn, cl, fn)
decl += "};\n\n"
- bindings += ' ;\n'
+ bindings += " ;\n"
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
- result += decl + '\n'
+ result += decl + "\n"
result += "PYBIND11_MODULE(example, m) {\n"
result += bindings
result += "}"
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
- decl += '\n'
+ decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
bindings += ' py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
- params = [random.randint(0, nclasses - 1) for i in range(nargs)]
+ params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
- bindings += ' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n' % \
- (fn, cl, fn)
+ bindings += (
+ ' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
+ % (fn, cl, fn)
+ )
decl += "};\n\n"
- bindings += ' ;\n'
+ bindings += " ;\n"
result = "#include <boost/python.hpp>\n\n"
result += "namespace py = boost::python;\n\n"
- result += decl + '\n'
+ result += decl + "\n"
result += "BOOST_PYTHON_MODULE(example) {\n"
result += bindings
result += "}"
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
- print ("{")
+ print("{")
for i in range(0, 10):
nclasses = 2 ** i
with open("test.cpp", "w") as f:
f.write(codegen(nclasses))
n1 = dt.datetime.now()
- os.system("g++ -Os -shared -rdynamic -undefined dynamic_lookup "
+ os.system(
+ "g++ -Os -shared -rdynamic -undefined dynamic_lookup "
"-fvisibility=hidden -std=c++14 test.cpp -I include "
- "-I /System/Library/Frameworks/Python.framework/Headers -o test.so")
+ "-I /System/Library/Frameworks/Python.framework/Headers -o test.so"
+ )
n2 = dt.datetime.now()
elapsed = (n2 - n1).total_seconds()
- size = os.stat('test.so').st_size
+ size = os.stat("test.so").st_size
print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
- print ("}")
+ print("}")
.. only:: latex
.. image:: pybind11_vs_boost_python2.png
-
-
Starting with version 1.8.0, pybind11 releases use a `semantic versioning
<http://semver.org>`_ policy.
+
+v2.6.2 (Jan 26, 2021)
+---------------------
+
+Minor missing functionality added:
+
+* enum: add missing Enum.value property.
+ `#2739 <https://github.com/pybind/pybind11/pull/2739>`_
+
+* Allow thread termination to be avoided during shutdown for CPython 3.7+ via
+ ``.disarm`` for ``gil_scoped_acquire``/``gil_scoped_release``.
+ `#2657 <https://github.com/pybind/pybind11/pull/2657>`_
+
+Fixed or improved behavior in a few special cases:
+
+* Fix bug where the constructor of ``object`` subclasses would not throw on
+ being passed a Python object of the wrong type.
+ `#2701 <https://github.com/pybind/pybind11/pull/2701>`_
+
+* The ``type_caster`` for integers does not convert Python objects with
+ ``__int__`` anymore with ``noconvert`` or during the first round of trying
+ overloads.
+ `#2698 <https://github.com/pybind/pybind11/pull/2698>`_
+
+* When casting to a C++ integer, ``__index__`` is always called and not
+ considered as conversion, consistent with Python 3.8+.
+ `#2801 <https://github.com/pybind/pybind11/pull/2801>`_
+
+Build improvements:
+
+* Setup helpers: ``extra_compile_args`` and ``extra_link_args`` automatically set by
+ Pybind11Extension are now prepended, which allows them to be overridden
+ by user-set ``extra_compile_args`` and ``extra_link_args``.
+ `#2808 <https://github.com/pybind/pybind11/pull/2808>`_
+
+* Setup helpers: Don't trigger unused parameter warning.
+ `#2735 <https://github.com/pybind/pybind11/pull/2735>`_
+
+* CMake: Support running with ``--warn-uninitialized`` active.
+ `#2806 <https://github.com/pybind/pybind11/pull/2806>`_
+
+* CMake: Avoid error if included from two submodule directories.
+ `#2804 <https://github.com/pybind/pybind11/pull/2804>`_
+
+* CMake: Fix ``STATIC`` / ``SHARED`` being ignored in FindPython mode.
+ `#2796 <https://github.com/pybind/pybind11/pull/2796>`_
+
+* CMake: Respect the setting for ``CMAKE_CXX_VISIBILITY_PRESET`` if defined.
+ `#2793 <https://github.com/pybind/pybind11/pull/2793>`_
+
+* CMake: Fix issue with FindPython2/FindPython3 not working with ``pybind11::embed``.
+ `#2662 <https://github.com/pybind/pybind11/pull/2662>`_
+
+* CMake: mixing local and installed pybind11's would prioritize the installed
+ one over the local one (regression in 2.6.0).
+ `#2716 <https://github.com/pybind/pybind11/pull/2716>`_
+
+
+Bug fixes:
+
+* Fixed segfault in multithreaded environments when using
+ ``scoped_ostream_redirect``.
+ `#2675 <https://github.com/pybind/pybind11/pull/2675>`_
+
+* Leave docstring unset when all docstring-related options are disabled, rather
+ than set an empty string.
+ `#2745 <https://github.com/pybind/pybind11/pull/2745>`_
+
+* The module key in builtins that pybind11 uses to store its internals changed
+ from std::string to a Python str type (more natural on Python 2, no change on
+ Python 3).
+ `#2814 <https://github.com/pybind/pybind11/pull/2814>`_
+
+* Fixed assertion error related to unhandled (later overwritten) exception in
+ CPython 3.8 and 3.9 debug builds.
+ `#2685 <https://github.com/pybind/pybind11/pull/2685>`_
+
+* Fix ``py::gil_scoped_acquire`` assert with CPython 3.9 debug build.
+ `#2683 <https://github.com/pybind/pybind11/pull/2683>`_
+
+* Fix issue with a test failing on PyTest 6.2.
+ `#2741 <https://github.com/pybind/pybind11/pull/2741>`_
+
+Warning fixes:
+
+* Fix warning modifying constructor parameter 'flag' that shadows a field of
+ 'set_flag' ``[-Wshadow-field-in-constructor-modified]``.
+ `#2780 <https://github.com/pybind/pybind11/pull/2780>`_
+
+* Suppressed some deprecation warnings about old-style
+ ``__init__``/``__setstate__`` in the tests.
+ `#2759 <https://github.com/pybind/pybind11/pull/2759>`_
+
+Valgrind work:
+
+* Fix invalid access when calling a pybind11 ``__init__`` on a non-pybind11
+ class instance.
+ `#2755 <https://github.com/pybind/pybind11/pull/2755>`_
+
+* Fixed various minor memory leaks in pybind11's test suite.
+ `#2758 <https://github.com/pybind/pybind11/pull/2758>`_
+
+* Resolved memory leak in cpp_function initialization when exceptions occurred.
+ `#2756 <https://github.com/pybind/pybind11/pull/2756>`_
+
+* Added a Valgrind build, checking for leaks and memory-related UB, to CI.
+ `#2746 <https://github.com/pybind/pybind11/pull/2746>`_
+
+Compiler support:
+
+* Intel compiler was not activating C++14 support due to a broken define.
+ `#2679 <https://github.com/pybind/pybind11/pull/2679>`_
+
+* Support ICC and NVIDIA HPC SDK in C++17 mode.
+ `#2729 <https://github.com/pybind/pybind11/pull/2729>`_
+
+* Support Intel OneAPI compiler (ICC 20.2) and add to CI.
+ `#2573 <https://github.com/pybind/pybind11/pull/2573>`_
+
+
+
+v2.6.1 (Nov 11, 2020)
+---------------------
+
+* ``py::exec``, ``py::eval``, and ``py::eval_file`` now add the builtins module
+ as ``"__builtins__"`` to their ``globals`` argument, better matching ``exec``
+ and ``eval`` in pure Python.
+ `#2616 <https://github.com/pybind/pybind11/pull/2616>`_
+
+* ``setup_helpers`` will no longer set a minimum macOS version higher than the
+ current version.
+ `#2622 <https://github.com/pybind/pybind11/pull/2622>`_
+
+* Allow deleting static properties.
+ `#2629 <https://github.com/pybind/pybind11/pull/2629>`_
+
+* Seal a leak in ``def_buffer``, cleaning up the ``capture`` object after the
+ ``class_`` object goes out of scope.
+ `#2634 <https://github.com/pybind/pybind11/pull/2634>`_
+
+* ``pybind11_INCLUDE_DIRS`` was incorrect, potentially causing a regression if
+ it was expected to include ``PYTHON_INCLUDE_DIRS`` (please use targets
+ instead).
+ `#2636 <https://github.com/pybind/pybind11/pull/2636>`_
+
+* Added parameter names to the ``py::enum_`` constructor and methods, avoiding
+ ``arg0`` in the generated docstrings.
+ `#2637 <https://github.com/pybind/pybind11/pull/2637>`_
+
+* Added ``needs_recompile`` optional function to the ``ParallelCompiler``
+ helper, to allow a recompile to be skipped based on a user-defined function.
+ `#2643 <https://github.com/pybind/pybind11/pull/2643>`_
+
+
+v2.6.0 (Oct 21, 2020)
+---------------------
+
+See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
+
+New features:
+
+* Keyword-only arguments supported in Python 2 or 3 with ``py::kw_only()``.
+ `#2100 <https://github.com/pybind/pybind11/pull/2100>`_
+
+* Positional-only arguments supported in Python 2 or 3 with ``py::pos_only()``.
+ `#2459 <https://github.com/pybind/pybind11/pull/2459>`_
+
+* ``py::is_final()`` class modifier to block subclassing (CPython only).
+ `#2151 <https://github.com/pybind/pybind11/pull/2151>`_
+
+* Added ``py::prepend()``, allowing a function to be placed at the beginning of
+ the overload chain.
+ `#1131 <https://github.com/pybind/pybind11/pull/1131>`_
+
+* Access to the type object now provided with ``py::type::of<T>()`` and
+ ``py::type::of(h)``.
+ `#2364 <https://github.com/pybind/pybind11/pull/2364>`_
+
+* Perfect forwarding support for methods.
+ `#2048 <https://github.com/pybind/pybind11/pull/2048>`_
+
+* Added ``py::error_already_set::discard_as_unraisable()``.
+ `#2372 <https://github.com/pybind/pybind11/pull/2372>`_
+
+* ``py::hash`` is now public.
+ `#2217 <https://github.com/pybind/pybind11/pull/2217>`_
+
+* ``py::class_<union_type>`` is now supported. Note that writing to one data
+ member of the union and reading another (type punning) is UB in C++. Thus
+ pybind11-bound enums should never be used for such conversions.
+ `#2320 <https://github.com/pybind/pybind11/pull/2320>`_.
+
+* Classes now check local scope when registering members, allowing a subclass
+ to have a member with the same name as a parent (such as an enum).
+ `#2335 <https://github.com/pybind/pybind11/pull/2335>`_
+
+Code correctness features:
+
+* Error now thrown when ``__init__`` is forgotten on subclasses.
+ `#2152 <https://github.com/pybind/pybind11/pull/2152>`_
+
+* Throw an error on conversion to a pybind11 type if the Python object isn't a
+ valid instance of that type, such as ``py::bytes(o)`` when ``py::object o``
+ isn't a bytes instance.
+ `#2349 <https://github.com/pybind/pybind11/pull/2349>`_
+
+* Throw if conversion to ``str`` fails.
+ `#2477 <https://github.com/pybind/pybind11/pull/2477>`_
+
+
+API changes:
+
+* ``py::module`` was renamed ``py::module_`` to avoid issues with C++20 when
+ used unqualified, but an alias ``py::module`` is provided for backward
+ compatibility.
+ `#2489 <https://github.com/pybind/pybind11/pull/2489>`_
+
+* Public constructors for ``py::module_`` have been deprecated; please use
+ ``pybind11::module_::create_extension_module`` if you were using the public
+ constructor (fairly rare after ``PYBIND11_MODULE`` was introduced).
+ `#2552 <https://github.com/pybind/pybind11/pull/2552>`_
+
+* ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function replaced by
+ correctly-named ``PYBIND11_OVERRIDE*`` and ``get_override``, fixing
+ inconsistencies in the presence of a closing ``;`` in these macros.
+ ``get_type_overload`` is deprecated.
+ `#2325 <https://github.com/pybind/pybind11/pull/2325>`_
+
+Packaging / building improvements:
+
+* The Python package was reworked to be more powerful and useful.
+ `#2433 <https://github.com/pybind/pybind11/pull/2433>`_
+
+ * :ref:`build-setuptools` is easier thanks to a new
+ ``pybind11.setup_helpers`` module, which provides utilities to use
+ setuptools with pybind11. It can be used via PEP 518, ``setup_requires``,
+ or by directly importing or copying ``setup_helpers.py`` into your project.
+
+ * CMake configuration files are now included in the Python package. Use
+ ``pybind11.get_cmake_dir()`` or ``python -m pybind11 --cmakedir`` to get
+ the directory with the CMake configuration files, or include the
+ site-packages location in your ``CMAKE_MODULE_PATH``. Or you can use the
+ new ``pybind11[global]`` extra when you install ``pybind11``, which
+ installs the CMake files and headers into your base environment in the
+ standard location.
+
+ * ``pybind11-config`` is another way to write ``python -m pybind11`` if you
+ have your PATH set up.
+
+ * Added external typing support to the helper module, code from
+ ``import pybind11`` can now be type checked.
+ `#2588 <https://github.com/pybind/pybind11/pull/2588>`_
+
+* Minimum CMake required increased to 3.4.
+ `#2338 <https://github.com/pybind/pybind11/pull/2338>`_ and
+ `#2370 <https://github.com/pybind/pybind11/pull/2370>`_
+
+ * Full integration with CMake’s C++ standard system and compile features
+ replaces ``PYBIND11_CPP_STANDARD``.
+
+ * Generated config file is now portable to different Python/compiler/CMake
+ versions.
+
+ * Virtual environments prioritized if ``PYTHON_EXECUTABLE`` is not set
+ (``venv``, ``virtualenv``, and ``conda``) (similar to the new FindPython
+ mode).
+
+ * Other CMake features now natively supported, like
+ ``CMAKE_INTERPROCEDURAL_OPTIMIZATION``, ``set(CMAKE_CXX_VISIBILITY_PRESET
+ hidden)``.
+
+ * ``CUDA`` as a language is now supported.
+
+ * Helper functions ``pybind11_strip``, ``pybind11_extension``,
+ ``pybind11_find_import`` added, see :doc:`cmake/index`.
+
+ * Optional :ref:`find-python-mode` and :ref:`nopython-mode` with CMake.
+ `#2370 <https://github.com/pybind/pybind11/pull/2370>`_
+
+* Uninstall target added.
+ `#2265 <https://github.com/pybind/pybind11/pull/2265>`_ and
+ `#2346 <https://github.com/pybind/pybind11/pull/2346>`_
+
+* ``pybind11_add_module()`` now accepts an optional ``OPT_SIZE`` flag that
+ switches the binding target to size-based optimization if the global build
+ type can not always be fixed to ``MinSizeRel`` (except in debug mode, where
+ optimizations remain disabled). ``MinSizeRel`` or this flag reduces binary
+ size quite substantially (~25% on some platforms).
+ `#2463 <https://github.com/pybind/pybind11/pull/2463>`_
+
+Smaller or developer focused features and fixes:
+
+* Moved ``mkdoc.py`` to a new repo, `pybind11-mkdoc`_. There are no longer
+ submodules in the main repo.
+
+* ``py::memoryview`` segfault fix and update, with new
+ ``py::memoryview::from_memory`` in Python 3, and documentation.
+ `#2223 <https://github.com/pybind/pybind11/pull/2223>`_
+
+* Fix for ``buffer_info`` on Python 2.
+ `#2503 <https://github.com/pybind/pybind11/pull/2503>`_
+
+* If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to
+ ``None``.
+ `#2291 <https://github.com/pybind/pybind11/pull/2291>`_
+
+* ``py::ellipsis`` now also works on Python 2.
+ `#2360 <https://github.com/pybind/pybind11/pull/2360>`_
+
+* Pointer to ``std::tuple`` & ``std::pair`` supported in cast.
+ `#2334 <https://github.com/pybind/pybind11/pull/2334>`_
+
+* Small fixes in NumPy support. ``py::array`` now uses ``py::ssize_t`` as first
+ argument type.
+ `#2293 <https://github.com/pybind/pybind11/pull/2293>`_
+
+* Added missing signature for ``py::array``.
+ `#2363 <https://github.com/pybind/pybind11/pull/2363>`_
+
+* ``unchecked_mutable_reference`` has access to operator ``()`` and ``[]`` when
+ const.
+ `#2514 <https://github.com/pybind/pybind11/pull/2514>`_
+
+* ``py::vectorize`` is now supported on functions that return void.
+ `#1969 <https://github.com/pybind/pybind11/pull/1969>`_
+
+* ``py::capsule`` supports ``get_pointer`` and ``set_pointer``.
+ `#1131 <https://github.com/pybind/pybind11/pull/1131>`_
+
+* Fix crash when different instances share the same pointer of the same type.
+ `#2252 <https://github.com/pybind/pybind11/pull/2252>`_
+
+* Fix for ``py::len`` not clearing Python's error state when it fails and throws.
+ `#2575 <https://github.com/pybind/pybind11/pull/2575>`_
+
+* Bugfixes related to more extensive testing, new GitHub Actions CI.
+ `#2321 <https://github.com/pybind/pybind11/pull/2321>`_
+
+* Fixed a timezone bug affecting midnight times in the Eastern Hemisphere.
+ `#2438 <https://github.com/pybind/pybind11/pull/2438>`_
+
+* ``std::chrono::time_point`` now works when the resolution is not the same as
+ the system.
+ `#2481 <https://github.com/pybind/pybind11/pull/2481>`_
+
+* Bug fixed where ``py::array_t`` could accept arrays that did not match the
+ requested ordering.
+ `#2484 <https://github.com/pybind/pybind11/pull/2484>`_
+
+* Avoid a segfault on some compilers when types are removed in Python.
+ `#2564 <https://github.com/pybind/pybind11/pull/2564>`_
+
+* ``py::arg::none()`` is now also respected when passing keyword arguments.
+ `#2611 <https://github.com/pybind/pybind11/pull/2611>`_
+
+* PyPy fixes, PyPy 7.3.x now supported, including PyPy3. (Known issue with
+ PyPy2 and Windows `#2596 <https://github.com/pybind/pybind11/issues/2596>`_).
+ `#2146 <https://github.com/pybind/pybind11/pull/2146>`_
+
+* CPython 3.9.0 workaround for undefined behavior (macOS segfault).
+ `#2576 <https://github.com/pybind/pybind11/pull/2576>`_
+
+* CPython 3.9 warning fixes.
+ `#2253 <https://github.com/pybind/pybind11/pull/2253>`_
+
+* Improved C++20 support, now tested in CI.
+ `#2489 <https://github.com/pybind/pybind11/pull/2489>`_
+ `#2599 <https://github.com/pybind/pybind11/pull/2599>`_
+
+* Improved but still incomplete debug Python interpreter support.
+ `#2025 <https://github.com/pybind/pybind11/pull/2025>`_
+
+* NVCC (CUDA 11) now supported and tested in CI.
+ `#2461 <https://github.com/pybind/pybind11/pull/2461>`_
+
+* NVIDIA PGI compilers now supported and tested in CI.
+ `#2475 <https://github.com/pybind/pybind11/pull/2475>`_
+
+* At least Intel 18 now explicitly required when compiling with Intel.
+ `#2577 <https://github.com/pybind/pybind11/pull/2577>`_
+
+* Extensive style checking in CI, with `pre-commit`_ support. Code
+ modernization, checked by clang-tidy.
+
+* Expanded docs, including new main page, new installing section, and CMake
+ helpers page, along with over a dozen new sections on existing pages.
+
+* In GitHub, new docs for contributing and new issue templates.
+
+.. _pre-commit: https://pre-commit.com
+
+.. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc
+
+v2.5.0 (Mar 31, 2020)
+-----------------------------------------------------
+
+* Use C++17 fold expressions in type casters, if available. This can
+ improve performance during overload resolution when functions have
+ multiple arguments.
+ `#2043 <https://github.com/pybind/pybind11/pull/2043>`_.
+
+* Changed include directory resolution in ``pybind11/__init__.py``
+ and installation in ``setup.py``. This fixes a number of open issues
+ where pybind11 headers could not be found in certain environments.
+ `#1995 <https://github.com/pybind/pybind11/pull/1995>`_.
+
+* C++20 ``char8_t`` and ``u8string`` support. `#2026
+ <https://github.com/pybind/pybind11/pull/2026>`_.
+
+* CMake: search for Python 3.9. `bb9c91
+ <https://github.com/pybind/pybind11/commit/bb9c91>`_.
+
+* Fixes for MSYS-based build environments.
+ `#2087 <https://github.com/pybind/pybind11/pull/2087>`_,
+ `#2053 <https://github.com/pybind/pybind11/pull/2053>`_.
+
+* STL bindings for ``std::vector<...>::clear``. `#2074
+ <https://github.com/pybind/pybind11/pull/2074>`_.
+
+* Read-only flag for ``py::buffer``. `#1466
+ <https://github.com/pybind/pybind11/pull/1466>`_.
+
+* Exception handling during module initialization.
+ `bf2b031 <https://github.com/pybind/pybind11/commit/bf2b031>`_.
+
+* Support linking against a CPython debug build.
+ `#2025 <https://github.com/pybind/pybind11/pull/2025>`_.
+
+* Fixed issues involving the availability and use of aligned ``new`` and
+ ``delete``. `#1988 <https://github.com/pybind/pybind11/pull/1988>`_,
+ `759221 <https://github.com/pybind/pybind11/commit/759221>`_.
+
+* Fixed a resource leak upon interpreter shutdown.
+ `#2020 <https://github.com/pybind/pybind11/pull/2020>`_.
+
+* Fixed error handling in the boolean caster.
+ `#1976 <https://github.com/pybind/pybind11/pull/1976>`_.
+
+v2.4.3 (Oct 15, 2019)
+-----------------------------------------------------
+
+* Adapt pybind11 to a C API convention change in Python 3.8. `#1950
+ <https://github.com/pybind/pybind11/pull/1950>`_.
+
+v2.4.2 (Sep 21, 2019)
+-----------------------------------------------------
+
+* Replaced usage of a C++14 only construct. `#1929
+ <https://github.com/pybind/pybind11/pull/1929>`_.
+
+* Made an ifdef future-proof for Python >= 4. `f3109d
+ <https://github.com/pybind/pybind11/commit/f3109d>`_.
+
v2.4.1 (Sep 20, 2019)
-----------------------------------------------------
* Fixed a problem involving implicit conversion from enumerations to integers
- on Python 3.8. `1780 <https://github.com/pybind/pybind11/pull/1780>`_.
+ on Python 3.8. `#1780 <https://github.com/pybind/pybind11/pull/1780>`_.
v2.4.0 (Sep 19, 2019)
-----------------------------------------------------
v2.2.1 (September 14, 2017)
-----------------------------------------------------
-* Added ``py::module::reload()`` member function for reloading a module.
+* Added ``py::module_::reload()`` member function for reloading a module.
`#1040 <https://github.com/pybind/pybind11/pull/1040>`_.
* Fixed a reference leak in the number converter.
in reference cycles.
`#856 <https://github.com/pybind/pybind11/pull/856>`_.
-* Numpy and buffer protocol related improvements:
+* NumPy and buffer protocol related improvements:
1. Support for negative strides in Python buffer objects/numpy arrays. This
required changing integers from unsigned to signed for the related C++ APIs.
* Improved support for ``std::shared_ptr<>`` conversions
* Initial support for ``std::set<>`` conversions
* Fixed type resolution issue for types defined in a separate plugin module
-* Cmake build system improvements
+* CMake build system improvements
* Factored out generic functionality to non-templated code (smaller code size)
* Added a code size / compile time benchmark vs Boost.Python
* Added an appveyor CI script
>>> print(p)
<example.Pet object at 0x10cd98060>
-To address this, we could bind an utility function that returns a human-readable
+To address this, we could bind a utility function that returns a human-readable
summary to the special method slot named ``__repr__``. Unfortunately, there is no
suitable functionality in the ``Pet`` data structure, and it would be nice if
we did not have to change it. This can easily be accomplished by binding a
py::class_<Pet>(m, "Pet")
.def(py::init<const std::string &, int>())
- .def("set", (void (Pet::*)(int)) &Pet::set, "Set the pet's age")
- .def("set", (void (Pet::*)(const std::string &)) &Pet::set, "Set the pet's name");
+ .def("set", static_cast<void (Pet::*)(int)>(&Pet::set), "Set the pet's age")
+ .def("set", static_cast<void (Pet::*)(const std::string &)>(&Pet::set), "Set the pet's name");
The overload signatures are also visible in the method's docstring:
--- /dev/null
+CMake helpers
+-------------
+
+Pybind11 can be used with ``add_subdirectory(extern/pybind11)``, or from an
+install with ``find_package(pybind11 CONFIG)``. The interface provided in
+either case is functionally identical.
+
+.. cmake-module:: ../../tools/pybind11Config.cmake.in
Build systems
#############
+.. _build-setuptools:
+
Building with setuptools
========================
.. [python_example] https://github.com/pybind/python_example
+A helper file is provided with pybind11 that can simplify usage with setuptools.
+
+To use pybind11 inside your ``setup.py``, you have to have some system to
+ensure that ``pybind11`` is installed when you build your package. There are
+four possible ways to do this, and pybind11 supports all four: You can ask all
+users to install pybind11 beforehand (bad), you can use
+:ref:`setup_helpers-pep518` (good, but very new and requires Pip 10),
+:ref:`setup_helpers-setup_requires` (discouraged by Python packagers now that
+PEP 518 is available, but it still works everywhere), or you can
+:ref:`setup_helpers-copy-manually` (always works but you have to manually sync
+your copy to get updates).
+
+An example of a ``setup.py`` using pybind11's helpers:
+
+.. code-block:: python
+
+ from glob import glob
+ from setuptools import setup
+ from pybind11.setup_helpers import Pybind11Extension
+
+ ext_modules = [
+ Pybind11Extension(
+ "python_example",
+ sorted(glob("src/*.cpp")), # Sort source files for reproducibility
+ ),
+ ]
+
+ setup(
+ ...,
+ ext_modules=ext_modules
+ )
+
+If you want to do an automatic search for the highest supported C++ standard,
+that is supported via a ``build_ext`` command override; it will only affect
+``Pybind11Extensions``:
+
+.. code-block:: python
+
+ from glob import glob
+ from setuptools import setup
+ from pybind11.setup_helpers import Pybind11Extension, build_ext
+
+ ext_modules = [
+ Pybind11Extension(
+ "python_example",
+ sorted(glob("src/*.cpp")),
+ ),
+ ]
+
+ setup(
+ ...,
+ cmdclass={"build_ext": build_ext},
+ ext_modules=ext_modules
+ )
+
+Since pybind11 does not require NumPy when building, a light-weight replacement
+for NumPy's parallel compilation distutils tool is included. Use it like this:
+
+.. code-block:: python
+
+ from pybind11.setup_helpers import ParallelCompile
+
+ # Optional multithreaded build
+ ParallelCompile("NPY_NUM_BUILD_JOBS").install()
+
+ setup(...)
+
+The argument is the name of an environment variable to control the number of
+threads, such as ``NPY_NUM_BUILD_JOBS`` (as used by NumPy), though you can set
+something different if you want; ``CMAKE_BUILD_PARALLEL_LEVEL`` is another choice
+a user might expect. You can also pass ``default=N`` to set the default number
+of threads (0 will take the number of threads available) and ``max=N``, the
+ maximum number of threads; if you have a large extension you may want to set
+ this to a memory dependent number.
+to a memory dependent number.
+
+If you are developing rapidly and have a lot of C++ files, you may want to
+avoid rebuilding files that have not changed. For simple cases where you are
+using ``pip install -e .`` and do not have local headers, you can skip the
+rebuild if an object file is newer than its source (headers are not checked!)
+with the following:
+
+.. code-block:: python
+
+ from pybind11.setup_helpers import ParallelCompile, naive_recompile
+
+    ParallelCompile("NPY_NUM_BUILD_JOBS", needs_recompile=naive_recompile).install()
+
+
+If you have a more complex build, you can implement a smarter function and pass
+it to ``needs_recompile``, or you can use [Ccache]_ instead. ``CXX="ccache g++"
+pip install -e .`` would be the way to use it with GCC, for example. Unlike the
+simple solution, this works even when not compiling in editable mode, but
+it does require Ccache to be installed.
+
+Keep in mind that Pip will not even attempt to rebuild if it thinks it has
+already built a copy of your code, which it deduces from the version number.
+One way to avoid this is to use [setuptools_scm]_, which will generate a
+version number that includes the number of commits since your last tag and a
+hash for a dirty directory. Another way to force a rebuild is to purge your
+cache or use Pip's ``--no-cache-dir`` option.
+or use Pip's ``--no-cache-dir`` option.
+
+.. [Ccache] https://ccache.dev
+
+.. [setuptools_scm] https://github.com/pypa/setuptools_scm
+
+.. _setup_helpers-pep518:
+
+PEP 518 requirements (Pip 10+ required)
+---------------------------------------
+
+If you use `PEP 518's <https://www.python.org/dev/peps/pep-0518/>`_
+``pyproject.toml`` file, you can ensure that ``pybind11`` is available during
+the compilation of your project. When this file exists, Pip will make a new
+virtual environment, download just the packages listed here in ``requires=``,
+and build a wheel (binary Python package). It will then throw away the
+environment, and install your wheel.
+
+Your ``pyproject.toml`` file will likely look something like this:
+
+.. code-block:: toml
+
+ [build-system]
+ requires = ["setuptools>=42", "wheel", "pybind11~=2.6.1"]
+ build-backend = "setuptools.build_meta"
+
+.. note::
+
+ The main drawback to this method is that a `PEP 517`_ compliant build tool,
+ such as Pip 10+, is required for this approach to work; older versions of
+ Pip completely ignore this file. If you distribute binaries (called wheels
+ in Python) using something like `cibuildwheel`_, remember that ``setup.py``
+ and ``pyproject.toml`` are not even contained in the wheel, so this high
+ Pip requirement is only for source builds, and will not affect users of
+ your binary wheels. If you are building SDists and wheels, then
+   `pypa-build`_ is the recommended official tool.
+
+.. _PEP 517: https://www.python.org/dev/peps/pep-0517/
+.. _cibuildwheel: https://cibuildwheel.readthedocs.io
+.. _pypa-build: https://pypa-build.readthedocs.io/en/latest/
+
+.. _setup_helpers-setup_requires:
+
+Classic ``setup_requires``
+--------------------------
+
+If you want to support old versions of Pip with the classic
+``setup_requires=["pybind11"]`` keyword argument to setup, which triggers a
+two-phase ``setup.py`` run, then you will need to use something like this to
+ensure the first pass works (which has not yet installed the ``setup_requires``
+packages, since it can't install something it does not know about):
+
+.. code-block:: python
+
+ try:
+ from pybind11.setup_helpers import Pybind11Extension
+ except ImportError:
+ from setuptools import Extension as Pybind11Extension
+
+
+It doesn't matter that the Extension class is not the enhanced subclass for the
+first pass run; and the second pass will have the ``setup_requires``
+requirements.
+
+This is obviously more of a hack than the PEP 518 method, but it supports
+ancient versions of Pip.
+
+.. _setup_helpers-copy-manually:
+
+Copy manually
+-------------
+
+You can also copy ``setup_helpers.py`` directly to your project; it was
+designed to be usable standalone, like the old example ``setup.py``. You can
+set ``include_pybind11=False`` to skip including the pybind11 package headers,
+so you can use it with git submodules and a specific git version. If you use
+this, you will need to import from a local file in ``setup.py`` and ensure the
+helper file is part of your MANIFEST.
+
+
+Closely related, if you include pybind11 as a subproject, you can run the
+``setup_helpers.py`` inplace. If loaded correctly, this should even pick up
+the correct include for pybind11, though you can turn it off as shown above if
+you want to input it manually.
+
+Suggested usage if you have pybind11 as a submodule in ``extern/pybind11``:
+
+.. code-block:: python
+
+ DIR = os.path.abspath(os.path.dirname(__file__))
+
+ sys.path.append(os.path.join(DIR, "extern", "pybind11"))
+ from pybind11.setup_helpers import Pybind11Extension # noqa: E402
+
+ del sys.path[-1]
+
+
+.. versionchanged:: 2.6
+
+ Added ``setup_helpers`` file.
+
Building with cppimport
========================
.. code-block:: cmake
- cmake_minimum_required(VERSION 2.8.12)
- project(example)
+ cmake_minimum_required(VERSION 3.4...3.18)
+ project(example LANGUAGES CXX)
add_subdirectory(pybind11)
pybind11_add_module(example example.cpp)
.. [cmake_example] https://github.com/pybind/cmake_example
+.. versionchanged:: 2.6
+ CMake 3.4+ is required.
+
+Further information can be found at :doc:`cmake/index`.
+
pybind11_add_module
-------------------
.. code-block:: cmake
pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
- [NO_EXTRAS] [SYSTEM] [THIN_LTO] source1 [source2 ...])
+ [NO_EXTRAS] [THIN_LTO] [OPT_SIZE] source1 [source2 ...])
This function behaves very much like CMake's builtin ``add_library`` (in fact,
it's a wrapper function around that command). It will add a library target
given, they will always be disabled, even in ``Release`` mode. However, this
will result in code bloat and is generally not recommended.
-By default, pybind11 and Python headers will be included with ``-I``. In order
-to include pybind11 as system library, e.g. to avoid warnings in downstream
-code with warn-levels outside of pybind11's scope, set the option ``SYSTEM``.
-
As stated above, LTO is enabled by default. Some newer compilers also support
different flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause
the function to prefer this flavor if available. The function falls back to
-regular LTO if ``-flto=thin`` is not available.
+regular LTO if ``-flto=thin`` is not available. If
+``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set (either ``ON`` or ``OFF``), then
+that will be respected instead of the built-in flag search.
+
+.. note::
+
+ If you want to set the property form on targets or the
+ ``CMAKE_INTERPROCEDURAL_OPTIMIZATION_<CONFIG>`` versions of this, you should
+ still use ``set(CMAKE_INTERPROCEDURAL_OPTIMIZATION OFF)`` (otherwise a
+ no-op) to disable pybind11's ipo flags.
+
+The ``OPT_SIZE`` flag enables size-based optimization equivalent to the
+standard ``/Os`` or ``-Os`` compiler flags and the ``MinSizeRel`` build type,
+which avoid optimizations that can substantially increase the size of the
+resulting binary. This flag is particularly useful in projects that are split
+into performance-critical parts and associated bindings. In this case, we can
+compile the project in release mode (and hence, optimize performance globally),
+and specify ``OPT_SIZE`` for the binding target, where size might be the main
+concern as performance is often less critical here. A ~25% size reduction has
+been observed in practice. This flag only changes the optimization behavior at
+a per-target level and takes precedence over the global CMake build type
+(``Release``, ``RelWithDebInfo``) except for ``Debug`` builds, where
+optimizations remain disabled.
.. _ThinLTO: http://clang.llvm.org/docs/ThinLTO.html
Configuration variables
-----------------------
-By default, pybind11 will compile modules with the C++14 standard, if available
-on the target compiler, falling back to C++11 if C++14 support is not
-available. Note, however, that this default is subject to change: future
-pybind11 releases are expected to migrate to newer C++ standards as they become
-available. To override this, the standard flag can be given explicitly in
-``PYBIND11_CPP_STANDARD``:
+By default, pybind11 will compile modules with the compiler default or the
+minimum standard required by pybind11, whichever is higher. You can set the
+standard explicitly with
+`CMAKE_CXX_STANDARD <https://cmake.org/cmake/help/latest/variable/CMAKE_CXX_STANDARD.html>`_:
.. code-block:: cmake
- # Use just one of these:
- # GCC/clang:
- set(PYBIND11_CPP_STANDARD -std=c++11)
- set(PYBIND11_CPP_STANDARD -std=c++14)
- set(PYBIND11_CPP_STANDARD -std=c++1z) # Experimental C++17 support
- # MSVC:
- set(PYBIND11_CPP_STANDARD /std:c++14)
- set(PYBIND11_CPP_STANDARD /std:c++latest) # Enables some MSVC C++17 features
-
- add_subdirectory(pybind11) # or find_package(pybind11)
+ set(CMAKE_CXX_STANDARD 14 CACHE STRING "C++ version selection") # or 11, 14, 17, 20
+ set(CMAKE_CXX_STANDARD_REQUIRED ON) # optional, ensure standard is supported
+  set(CMAKE_CXX_EXTENSIONS OFF)  # optional, keep compiler extensions off
-Note that this and all other configuration variables must be set **before** the
-call to ``add_subdirectory`` or ``find_package``. The variables can also be set
-when calling CMake from the command line using the ``-D<variable>=<value>`` flag.
+The variables can also be set when calling CMake from the command line using
+the ``-D<variable>=<value>`` flag. You can also manually set ``CXX_STANDARD``
+on a target or use ``target_compile_features`` on your targets - anything that
+CMake supports.
-The target Python version can be selected by setting ``PYBIND11_PYTHON_VERSION``
-or an exact Python installation can be specified with ``PYTHON_EXECUTABLE``.
-For example:
+Classic Python support: The target Python version can be selected by setting
+``PYBIND11_PYTHON_VERSION`` or an exact Python installation can be specified
+with ``PYTHON_EXECUTABLE``. For example:
.. code-block:: bash
cmake -DPYBIND11_PYTHON_VERSION=3.6 ..
- # or
- cmake -DPYTHON_EXECUTABLE=path/to/python ..
+
+ # Another method:
+ cmake -DPYTHON_EXECUTABLE=/path/to/python ..
+
+ # This often is a good way to get the current Python, works in environments:
+ cmake -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") ..
+
find_package vs. add_subdirectory
---------------------------------
.. code-block:: cmake
- cmake_minimum_required(VERSION 2.8.12)
- project(example)
+ cmake_minimum_required(VERSION 3.4...3.18)
+ project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED)
pybind11_add_module(example example.cpp)
.. code-block:: bash
+ # Classic CMake
cd pybind11
mkdir build
cd build
cmake ..
make install
+ # CMake 3.15+
+ cd pybind11
+ cmake -S . -B build
+ cmake --build build -j 2 # Build on 2 cores
+ cmake --install build
+
Once detected, the aforementioned ``pybind11_add_module`` can be employed as
before. The function usage and configuration variables are identical no matter
if pybind11 is added as a subdirectory or found as an installed package. You
.. _Config file: https://github.com/pybind/pybind11/blob/master/tools/pybind11Config.cmake.in
-Advanced: interface library target
-----------------------------------
-When using a version of CMake greater than 3.0, pybind11 can additionally
-be used as a special *interface library* . The target ``pybind11::module``
-is available with pybind11 headers, Python headers and libraries as needed,
-and C++ compile definitions attached. This target is suitable for linking
-to an independently constructed (through ``add_library``, not
-``pybind11_add_module``) target in the consuming project.
+.. _find-python-mode:
+
+FindPython mode
+---------------
+
+CMake 3.12+ (3.15+ recommended, 3.18.2+ ideal) added a new module called
+FindPython that had a highly improved search algorithm and modern targets
+and tools. If you use FindPython, pybind11 will detect this and use the
+existing targets instead:
+
+.. code-block:: cmake
+
+  cmake_minimum_required(VERSION 3.15...3.19)
+ project(example LANGUAGES CXX)
+
+ find_package(Python COMPONENTS Interpreter Development REQUIRED)
+ find_package(pybind11 CONFIG REQUIRED)
+ # or add_subdirectory(pybind11)
+
+ pybind11_add_module(example example.cpp)
+
+You can also use the targets (as listed below) with FindPython. If you define
+``PYBIND11_FINDPYTHON``, pybind11 will perform the FindPython step for you
+(mostly useful when building pybind11's own tests, or as a way to change search
+algorithms from the CMake invocation, with ``-DPYBIND11_FINDPYTHON=ON``).
+
+.. warning::
+
+ If you use FindPython2 and FindPython3 to dual-target Python, use the
+ individual targets listed below, and avoid targets that directly include
+ Python parts.
+
+There are `many ways to hint or force a discovery of a specific Python
+installation <https://cmake.org/cmake/help/latest/module/FindPython.html>`_;
+setting ``Python_ROOT_DIR`` may be the most common one (though with
+virtualenv/venv support, and Conda support, this tends to find the correct
+Python version more often than the old system did).
+
+.. warning::
+
+ When the Python libraries (i.e. ``libpythonXX.a`` and ``libpythonXX.so``
+ on Unix) are not available, as is the case on a manylinux image, the
+ ``Development`` component will not be resolved by ``FindPython``. When not
+ using the embedding functionality, CMake 3.18+ allows you to specify
+ ``Development.Module`` instead of ``Development`` to resolve this issue.
+
+.. versionadded:: 2.6
+
+Advanced: interface library targets
+-----------------------------------
+
+Pybind11 supports modern CMake usage patterns with a set of interface targets,
+available in all modes. The targets provided are:
+
+ ``pybind11::headers``
+ Just the pybind11 headers and minimum compile requirements
+
+ ``pybind11::python2_no_register``
+ Quiets the warning/error when mixing C++14 or higher and Python 2
+
+ ``pybind11::pybind11``
+ Python headers + ``pybind11::headers`` + ``pybind11::python2_no_register`` (Python 2 only)
+
+ ``pybind11::python_link_helper``
+    Just the "linking" part of ``pybind11::module``
+
+ ``pybind11::module``
+ Everything for extension modules - ``pybind11::pybind11`` + ``Python::Module`` (FindPython CMake 3.15+) or ``pybind11::python_link_helper``
+
+ ``pybind11::embed``
+ Everything for embedding the Python interpreter - ``pybind11::pybind11`` + ``Python::Embed`` (FindPython) or Python libs
+
+ ``pybind11::lto`` / ``pybind11::thin_lto``
+ An alternative to `INTERPROCEDURAL_OPTIMIZATION` for adding link-time optimization.
+
+ ``pybind11::windows_extras``
+ ``/bigobj`` and ``/mp`` for MSVC.
+
+ ``pybind11::opt_size``
+ ``/Os`` for MSVC, ``-Os`` for other compilers. Does nothing for debug builds.
+
+Two helper functions are also provided:
+
+ ``pybind11_strip(target)``
+ Strips a target (uses ``CMAKE_STRIP`` after the target is built)
+
+ ``pybind11_extension(target)``
+ Sets the correct extension (with SOABI) for a target.
+
+You can use these targets to build complex applications. For example, the
+``pybind11_add_module`` function is identical to:
.. code-block:: cmake
- cmake_minimum_required(VERSION 3.0)
- project(example)
+ cmake_minimum_required(VERSION 3.4)
+ project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
add_library(example MODULE main.cpp)
- target_link_libraries(example PRIVATE pybind11::module)
- set_target_properties(example PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
+
+ target_link_libraries(example PRIVATE pybind11::module pybind11::lto pybind11::windows_extras)
+
+ pybind11_extension(example)
+ pybind11_strip(example)
+
+ set_target_properties(example PROPERTIES CXX_VISIBILITY_PRESET "hidden"
+ CUDA_VISIBILITY_PRESET "hidden")
+
+Instead of setting properties, you can set ``CMAKE_*`` variables to initialize these correctly.
.. warning::
Since pybind11 is a metatemplate library, it is crucial that certain
compiler flags are provided to ensure high quality code generation. In
contrast to the ``pybind11_add_module()`` command, the CMake interface
- library only provides the *minimal* set of parameters to ensure that the
- code using pybind11 compiles, but it does **not** pass these extra compiler
- flags (i.e. this is up to you).
+ provides a *composable* set of targets to ensure that you retain flexibility.
+  It can be especially important to provide or set these properties; the
+ :ref:`FAQ <faq:symhidden>` contains an explanation on why these are needed.
+
+.. versionadded:: 2.6
+
+.. _nopython-mode:
- These include Link Time Optimization (``-flto`` on GCC/Clang/ICPC, ``/GL``
- and ``/LTCG`` on Visual Studio) and .OBJ files with many sections on Visual
- Studio (``/bigobj``). The :ref:`FAQ <faq:symhidden>` contains an
- explanation on why these are needed.
+Advanced: NOPYTHON mode
+-----------------------
+
+If you want complete control, you can set ``PYBIND11_NOPYTHON`` to completely
+disable Python integration (this also happens if you run ``FindPython2`` and
+``FindPython3`` without running ``FindPython``). This gives you complete
+freedom to integrate into an existing system (like `Scikit-Build's
+<https://scikit-build.readthedocs.io>`_ ``PythonExtensions``).
+``pybind11_add_module`` and ``pybind11_extension`` will be unavailable, and the
+targets will be missing any Python specific behavior.
+
+.. versionadded:: 2.6
Embedding the Python interpreter
--------------------------------
.. code-block:: cmake
- cmake_minimum_required(VERSION 3.0)
- project(example)
+ cmake_minimum_required(VERSION 3.4...3.18)
+ project(example LANGUAGES CXX)
find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11)
.. code-block:: bash
- $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
+ $ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
The flags given here assume that you're using Python 3. For Python 2, just
change the executable appropriately (to ``python`` or ``python2``).
``python3-config --includes``.
Note that Python 2.7 modules don't use a special suffix, so you should simply
-use ``example.so`` instead of ``example`python3-config --extension-suffix```.
+use ``example.so`` instead of ``example$(python3-config --extension-suffix)``.
Besides, the ``--extension-suffix`` option may or may not be available, depending
on the distribution; in the latter case, the module extension can be manually
set to ``.so``.
-On Mac OS: the build command is almost the same but it also requires passing
+On macOS: the build command is almost the same but it also requires passing
the ``-undefined dynamic_lookup`` flag so as to ignore missing symbols when
building the module:
.. code-block:: bash
- $ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
+ $ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
In general, it is advisable to include several additional build parameters
that can considerably reduce the size of the created binary. Refer to section
of possibly importing a second Python library into a process that already
contains one (which will lead to a segfault).
+
+Building with Bazel
+===================
+
+You can build with the Bazel build system using the `pybind11_bazel
+<https://github.com/pybind/pybind11_bazel>`_ repository.
+
Generating binding code automatically
=====================================
[binder]_ documentation for details.
.. [binder] http://cppbinder.readthedocs.io/en/latest/about.html
+
+[AutoWIG]_ is a Python library that wraps automatically compiled libraries into
+high-level languages. It parses C++ code using LLVM/Clang technologies and
+generates the wrappers using the Mako templating engine. The approach is automatic,
+extensible, and applies to very complex C++ libraries, composed of thousands of
+classes or incorporating modern meta-programming constructs.
+
+.. [AutoWIG] https://github.com/StatisKit/AutoWIG
+
+[robotpy-build]_ is a pure Python, cross-platform build tool that aims to
+simplify creation of python wheels for pybind11 projects, and provide
+cross-project dependency management. Additionally, it is able to autogenerate
+customizable pybind11-based wrappers by parsing C++ header files.
+
+.. [robotpy-build] https://robotpy-build.readthedocs.io
import os
import shlex
import subprocess
+from pathlib import Path
+import re
+
+DIR = Path(__file__).parent.resolve()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['breathe']
+extensions = [
+ "breathe",
+ "sphinxcontrib.rsvgconverter",
+ "sphinxcontrib.moderncmakedomain",
+]
-breathe_projects = {'pybind11': '.build/doxygenxml/'}
-breathe_default_project = 'pybind11'
-breathe_domain_by_extension = {'h': 'cpp'}
+breathe_projects = {"pybind11": ".build/doxygenxml/"}
+breathe_default_project = "pybind11"
+breathe_domain_by_extension = {"h": "cpp"}
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['.templates']
+templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = 'pybind11'
-copyright = '2017, Wenzel Jakob'
-author = 'Wenzel Jakob'
+project = "pybind11"
+copyright = "2017, Wenzel Jakob"
+author = "Wenzel Jakob"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
-#
-# The short X.Y version.
-version = '2.4'
+
+# Read the listed version
+with open("../pybind11/_version.py") as f:
+ code = compile(f.read(), "../pybind11/_version.py", "exec")
+loc = {}
+exec(code, loc)
+
# The full version, including alpha/beta/rc tags.
-release = '2.4.1'
+version = loc["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['.build', 'release.rst']
+exclude_patterns = [".build", "release.rst"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
-default_role = 'any'
+default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-#pygments_style = 'monokai'
+# pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
+
+ html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
- html_context = {
- 'css_files': [
- '_static/theme_overrides.css'
- ]
- }
+ html_context = {"css_files": ["_static/theme_overrides.css"]}
else:
html_context = {
- 'css_files': [
- '//media.readthedocs.org/css/sphinx_rtd_theme.css',
- '//media.readthedocs.org/css/readthedocs-doc-embed.css',
- '_static/theme_overrides.css'
+ "css_files": [
+ "//media.readthedocs.org/css/sphinx_rtd_theme.css",
+ "//media.readthedocs.org/css/readthedocs-doc-embed.css",
+ "_static/theme_overrides.css",
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
+# "<project> v<version> documentation".
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
-htmlhelp_basename = 'pybind11doc'
+htmlhelp_basename = "pybind11doc"
# -- Options for LaTeX output ---------------------------------------------
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-'preamble': '\DeclareUnicodeCharacter{00A0}{}',
+latex_engine = "pdflatex"
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+ #
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+ #
+ # Additional stuff for the LaTeX preamble.
+ # remove blank pages (between the title page and the TOC, etc.)
+ "classoptions": ",openany,oneside",
+ "preamble": r"""
+\usepackage{fontawesome}
+\usepackage{textgreek}
+\DeclareUnicodeCharacter{00A0}{}
+\DeclareUnicodeCharacter{2194}{\faArrowsH}
+\DeclareUnicodeCharacter{1F382}{\faBirthdayCake}
+\DeclareUnicodeCharacter{1F355}{\faAdjust}
+\DeclareUnicodeCharacter{0301}{'}
+\DeclareUnicodeCharacter{03C0}{\textpi}
+
+""",
+ # Latex figure (float) alignment
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'pybind11.tex', 'pybind11 Documentation',
- 'Wenzel Jakob', 'manual'),
+ (master_doc, "pybind11.tex", "pybind11 Documentation", "Wenzel Jakob", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'pybind11', 'pybind11 Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, "pybind11", "pybind11 Documentation", [author], 1)]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'pybind11', 'pybind11 Documentation',
- author, 'pybind11', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ "pybind11",
+ "pybind11 Documentation",
+ author,
+ "pybind11",
+ "One line description of project.",
+ "Miscellaneous",
+ ),
]
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
-primary_domain = 'cpp'
-highlight_language = 'cpp'
+primary_domain = "cpp"
+highlight_language = "cpp"
def generate_doxygen_xml(app):
- build_dir = os.path.join(app.confdir, '.build')
+ build_dir = os.path.join(app.confdir, ".build")
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
- subprocess.call(['doxygen', '--version'])
- retcode = subprocess.call(['doxygen'], cwd=app.confdir)
+ subprocess.call(["doxygen", "--version"])
+ retcode = subprocess.call(["doxygen"], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
+def prepare(app):
+ with open(DIR.parent / "README.rst") as f:
+ contents = f.read()
+
+ if app.builder.name == "latex":
+ # Remove badges and stuff from start
+ contents = contents[contents.find(r".. start") :]
+
+ # Filter out section titles for index.rst for LaTeX
+ contents = re.sub(r"^(.*)\n[-~]{3,}$", r"**\1**", contents, flags=re.MULTILINE)
+
+ with open(DIR / "readme.rst", "w") as f:
+ f.write(contents)
+
+
+def clean_up(app, exception):
+ (DIR / "readme.rst").unlink()
+
+
def setup(app):
- """Add hook for building doxygen xml when needed"""
+
+ # Add hook for building doxygen xml when needed
app.connect("builder-inited", generate_doxygen_xml)
+
+ # Copy the readme in
+ app.connect("builder-inited", prepare)
+
+ # Clean up the generated readme
+ app.connect("build-finished", clean_up)
===========================================================
1. Make sure that the name specified in PYBIND11_MODULE is identical to the
-filename of the extension library (without prefixes such as .so)
+filename of the extension library (without suffixes such as .so)
2. If the above did not fix the issue, you are likely using an incompatible
version of Python (for instance, the extension library was compiled against
See the first answer.
-CMake doesn't detect the right Python version
-=============================================
-
-The CMake-based build system will try to automatically detect the installed
-version of Python and link against that. When this fails, or when there are
-multiple versions of Python and it finds the wrong one, delete
-``CMakeCache.txt`` and then invoke CMake as follows:
-
-.. code-block:: bash
-
- cmake -DPYTHON_EXECUTABLE:FILEPATH=<path-to-python-executable> .
-
.. _faq_reference_arguments:
Limitations involving reference arguments
.. code-block:: cpp
- void init_ex1(py::module &);
- void init_ex2(py::module &);
+ void init_ex1(py::module_ &);
+ void init_ex2(py::module_ &);
/* ... */
PYBIND11_MODULE(example, m) {
.. code-block:: cpp
- void init_ex1(py::module &m) {
+ void init_ex1(py::module_ &m) {
m.def("add", [](int a, int b) { return a + b; });
}
.. code-block:: cpp
- void init_ex2(py::module &m) {
+ void init_ex2(py::module_ &m) {
m.def("sub", [](int a, int b) { return a - b; });
}
structures with incompatible ABIs, and so on. pybind11 is very careful not
to make these types of mistakes.
+How can I properly handle Ctrl-C in long-running functions?
+===========================================================
+
+Ctrl-C is received by the Python interpreter, and holds it until the GIL
+is released, so a long-running function won't be interrupted.
+
+To interrupt from inside your function, you can use the ``PyErr_CheckSignals()``
+function, that will tell if a signal has been raised on the Python side. This
+function merely checks a flag, so its impact is negligible. When a signal has
+been received, you must either explicitly interrupt execution by throwing
+``py::error_already_set`` (which will propagate the existing
+``KeyboardInterrupt``), or clear the error (which you usually will not want):
+
+.. code-block:: cpp
+
+ PYBIND11_MODULE(example, m)
+ {
+    m.def("long_running_func", []()
+ {
+ for (;;) {
+ if (PyErr_CheckSignals() != 0)
+ throw py::error_already_set();
+ // Long running iteration
+ }
+ });
+ }
+
+CMake doesn't detect the right Python version
+=============================================
+
+The CMake-based build system will try to automatically detect the installed
+version of Python and link against that. When this fails, or when there are
+multiple versions of Python and it finds the wrong one, delete
+``CMakeCache.txt`` and then add ``-DPYTHON_EXECUTABLE=$(which python)`` to your
+CMake configure line. (Replace ``$(which python)`` with a path to python if
+you prefer.)
+
+You can alternatively try ``-DPYBIND11_FINDPYTHON=ON``, which will activate the
+new CMake FindPython support instead of pybind11's custom search. Requires
+CMake 3.12+, and 3.15+ or 3.18.2+ are even better. You can set this in your
+``CMakeLists.txt`` before adding or finding pybind11, as well.
+
Inconsistent detection of Python version in CMake and pybind11
==============================================================
-The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake
-for Python version detection are not used by pybind11 due to unreliability and limitations that make
-them unsuitable for pybind11's needs. Instead pybind provides its own, more reliable Python detection
-CMake code. Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake
-Python detection in a system with several Python versions installed.
+The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)``
+provided by CMake for Python version detection are modified by pybind11 due to
+unreliability and limitations that make them unsuitable for pybind11's needs.
+Instead pybind11 provides its own, more reliable Python detection CMake code.
+Conflicts can arise, however, when using pybind11 in a project that *also* uses
+the CMake Python detection in a system with several Python versions installed.
-This difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. Consider the following
-Cmake code executed in a system with Python 2.7 and 3.x installed:
+This difference may cause inconsistencies and errors if *both* mechanisms are
+used in the same project. Consider the following CMake code executed in a
+system with Python 2.7 and 3.x installed:
.. code-block:: cmake
find_package(PythonInterp)
find_package(PythonLibs)
-will detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards.
-
-It is advised to avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and rely
-on pybind11 in detecting Python version. If this is not possible CMake machinery should be called *before* including pybind11.
+will detect Python 3.x for pybind11 and may crash on
+``find_package(PythonLibs)`` afterwards.
+
+There are three possible solutions:
+
+1. Avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)``
+ from CMake and rely on pybind11 in detecting Python version. If this is not
+ possible, the CMake machinery should be called *before* including pybind11.
+2. Set ``PYBIND11_FINDPYTHON`` to ``True`` or use ``find_package(Python
+ COMPONENTS Interpreter Development)`` on modern CMake (3.12+, 3.15+ better,
+ 3.18.2+ best). Pybind11 in these cases uses the new CMake FindPython instead
+ of the old, deprecated search tools, and these modules are much better at
+ finding the correct Python.
+3. Set ``PYBIND11_NOPYTHON`` to ``TRUE``. Pybind11 will not search for Python.
+ However, you will have to use the target-based system, and do more setup
+ yourself, because it does not know about or include things that depend on
+ Python, like ``pybind11_add_module``. This might be ideal for integrating
+ into an existing system, like scikit-build's Python helpers.
How to cite this project?
=========================
-.. only: not latex
+.. only:: latex
- .. image:: pybind11-logo.png
+ Intro
+ =====
-pybind11 --- Seamless operability between C++11 and Python
-==========================================================
+.. include:: readme.rst
-.. only: not latex
+.. only:: not latex
Contents:
.. toctree::
:maxdepth: 1
- intro
changelog
upgrade
:caption: The Basics
:maxdepth: 2
+ installing
basics
classes
compiling
benchmark
limitations
reference
+ cmake/index
--- /dev/null
+.. _installing:
+
+Installing the library
+######################
+
+There are several ways to get the pybind11 source, which lives at
+`pybind/pybind11 on GitHub <https://github.com/pybind/pybind11>`_. The pybind11
+developers recommend one of the first three ways listed here, submodule, PyPI,
+or conda-forge, for obtaining pybind11.
+
+.. _include_as_a_submodule:
+
+Include as a submodule
+======================
+
+When you are working on a project in Git, you can use the pybind11 repository
+as a submodule. From your git repository, use:
+
+.. code-block:: bash
+
+ git submodule add -b stable ../../pybind/pybind11 extern/pybind11
+ git submodule update --init
+
+This assumes you are placing your dependencies in ``extern/``, and that you are
+using GitHub; if you are not using GitHub, use the full https or ssh URL
+instead of the relative URL ``../../pybind/pybind11`` above. Some other servers
+also require the ``.git`` extension (GitHub does not).
+
+From here, you can now include ``extern/pybind11/include``, or you can use
+the various integration tools (see :ref:`compiling`) pybind11 provides directly
+from the local folder.
+
+Include with PyPI
+=================
+
+You can download the sources and CMake files as a Python package from PyPI
+using Pip. Just use:
+
+.. code-block:: bash
+
+ pip install pybind11
+
+This will provide pybind11 in a standard Python package format. If you want
+pybind11 available directly in your environment root, you can use:
+
+.. code-block:: bash
+
+ pip install "pybind11[global]"
+
+This is not recommended if you are installing with your system Python, as it
+will add files to ``/usr/local/include/pybind11`` and
+``/usr/local/share/cmake/pybind11``, so unless that is what you want, it is
+recommended only for use in virtual environments or your ``pyproject.toml``
+file (see :ref:`compiling`).
+
+Include with conda-forge
+========================
+
+You can use pybind11 with conda packaging via `conda-forge
+<https://github.com/conda-forge/pybind11-feedstock>`_:
+
+.. code-block:: bash
+
+ conda install -c conda-forge pybind11
+
+
+Include with vcpkg
+==================
+You can download and install pybind11 using the Microsoft `vcpkg
+<https://github.com/Microsoft/vcpkg/>`_ dependency manager:
+
+.. code-block:: bash
+
+ git clone https://github.com/Microsoft/vcpkg.git
+ cd vcpkg
+ ./bootstrap-vcpkg.sh
+ ./vcpkg integrate install
+ vcpkg install pybind11
+
+The pybind11 port in vcpkg is kept up to date by Microsoft team members and
+community contributors. If the version is out of date, please `create an issue
+or pull request <https://github.com/Microsoft/vcpkg/>`_ on the vcpkg
+repository.
+
+Global install with brew
+========================
+
+The brew package manager (Homebrew on macOS, or Linuxbrew on Linux) has a
+`pybind11 package
+<https://github.com/Homebrew/homebrew-core/blob/master/Formula/pybind11.rb>`_.
+To install:
+
+.. code-block:: bash
+
+ brew install pybind11
+
+.. We should list Conan, and possibly a few other C++ package managers (hunter,
+.. perhaps). Conan has a very clean CMake integration that would be good to show.
+
+Other options
+=============
+
+Other locations you can find pybind11 are `listed here
+<https://repology.org/project/python:pybind11/versions>`_; these are maintained
+by various packagers and the community.
+++ /dev/null
-.. image:: pybind11-logo.png
-
-About this project
-==================
-**pybind11** is a lightweight header-only library that exposes C++ types in Python
-and vice versa, mainly to create Python bindings of existing C++ code. Its
-goals and syntax are similar to the excellent `Boost.Python`_ library by David
-Abrahams: to minimize boilerplate code in traditional extension modules by
-inferring type information using compile-time introspection.
-
-.. _Boost.Python: http://www.boost.org/doc/libs/release/libs/python/doc/index.html
-
-The main issue with Boost.Python—and the reason for creating such a similar
-project—is Boost. Boost is an enormously large and complex suite of utility
-libraries that works with almost every C++ compiler in existence. This
-compatibility has its cost: arcane template tricks and workarounds are
-necessary to support the oldest and buggiest of compiler specimens. Now that
-C++11-compatible compilers are widely available, this heavy machinery has
-become an excessively large and unnecessary dependency.
-Think of this library as a tiny self-contained version of Boost.Python with
-everything stripped away that isn't relevant for binding generation. Without
-comments, the core header files only require ~4K lines of code and depend on
-Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
-compact implementation was possible thanks to some of the new C++11 language
-features (specifically: tuples, lambda functions and variadic templates). Since
-its creation, this library has grown beyond Boost.Python in many ways, leading
-to dramatically simpler binding code in many common situations.
-
-Core features
-*************
-The following core C++ features can be mapped to Python
-
-- Functions accepting and returning custom data structures per value, reference, or pointer
-- Instance methods and static methods
-- Overloaded functions
-- Instance attributes and static attributes
-- Arbitrary exception types
-- Enumerations
-- Callbacks
-- Iterators and ranges
-- Custom operators
-- Single and multiple inheritance
-- STL data structures
-- Smart pointers with reference counting like ``std::shared_ptr``
-- Internal references with correct reference counting
-- C++ classes with virtual (and pure virtual) methods can be extended in Python
-
-Goodies
-*******
-In addition to the core functionality, pybind11 provides some extra goodies:
-
-- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an
- implementation-agnostic interface.
-
-- It is possible to bind C++11 lambda functions with captured variables. The
- lambda capture data is stored inside the resulting Python function object.
-
-- pybind11 uses C++11 move constructors and move assignment operators whenever
- possible to efficiently transfer custom data types.
-
-- It's easy to expose the internal storage of custom data types through
- Pythons' buffer protocols. This is handy e.g. for fast conversion between
- C++ matrix classes like Eigen and NumPy without expensive copy operations.
-
-- pybind11 can automatically vectorize functions so that they are transparently
- applied to all entries of one or more NumPy array arguments.
-
-- Python's slice-based access and assignment operations can be supported with
- just a few lines of code.
-
-- Everything is contained in just a few header files; there is no need to link
- against any additional libraries.
-
-- Binaries are generally smaller by a factor of at least 2 compared to
- equivalent bindings generated by Boost.Python. A recent pybind11 conversion
- of `PyRosetta`_, an enormous Boost.Python binding project, reported a binary
- size reduction of **5.4x** and compile time reduction by **5.8x**.
-
-- Function signatures are precomputed at compile time (using ``constexpr``),
- leading to smaller binaries.
-
-- With little extra effort, C++ types can be pickled and unpickled similar to
- regular Python objects.
-
-.. _PyRosetta: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
-
-Supported compilers
-*******************
-
-1. Clang/LLVM (any non-ancient version with C++11 support)
-2. GCC 4.8 or newer
-3. Microsoft Visual Studio 2015 or newer
-4. Intel C++ compiler v17 or newer (v16 with pybind11 v2.0 and v15 with pybind11 v2.0 and a `workaround <https://github.com/pybind/pybind11/issues/276>`_ )
Limitations
###########
+Design choices
+^^^^^^^^^^^^^^
+
pybind11 strives to be a general solution to binding generation, but it also has
certain limitations:
- The NumPy interface ``pybind11::array`` greatly simplifies accessing
numerical data from C++ (and vice versa), but it's not a full-blown array
- class like ``Eigen::Array`` or ``boost.multi_array``.
+ class like ``Eigen::Array`` or ``boost.multi_array``. ``Eigen`` objects are
+ directly supported, however, with ``pybind11/eigen.h``.
+
+Large but useful features could be implemented in pybind11 but would lead to a
+significant increase in complexity. Pybind11 strives to be simple and compact.
+Users who require large new features are encouraged to write an extension to
+pybind11; see `pybind11_json <https://github.com/pybind/pybind11_json>`_ for an
+example.
+
+
+Known bugs
+^^^^^^^^^^
+
+These are issues that hopefully will one day be fixed, but currently are
+unsolved. If you know how to help with one of these issues, contributions
+are welcome!
+
+- Intel 20.2 is currently having an issue with the test suite.
+ `#2573 <https://github.com/pybind/pybind11/pull/2573>`_
+
+- Debug mode Python does not support 1-5 tests in the test suite currently.
+ `#2422 <https://github.com/pybind/pybind11/pull/2422>`_
+
+- PyPy3 7.3.1 and 7.3.2 have issues with several tests on 32-bit Windows.
+
+Known limitations
+^^^^^^^^^^^^^^^^^
+
+These are issues that are probably solvable, but have not been fixed yet. A
+clean, well written patch would likely be accepted to solve them.
+
+- Type casters are not kept alive recursively.
+ `#2527 <https://github.com/pybind/pybind11/issues/2527>`_
+ One consequence is that containers of ``char *`` are currently not supported.
+ `#2245 <https://github.com/pybind/pybind11/issues/2245>`_
+
+- The ``cpptest`` does not run on Windows with Python 3.8 or newer, due to DLL
+ loader changes. User code that is correctly installed should not be affected.
+  `#2560 <https://github.com/pybind/pybind11/issues/2560>`_
+
+Python 3.9.0 warning
+^^^^^^^^^^^^^^^^^^^^
-These features could be implemented but would lead to a significant increase in
-complexity. I've decided to draw the line here to keep this project simple and
-compact. Users who absolutely require these features are encouraged to fork
-pybind11.
+Combining older versions of pybind11 (< 2.6.0) with Python on 3.9.0 will
+trigger undefined behavior that typically manifests as crashes during
+interpreter shutdown (but could also destroy your data. **You have been
+warned**).
+This issue has been
+`fixed in Python <https://github.com/python/cpython/pull/22670>`_. As a
+mitigation until 3.9.1 is released and commonly used, pybind11 (2.6.0 or newer)
+includes a temporary workaround specifically when Python 3.9.0 is detected at
+runtime, leaking about 50 bytes of memory when a callback function is garbage
+collected. For reference; the pybind11 test suite has about 2,000 such
+callbacks, but only 49 are garbage collected before the end-of-process. Wheels
+built with Python 3.9.0 will correctly avoid the leak when run in Python 3.9.1.
Convenience classes for specific Python types
=============================================
-.. doxygenclass:: module
+.. doxygenclass:: module_
:members:
.. doxygengroup:: pytypes
See :doc:`/classes` and :doc:`/advanced/classes` for more detail.
-.. doxygendefine:: PYBIND11_OVERLOAD
+.. doxygendefine:: PYBIND11_OVERRIDE
-.. doxygendefine:: PYBIND11_OVERLOAD_PURE
+.. doxygendefine:: PYBIND11_OVERRIDE_PURE
-.. doxygendefine:: PYBIND11_OVERLOAD_NAME
+.. doxygendefine:: PYBIND11_OVERRIDE_NAME
-.. doxygendefine:: PYBIND11_OVERLOAD_PURE_NAME
+.. doxygendefine:: PYBIND11_OVERRIDE_PURE_NAME
-.. doxygenfunction:: get_overload
+.. doxygenfunction:: get_override
Exceptions
==========
+On version numbers
+^^^^^^^^^^^^^^^^^^
+
+The two version numbers (C++ and Python) must match when combined (checked when
+you build the PyPI package), and must be a valid `PEP 440
+<https://www.python.org/dev/peps/pep-0440>`_ version when combined.
+
+For example:
+
+.. code-block:: C++
+
+ #define PYBIND11_VERSION_MAJOR X
+ #define PYBIND11_VERSION_MINOR Y
+ #define PYBIND11_VERSION_PATCH Z.dev1
+
+For beta, ``PYBIND11_VERSION_PATCH`` should be ``Z.b1``. RCs can be ``Z.rc1``.
+Always include the dot (even though PEP 440 allows it to be dropped). For a
+final release, this must be a simple integer.
+
+
To release a new version of pybind11:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- Update the version number and push to pypi
- - Update ``pybind11/_version.py`` (set release version, remove 'dev').
- - Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``.
- - Ensure that all the information in ``setup.py`` is up-to-date.
- - Update version in ``docs/conf.py``.
- - Tag release date in ``docs/changelog.rst``.
- - ``git add`` and ``git commit``.
- - if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
- - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
+- Update the version number
+ - Update ``PYBIND11_VERSION_MAJOR`` etc. in
+ ``include/pybind11/detail/common.h``. PATCH should be a simple integer.
+ - Update ``pybind11/_version.py`` (match above)
+ - Ensure that all the information in ``setup.cfg`` is up-to-date, like
+ supported Python versions.
+ - Add release date in ``docs/changelog.rst``.
+ - Check to make sure
+ `needs-changelog <https://github.com/pybind/pybind11/pulls?q=is%3Apr+is%3Aclosed+label%3A%22needs+changelog%22>`_
+ issues are entered in the changelog (clear the label when done).
+ - ``git add`` and ``git commit``, ``git push``. **Ensure CI passes**. (If it
+ fails due to a known flake issue, either ignore or restart CI.)
+- Add a release branch if this is a new minor version, or update the existing release branch if it is a patch version
+ - New branch: ``git checkout -b vX.Y``, ``git push -u origin vX.Y``
+ - Update branch: ``git checkout vX.Y``, ``git merge <release branch>``, ``git push``
+- Update tags (optional; if you skip this, the GitHub release makes a
+ non-annotated tag for you)
+ - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``.
+ - ``git push --tags``.
+- Update stable
+ - ``git checkout stable``
+ - ``git merge master``
- ``git push``
- - ``git push --tags``.
- - ``python setup.py sdist upload``.
- - ``python setup.py bdist_wheel upload``.
-- Update conda-forge (https://github.com/conda-forge/pybind11-feedstock) via PR
- - download release package from Github: ``wget https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz``
- - compute checksum: ``shasum -a 256 vX.Y.Z.tar.gz``
- - change version number and checksum in ``recipe/meta.yml``
+- Make a GitHub release (this shows up in the UI, sends new release
+ notifications to users watching releases, and also uploads PyPI packages).
+ (Note: if you do not use an existing tag, this creates a new lightweight tag
+ for you, so you could skip the above step).
+ - GUI method: click "Create a new release" on the far right, fill in the tag
+ name (if you didn't tag above, it will be made here), fill in a release
+ name like "Version X.Y.Z", and optionally copy-and-paste the changelog into
+ the description (processed as markdown by Pandoc). Check "pre-release" if
+ this is a beta/RC. You can get partway there with
+    ``cat docs/changelog.rst | pandoc -f rst -t markdown``.
+ - CLI method: with ``gh`` installed, run ``gh release create vX.Y.Z -t "Version X.Y.Z"``
+ If this is a pre-release, add ``-p``.
+
- Get back to work
- - Update ``_version.py`` (add 'dev' and increment minor).
- - Update version in ``docs/conf.py``
- - Update version macros in ``include/pybind11/common.h``
- - ``git add`` and ``git commit``.
- ``git push``
+ - Make sure you are on master, not somewhere else: ``git checkout master``
+ - Update version macros in ``include/pybind11/detail/common.h`` (set PATCH to
+ ``0.dev1`` and increment MINOR).
+ - Update ``_version.py`` to match
+ - Add a spot for in-development updates in ``docs/changelog.rst``.
+ - ``git add``, ``git commit``, ``git push``
+
+If a version branch is updated, remember to set PATCH to ``1.dev1``.
+
+If you'd like to bump homebrew, run:
+
+.. code-block::
+
+ brew bump-formula-pr --url https://github.com/pybind/pybind11/archive/vX.Y.Z.tar.gz
+
+Conda-forge should automatically make a PR in a few hours, and automatically
+merge it if there are no issues.
+
+
+Manual packaging
+^^^^^^^^^^^^^^^^
+
+If you need to manually upload releases, you can download the releases from the job artifacts and upload them with twine. You can also make the files locally (not recommended in general, as your local directory is more likely to be "dirty" and SDists love picking up random unrelated/hidden files); this is the procedure:
+
+.. code-block:: bash
+
+ python3 -m pip install build
+ python3 -m build
+ PYBIND11_SDIST_GLOBAL=1 python3 -m build
+ twine upload dist/*
+
+This makes SDists and wheels, and the final line uploads them.
-breathe == 4.5.0
+breathe==4.25.1
+commonmark==0.9.1
+recommonmark==0.7.1
+sphinx==3.3.1
+sphinx_rtd_theme==0.5.0
+sphinxcontrib-moderncmakedomain==3.17
+sphinxcontrib-svg2pdfconverter==1.1.0
deprecated APIs and their replacements, build system changes, general code
modernization and other useful information.
+.. _upgrade-guide-2.6:
+
+v2.6
+====
+
+Usage of the ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function should
+be replaced by ``PYBIND11_OVERRIDE*`` and ``get_override``. In the future, the
+old macros may be deprecated and removed.
+
+``py::module`` has been renamed ``py::module_``, but a backward compatible
+typedef has been included. This change was to avoid a language change in C++20
+that requires unqualified ``module`` not be placed at the start of a logical
+line. Qualified usage is unaffected and the typedef will remain unless the
+C++ language rules change again.
+
+The public constructors of ``py::module_`` have been deprecated. Use
+``PYBIND11_MODULE`` or ``module_::create_extension_module`` instead.
+
+An error is now thrown when ``__init__`` is forgotten on subclasses. This was
+incorrect before, but was not checked. Add a call to ``__init__`` if it is
+missing.
+
+A ``py::type_error`` is now thrown when casting to a subclass (like
+``py::bytes`` from ``py::object``) if the conversion is not valid. Make a valid
+conversion instead.
+
+The undocumented ``h.get_type()`` method has been deprecated and replaced by
+``py::type::of(h)``.
+
+Enums now have a ``__str__`` method pre-defined; if you want to override it,
+the simplest fix is to add the new ``py::prepend()`` tag when defining
+``"__str__"``.
+
+If ``__eq__`` is defined but not ``__hash__``, ``__hash__`` is now set to
+``None``, as in normal CPython. You should add ``__hash__`` if you intended the
+class to be hashable, possibly using the new ``py::hash`` shortcut.
+
+The constructors for ``py::array`` now always take signed integers for size,
+for consistency. This may lead to compiler warnings on some systems. Cast to
+``py::ssize_t`` instead of ``std::size_t``.
+
+The ``tools/clang`` submodule and ``tools/mkdoc.py`` have been moved to a
+standalone package, `pybind11-mkdoc`_. If you were using those tools, please
+use them via a pip install from the new location.
+
+The ``pybind11`` package on PyPI no longer fills the wheel "headers" slot - if
+you were using the headers from this slot, they are available by requesting the
+``global`` extra, that is, ``pip install "pybind11[global]"``. (Most users will
+be unaffected, as the ``pybind11/include`` location is reported by ``python -m
+pybind11 --includes`` and ``pybind11.get_include()`` is still correct and has
+not changed since 2.5).
+
+.. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc
+
+CMake support:
+--------------
+
+The minimum required version of CMake is now 3.4. Several details of the CMake
+support have been deprecated; warnings will be shown if you need to change
+something. The changes are:
+
+* ``PYBIND11_CPP_STANDARD=<platform-flag>`` is deprecated, please use
+ ``CMAKE_CXX_STANDARD=<number>`` instead, or any other valid CMake CXX or CUDA
+ standard selection method, like ``target_compile_features``.
+
+* If you do not request a standard, pybind11 targets will compile with the
+ compiler default, but not less than C++11, instead of forcing C++14 always.
+ If you depend on the old behavior, please use ``set(CMAKE_CXX_STANDARD 14 CACHE STRING "")``
+ instead.
+
+* Direct ``pybind11::module`` usage should always be accompanied by at least
+ ``set(CMAKE_CXX_VISIBILITY_PRESET hidden)`` or similar - it used to try to
+ manually force this compiler flag (but not correctly on all compilers or with
+ CUDA).
+
+* ``pybind11_add_module``'s ``SYSTEM`` argument is deprecated and does nothing;
+ linking now behaves like other imported libraries consistently in both
+ config and submodule mode, and behaves like a ``SYSTEM`` library by
+ default.
+
+* If ``PYTHON_EXECUTABLE`` is not set, virtual environments (``venv``,
+ ``virtualenv``, and ``conda``) are prioritized over the standard search
+ (similar to the new FindPython mode).
+
+In addition, the following changes may be of interest:
+
+* ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` will be respected by
+ ``pybind11_add_module`` if set instead of linking to ``pybind11::lto`` or
+ ``pybind11::thin_lto``.
+
+* Using ``find_package(Python COMPONENTS Interpreter Development)`` before
+ pybind11 will cause pybind11 to use the new Python mechanisms instead of its
+ own custom search, based on a patched version of classic ``FindPythonInterp``
+ / ``FindPythonLibs``. In the future, this may become the default. A recent
+ (3.15+ or 3.18.2+) version of CMake is recommended.
+
+
+
+v2.5
+====
+
+The Python package now includes the headers as data in the package itself, as
+well as in the "headers" wheel slot. ``pybind11 --includes`` and
+``pybind11.get_include()`` report the new location, which is always correct
+regardless of how pybind11 was installed, making the old ``user=`` argument
+meaningless. If you are not using the function to get the location already, you
+are encouraged to switch to the package location.
+
v2.2
====
...
.def(py::pickle(
[](const Foo &self) { // __getstate__
- return py::make_tuple(f.value1(), f.value2(), ...); // unchanged
+ return py::make_tuple(self.value1(), self.value2(), ...); // unchanged
},
[](py::tuple t) { // __setstate__, note: no `self` argument
return new Foo(t[0].cast<std::string>(), ...);
#include "cast.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/// \addtogroup annotations
/// @{
/// Annotation for operators
struct is_operator { };
+/// Annotation for classes that cannot be subclassed
+struct is_final { };
+
/// Annotation for parent scope
struct scope { handle value; scope(const handle &s) : value(s) { } };
/// Annotation indicating that a class derives from another given type
template <typename T> struct base {
+
PYBIND11_DEPRECATED("base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
- base() { }
+ base() { } // NOLINT(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
};
/// Keep patient alive while nurse lives
handle value;
PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
- metaclass() {}
+ metaclass() { } // NOLINT(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
/// Override pybind11's default metaclass
explicit metaclass(handle value) : value(value) { }
/// Annotation to mark enums as an arithmetic type
struct arithmetic { };
+/// Mark a function for addition at the beginning of the existing overload chain instead of the end
+struct prepend { };
+
/** \rst
A call policy which places one or more guard variables (``Ts...``) around the function call.
/// @} annotations
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
/* Forward declarations */
enum op_id : int;
enum op_type : int;
struct function_record {
function_record()
: is_constructor(false), is_new_style_constructor(false), is_stateless(false),
- is_operator(false), has_args(false), has_kwargs(false), is_method(false) { }
+ is_operator(false), is_method(false), has_args(false),
+ has_kwargs(false), has_kw_only_args(false), prepend(false) { }
/// Function name
char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
/// True if this is an operator (__add__), etc.
bool is_operator : 1;
+ /// True if this is a method
+ bool is_method : 1;
+
/// True if the function has a '*args' argument
bool has_args : 1;
/// True if the function has a '**kwargs' argument
bool has_kwargs : 1;
- /// True if this is a method
- bool is_method : 1;
+ /// True once a 'py::kw_only' is encountered (any following args are keyword-only)
+ bool has_kw_only_args : 1;
+
+ /// True if this function is to be inserted at the beginning of the overload resolution chain
+ bool prepend : 1;
/// Number of arguments (including py::args and/or py::kwargs, if present)
std::uint16_t nargs;
+ /// Number of trailing arguments (counted in `nargs`) that are keyword-only
+ std::uint16_t nargs_kw_only = 0;
+
+ /// Number of leading arguments (counted in `nargs`) that are positional-only
+ std::uint16_t nargs_pos_only = 0;
+
/// Python method object
PyMethodDef *def = nullptr;
struct type_record {
PYBIND11_NOINLINE type_record()
: multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
- default_holder(true), module_local(false) { }
+ default_holder(true), module_local(false), is_final(false) { }
/// Handle to the parent scope
handle scope;
/// Is the class definition local to the module shared object?
bool module_local : 1;
+ /// Is the class inheritable from python classes?
+ bool is_final : 1;
+
PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) {
auto base_info = detail::get_type_info(base, false);
if (!base_info) {
static void init(const is_new_style_constructor &, function_record *r) { r->is_new_style_constructor = true; }
};
+inline void process_kw_only_arg(const arg &a, function_record *r) {
+ if (!a.name || strlen(a.name) == 0)
+ pybind11_fail("arg(): cannot specify an unnamed argument after an kw_only() annotation");
+ ++r->nargs_kw_only;
+}
+
/// Process a keyword argument attribute (*without* a default value)
template <> struct process_attribute<arg> : process_attribute_default<arg> {
static void init(const arg &a, function_record *r) {
if (r->is_method && r->args.empty())
r->args.emplace_back("self", nullptr, handle(), true /*convert*/, false /*none not allowed*/);
r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
+
+ if (r->has_kw_only_args) process_kw_only_arg(a, r);
}
};
#endif
}
r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
+
+ if (r->has_kw_only_args) process_kw_only_arg(a, r);
+ }
+};
+
+/// Process a keyword-only-arguments-follow pseudo argument
+template <> struct process_attribute<kw_only> : process_attribute_default<kw_only> {
+ static void init(const kw_only &, function_record *r) {
+ r->has_kw_only_args = true;
+ }
+};
+
+/// Process a positional-only-argument maker
+template <> struct process_attribute<pos_only> : process_attribute_default<pos_only> {
+ static void init(const pos_only &, function_record *r) {
+ r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
}
};
static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
};
+template <>
+struct process_attribute<is_final> : process_attribute_default<is_final> {
+ static void init(const is_final &, type_record *r) { r->is_final = true; }
+};
+
template <>
struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
};
+/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
+template <>
+struct process_attribute<prepend> : process_attribute_default<prepend> {
+ static void init(const prepend &, function_record *r) { r->prepend = true; }
+};
+
/// Process an 'arithmetic' attribute for enums (does nothing here)
template <>
struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
- return named == 0 || (self + named + has_args + has_kwargs) == nargs;
+ return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs;
}
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "detail/common.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Default, C-style strides
+inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+ auto ndim = shape.size();
+ std::vector<ssize_t> strides(ndim, itemsize);
+ if (ndim > 0)
+ for (size_t i = ndim - 1; i > 0; --i)
+ strides[i - 1] = strides[i] * shape[i];
+ return strides;
+}
+
+// F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
+inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+ auto ndim = shape.size();
+ std::vector<ssize_t> strides(ndim, itemsize);
+ for (size_t i = 1; i < ndim; ++i)
+ strides[i] = strides[i - 1] * shape[i - 1];
+ return strides;
+}
+
+PYBIND11_NAMESPACE_END(detail)
/// Information record describing a Python buffer object
struct buffer_info {
std::string format; // For homogeneous buffers, this should be set to format_descriptor<T>::format()
ssize_t ndim = 0; // Number of dimensions
std::vector<ssize_t> shape; // Shape of the tensor (1 entry per dimension)
- std::vector<ssize_t> strides; // Number of entries between adjacent entries (for each per dimension)
+ std::vector<ssize_t> strides; // Number of bytes between adjacent entries (for each per dimension)
+ bool readonly = false; // flag to indicate if the underlying storage may be written to
- buffer_info() { }
+ buffer_info() = default;
buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,
- detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in)
+ detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in, bool readonly=false)
: ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
- shape(std::move(shape_in)), strides(std::move(strides_in)) {
+ shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size())
pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
for (size_t i = 0; i < (size_t) ndim; ++i)
}
template <typename T>
- buffer_info(T *ptr, detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in)
- : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor<T>::format(), static_cast<ssize_t>(shape_in->size()), std::move(shape_in), std::move(strides_in)) { }
+ buffer_info(T *ptr, detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in, bool readonly=false)
+ : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor<T>::format(), static_cast<ssize_t>(shape_in->size()), std::move(shape_in), std::move(strides_in), readonly) { }
- buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t size)
- : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}) { }
+ buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t size, bool readonly=false)
+ : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) { }
template <typename T>
- buffer_info(T *ptr, ssize_t size)
- : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size) { }
+ buffer_info(T *ptr, ssize_t size, bool readonly=false)
+ : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) { }
+
+ template <typename T>
+ buffer_info(const T *ptr, ssize_t size, bool readonly=true)
+ : buffer_info(const_cast<T*>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) { }
explicit buffer_info(Py_buffer *view, bool ownview = true)
: buffer_info(view->buf, view->itemsize, view->format, view->ndim,
- {view->shape, view->shape + view->ndim}, {view->strides, view->strides + view->ndim}) {
- this->view = view;
+ {view->shape, view->shape + view->ndim},
+ /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
+ * ignore this flag and return a view with NULL strides.
+ * When strides are NULL, build them manually. */
+ view->strides
+ ? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
+ : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
+ view->readonly) {
+ this->m_view = view;
this->ownview = ownview;
}
ndim = rhs.ndim;
shape = std::move(rhs.shape);
strides = std::move(rhs.strides);
- std::swap(view, rhs.view);
+ std::swap(m_view, rhs.m_view);
std::swap(ownview, rhs.ownview);
+ readonly = rhs.readonly;
return *this;
}
~buffer_info() {
- if (view && ownview) { PyBuffer_Release(view); delete view; }
+ if (m_view && ownview) { PyBuffer_Release(m_view); delete m_view; }
}
+ Py_buffer *view() const { return m_view; }
+ Py_buffer *&view() { return m_view; }
private:
struct private_ctr_tag { };
buffer_info(private_ctr_tag, void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,
- detail::any_container<ssize_t> &&shape_in, detail::any_container<ssize_t> &&strides_in)
- : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in)) { }
+ detail::any_container<ssize_t> &&shape_in, detail::any_container<ssize_t> &&strides_in, bool readonly)
+ : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) { }
- Py_buffer *view = nullptr;
+ Py_buffer *m_view = nullptr;
bool ownview = false;
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T, typename SFINAE = void> struct compare_buffer_info {
static bool compare(const buffer_info& b) {
}
};
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include <string_view>
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
+# define PYBIND11_HAS_U8STRING
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// A life support system for temporary objects created by `type_caster::load()`.
/// Adding a patient will keep it alive up until the enclosing function returns.
Py_CLEAR(ptr);
// A heuristic to reduce the stack's capacity (e.g. after long recursive calls)
- if (stack.capacity() > 16 && stack.size() != 0 && stack.capacity() / stack.size() > 2)
+ if (stack.capacity() > 16 && !stack.empty() && stack.capacity() / stack.size() > 2)
stack.shrink_to_fit();
}
*/
PYBIND11_NOINLINE inline detail::type_info* get_type_info(PyTypeObject *type) {
auto &bases = all_type_info(type);
- if (bases.size() == 0)
+ if (bases.empty())
return nullptr;
if (bases.size() > 1)
pybind11_fail("pybind11::detail::get_type_info: type has multiple pybind11-registered bases");
{}
// Default constructor (used to signal a value-and-holder not found by get_value_and_holder())
- value_and_holder() {}
+ value_and_holder() = default;
// Used for past-the-end iterator
value_and_holder(size_t index) : index{index} {}
// Past-the-end iterator:
iterator(size_t end) : curr(end) {}
public:
- bool operator==(const iterator &other) { return curr.index == other.curr.index; }
- bool operator!=(const iterator &other) { return curr.index != other.curr.index; }
+ bool operator==(const iterator &other) const { return curr.index == other.curr.index; }
+ bool operator!=(const iterator &other) const { return curr.index != other.curr.index; }
iterator &operator++() {
if (!inst->simple_layout)
curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;
"(compile in debug mode for type details)");
#else
pybind11_fail("pybind11::detail::instance::get_value_and_holder: `" +
- std::string(find_type->type->tp_name) + "' is not a pybind11 base of the given `" +
- std::string(Py_TYPE(this)->tp_name) + "' instance");
+ get_fully_qualified_tp_name(find_type->type) + "' is not a pybind11 base of the given `" +
+ get_fully_qualified_tp_name(Py_TYPE(this)) + "' instance");
#endif
}
#if !defined(PYPY_VERSION)
if (scope.trace) {
- PyTracebackObject *trace = (PyTracebackObject *) scope.trace;
+ auto *trace = (PyTracebackObject *) scope.trace;
/* Get the deepest trace possible */
while (trace->tb_next)
auto &instances = get_internals().registered_instances;
auto range = instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
- for (auto vh : values_and_holders(it->second)) {
+ for (const auto &vh : values_and_holders(it->second)) {
if (vh.type == type)
return handle((PyObject *) it->second);
}
case return_value_policy::copy:
if (copy_constructor)
valueptr = copy_constructor(src);
- else
- throw cast_error("return_value_policy = copy, but the "
- "object is non-copyable!");
+ else {
+#if defined(NDEBUG)
+ throw cast_error("return_value_policy = copy, but type is "
+ "non-copyable! (compile in debug mode for details)");
+#else
+ std::string type_name(tinfo->cpptype->name());
+ detail::clean_type_id(type_name);
+ throw cast_error("return_value_policy = copy, but type " +
+ type_name + " is non-copyable!");
+#endif
+ }
wrapper->owned = true;
break;
valueptr = move_constructor(src);
else if (copy_constructor)
valueptr = copy_constructor(src);
- else
- throw cast_error("return_value_policy = move, but the "
- "object is neither movable nor copyable!");
+ else {
+#if defined(NDEBUG)
+ throw cast_error("return_value_policy = move, but type is neither "
+ "movable nor copyable! "
+ "(compile in debug mode for details)");
+#else
+ std::string type_name(tinfo->cpptype->name());
+ detail::clean_type_id(type_name);
+ throw cast_error("return_value_policy = move, but type " +
+ type_name + " is neither movable nor copyable!");
+#endif
+ }
wrapper->owned = true;
break;
if (type->operator_new) {
vptr = type->operator_new(type->type_size);
} else {
- #if defined(PYBIND11_CPP17)
+ #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
vptr = ::operator new(type->type_size,
- (std::align_val_t) type->type_align);
+ std::align_val_t(type->type_align));
else
#endif
vptr = ::operator new(type->type_size);
/// native typeinfo, or when the native one wasn't able to produce a value.
PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {
constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;
- const auto pytype = src.get_type();
+ const auto pytype = type::handle_of(src);
if (!hasattr(pytype, local_key))
return false;
negation<std::is_same<Container, typename Container::value_type>>
>::value>> : is_copy_constructible<typename Container::value_type> {};
-#if !defined(PYBIND11_CPP17)
-// Likewise for std::pair before C++17 (which mandates that the copy constructor not exist when the
-// two types aren't themselves copy constructible).
+// Likewise for std::pair
+// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't themselves
+// copy constructible, but this can not be relied upon when T1 or T2 are themselves containers).
template <typename T1, typename T2> struct is_copy_constructible<std::pair<T1, T2>>
: all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};
-#endif
-NAMESPACE_END(detail)
+// The same problems arise with std::is_copy_assignable, so we use the same workaround.
+template <typename T, typename SFINAE = void> struct is_copy_assignable : std::is_copy_assignable<T> {};
+template <typename Container> struct is_copy_assignable<Container, enable_if_t<all_of<
+ std::is_copy_assignable<Container>,
+ std::is_same<typename Container::value_type &, typename Container::reference>
+ >::value>> : is_copy_assignable<typename Container::value_type> {};
+template <typename T1, typename T2> struct is_copy_assignable<std::pair<T1, T2>>
+ : all_of<is_copy_assignable<T1>, is_copy_assignable<T2>> {};
+
+PYBIND11_NAMESPACE_END(detail)
// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed
// to by `src` actually is an instance of some class derived from `itype`.
// You may specialize polymorphic_type_hook yourself for types that want to appear
// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern
// in performance-sensitive applications, used most notably in LLVM.)
+//
+// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with
+// std::enable_if. User provided specializations will always have higher priority than
+// the default implementation and specialization provided in polymorphic_type_hook_base.
template <typename itype, typename SFINAE = void>
-struct polymorphic_type_hook
+struct polymorphic_type_hook_base
{
static const void *get(const itype *src, const std::type_info*&) { return src; }
};
template <typename itype>
-struct polymorphic_type_hook<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>>
+struct polymorphic_type_hook_base<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>>
{
static const void *get(const itype *src, const std::type_info*& type) {
type = src ? &typeid(*src) : nullptr;
return dynamic_cast<const void*>(src);
}
};
+template <typename itype, typename SFINAE = void>
+struct polymorphic_type_hook : public polymorphic_type_hook_base<itype> {};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic type caster for objects stored on the heap
template <typename type> class type_caster_base : public type_caster_generic {
private:
using caster_t = make_caster<type>;
caster_t subcaster;
- using subcaster_cast_op_type = typename caster_t::template cast_op_type<type>;
- static_assert(std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value,
- "std::reference_wrapper<T> caster requires T to have a caster with an `T &` operator");
+ using reference_t = type&;
+ using subcaster_cast_op_type =
+ typename caster_t::template cast_op_type<reference_t>;
+
+ static_assert(std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value ||
+ std::is_same<reference_t, subcaster_cast_op_type>::value,
+ "std::reference_wrapper<T> caster requires T to have a caster with an "
+ "`operator T &()` or `operator const T &()`");
public:
bool load(handle src, bool convert) { return subcaster.load(src, convert); }
static constexpr auto name = caster_t::name;
return caster_t::cast(&src.get(), policy, parent);
}
template <typename T> using cast_op_type = std::reference_wrapper<type>;
- operator std::reference_wrapper<type>() { return subcaster.operator subcaster_cast_op_type&(); }
+ operator std::reference_wrapper<type>() { return cast_op<type &>(subcaster); }
};
#define PYBIND11_TYPE_CASTER(type, py_name) \
template <typename CharT> using is_std_char_type = any_of<
std::is_same<CharT, char>, /* std::string */
+#if defined(PYBIND11_HAS_U8STRING)
+ std::is_same<CharT, char8_t>, /* std::u8string */
+#endif
std::is_same<CharT, char16_t>, /* std::u16string */
std::is_same<CharT, char32_t>, /* std::u32string */
std::is_same<CharT, wchar_t> /* std::wstring */
>;
+
template <typename T>
struct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {
using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;
if (!src)
return false;
+#if !defined(PYPY_VERSION)
+ auto index_check = [](PyObject *o) { return PyIndex_Check(o); };
+#else
+ // In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`,
+ // while CPython only considers the existence of `nb_index`/`__index__`.
+ auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); };
+#endif
+
if (std::is_floating_point<T>::value) {
if (convert || PyFloat_Check(src.ptr()))
py_value = (py_type) PyFloat_AsDouble(src.ptr());
return false;
} else if (PyFloat_Check(src.ptr())) {
return false;
- } else if (std::is_unsigned<py_type>::value) {
- py_value = as_unsigned<py_type>(src.ptr());
- } else { // signed integer:
- py_value = sizeof(T) <= sizeof(long)
- ? (py_type) PyLong_AsLong(src.ptr())
- : (py_type) PYBIND11_LONG_AS_LONGLONG(src.ptr());
+ } else if (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr())) {
+ return false;
+ } else {
+ handle src_or_index = src;
+#if PY_VERSION_HEX < 0x03080000
+ object index;
+ if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr())
+ index = reinterpret_steal<object>(PyNumber_Index(src.ptr()));
+ if (!index) {
+ PyErr_Clear();
+ if (!convert)
+ return false;
+ }
+ else {
+ src_or_index = index;
+ }
+ }
+#endif
+ if (std::is_unsigned<py_type>::value) {
+ py_value = as_unsigned<py_type>(src_or_index.ptr());
+ } else { // signed integer:
+ py_value = sizeof(T) <= sizeof(long)
+ ? (py_type) PyLong_AsLong(src_or_index.ptr())
+ : (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr());
+ }
}
+ // Python API reported an error
bool py_err = py_value == (py_type) -1 && PyErr_Occurred();
- // Protect std::numeric_limits::min/max with parentheses
- if (py_err || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T) &&
- (py_value < (py_type) (std::numeric_limits<T>::min)() ||
- py_value > (py_type) (std::numeric_limits<T>::max)()))) {
- bool type_error = py_err && PyErr_ExceptionMatches(
-#if PY_VERSION_HEX < 0x03000000 && !defined(PYPY_VERSION)
- PyExc_SystemError
-#else
- PyExc_TypeError
-#endif
- );
+ // Check to see if the conversion is valid (integers should match exactly)
+ // Signed/unsigned checks happen elsewhere
+ if (py_err || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T) && py_value != (py_type) (T) py_value)) {
PyErr_Clear();
- if (type_error && convert && PyNumber_Check(src.ptr())) {
+ if (py_err && convert && PyNumber_Check(src.ptr())) {
auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value
? PyNumber_Float(src.ptr())
: PyNumber_Long(src.ptr()));
}
/* Check if this is a C++ type */
- auto &bases = all_type_info((PyTypeObject *) h.get_type().ptr());
+ auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr());
if (bases.size() == 1) { // Only allowing loading from a single-value type
value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();
return true;
if (res == 0 || res == 1) {
value = (bool) res;
return true;
+ } else {
+ PyErr_Clear();
}
}
return false;
// Simplify life by being able to assume standard char sizes (the standard only guarantees
// minimums, but Python requires exact sizes)
static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1, "Unsupported char size != 1");
+#if defined(PYBIND11_HAS_U8STRING)
+ static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1, "Unsupported char8_t size != 1");
+#endif
static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2, "Unsupported char16_t size != 2");
static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4, "Unsupported char32_t size != 4");
// wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
#if PY_MAJOR_VERSION >= 3
return load_bytes(load_src);
#else
- if (sizeof(CharT) == 1) {
+ if (std::is_same<CharT, char>::value) {
return load_bytes(load_src);
}
#endif
}
- object utfNbytes = reinterpret_steal<object>(PyUnicode_AsEncodedString(
+ auto utfNbytes = reinterpret_steal<object>(PyUnicode_AsEncodedString(
load_src.ptr(), UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr));
if (!utfNbytes) { PyErr_Clear(); return false; }
- const CharT *buffer = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
+ const auto *buffer = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
if (UTF_N > 8) { buffer++; length--; } // Skip BOM for UTF-16/32
value = StringType(buffer, length);
static handle cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
const char *buffer = reinterpret_cast<const char *>(src.data());
- ssize_t nbytes = ssize_t(src.size() * sizeof(CharT));
+ auto nbytes = ssize_t(src.size() * sizeof(CharT));
handle s = decode_utfN(buffer, nbytes);
if (!s) throw error_already_set();
return s;
UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) :
PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
#else
- // PyPy seems to have multiple problems related to PyUnicode_UTF*: the UTF8 version
- // sometimes segfaults for unknown reasons, while the UTF16 and 32 versions require a
- // non-const char * arguments, which is also a nuisance, so bypass the whole thing by just
- // passing the encoding as a string value, which works properly:
+ // PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as well),
+ // so bypass the whole thing by just passing the encoding as a string value, which works properly:
return PyUnicode_Decode(buffer, nbytes, UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr);
#endif
}
// without any encoding/decoding attempt). For other C++ char sizes this is a no-op.
// which supports loading a unicode from a str, doesn't take this path.
template <typename C = CharT>
- bool load_bytes(enable_if_t<sizeof(C) == 1, handle> src) {
+ bool load_bytes(enable_if_t<std::is_same<C, char>::value, handle> src) {
if (PYBIND11_BYTES_CHECK(src.ptr())) {
// We were passed a Python 3 raw bytes; accept it into a std::string or char*
// without any encoding attempt.
}
template <typename C = CharT>
- bool load_bytes(enable_if_t<sizeof(C) != 1, handle>) { return false; }
+ bool load_bytes(enable_if_t<!std::is_same<C, char>::value, handle>) { return false; }
};
template <typename CharT, class Traits, class Allocator>
// errors. We also allow want to allow unicode characters U+0080 through U+00FF, as those
// can fit into a single char value.
if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
- unsigned char v0 = static_cast<unsigned char>(value[0]);
+ auto v0 = static_cast<unsigned char>(value[0]);
size_t char0_bytes = !(v0 & 0x80) ? 1 : // low bits only: 0-127
(v0 & 0xE0) == 0xC0 ? 2 : // 0b110xxxxx - start of 2-byte sequence
(v0 & 0xF0) == 0xE0 ? 3 : // 0b1110xxxx - start of 3-byte sequence
return cast_impl(std::forward<T>(src), policy, parent, indices{});
}
+ // copied from the PYBIND11_TYPE_CASTER macro
+ template <typename T>
+ static handle cast(T *src, return_value_policy policy, handle parent) {
+ if (!src) return none().release();
+ if (policy == return_value_policy::take_ownership) {
+ auto h = cast(std::move(*src), policy, parent); delete src; return h;
+ } else {
+ return cast(*src, policy, parent);
+ }
+ }
+
static constexpr auto name = _("Tuple[") + concat(make_caster<Ts>::name...) + _("]");
template <typename T> using cast_op_type = type;
template <size_t... Is>
bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {
+#ifdef __cpp_fold_expressions
+ if ((... || !std::get<Is>(subcasters).load(seq[Is], convert)))
+ return false;
+#else
for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...})
if (!r)
return false;
+#endif
return true;
}
}
explicit operator type*() { return this->value; }
- explicit operator type&() { return *(this->value); }
+ // static_cast works around compiler error with MSVC 17 and CUDA 10.2
+ // see issue #2180
+ explicit operator type&() { return *(static_cast<type *>(this->value)); }
explicit operator holder_type*() { return std::addressof(holder); }
-
- // Workaround for Intel compiler bug
- // see pybind11 issue 94
- #if defined(__ICC) || defined(__INTEL_COMPILER)
- operator holder_type&() { return holder; }
- #else
explicit operator holder_type&() { return holder; }
- #endif
static handle cast(const holder_type &src, return_value_policy, handle) {
const auto *ptr = holder_helper<holder_type>::get(src);
template <typename T> struct handle_type_name { static constexpr auto name = _<T>(); };
template <> struct handle_type_name<bytes> { static constexpr auto name = _(PYBIND11_BYTES_NAME); };
+template <> struct handle_type_name<int_> { static constexpr auto name = _("int"); };
+template <> struct handle_type_name<iterable> { static constexpr auto name = _("Iterable"); };
+template <> struct handle_type_name<iterator> { static constexpr auto name = _("Iterator"); };
+template <> struct handle_type_name<none> { static constexpr auto name = _("None"); };
template <> struct handle_type_name<args> { static constexpr auto name = _("*args"); };
template <> struct handle_type_name<kwargs> { static constexpr auto name = _("**kwargs"); };
throw cast_error("Unable to cast Python instance to C++ type (compile in debug mode for details)");
#else
throw cast_error("Unable to cast Python instance of type " +
- (std::string) str(handle.get_type()) + " to C++ type '" + type_id<T>() + "'");
+ (std::string) str(type::handle_of(handle)) + " to C++ type '" + type_id<T>() + "'");
#endif
}
return conv;
return conv;
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
// pytype -> C++ type
template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
// C++ type -> py::object
template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
-object cast(const T &value, return_value_policy policy = return_value_policy::automatic_reference,
+object cast(T &&value, return_value_policy policy = return_value_policy::automatic_reference,
handle parent = handle()) {
+ using no_ref_T = typename std::remove_reference<T>::type;
if (policy == return_value_policy::automatic)
- policy = std::is_pointer<T>::value ? return_value_policy::take_ownership : return_value_policy::copy;
+ policy = std::is_pointer<no_ref_T>::value ? return_value_policy::take_ownership :
+ std::is_lvalue_reference<T>::value ? return_value_policy::copy : return_value_policy::move;
else if (policy == return_value_policy::automatic_reference)
- policy = std::is_pointer<T>::value ? return_value_policy::reference : return_value_policy::copy;
- return reinterpret_steal<object>(detail::make_caster<T>::cast(value, policy, parent));
+ policy = std::is_pointer<no_ref_T>::value ? return_value_policy::reference :
+ std::is_lvalue_reference<T>::value ? return_value_policy::copy : return_value_policy::move;
+ return reinterpret_steal<object>(detail::make_caster<T>::cast(std::forward<T>(value), policy, parent));
}
template <typename T> T handle::cast() const { return pybind11::cast<T>(*this); }
throw cast_error("Unable to cast Python instance to C++ rvalue: instance has multiple references"
" (compile in debug mode for details)");
#else
- throw cast_error("Unable to move from Python " + (std::string) str(obj.get_type()) +
+ throw cast_error("Unable to move from Python " + (std::string) str(type::handle_of(obj)) +
" instance to C++ " + type_id<T>() + " instance: instance has multiple references");
#endif
return ret;
}
-// Calling cast() on an rvalue calls pybind::cast with the object rvalue, which does:
+// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does:
// - If we have to move (because T has no copy constructor), do it. This will fail if the moved
// object has multiple references, but trying to copy will fail to compile.
// - If both movable and copyable, check ref count: if 1, move; otherwise copy
template <> inline void object::cast() const & { return; }
template <> inline void object::cast() && { return; }
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Declared in pytypes.h:
template <typename T, enable_if_t<!is_pyobject<T>::value, int>>
object object_or_cast(T &&o) { return pybind11::cast(std::forward<T>(o)); }
-struct overload_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the OVERLOAD_INT macro
-template <typename ret_type> using overload_caster_t = conditional_t<
- cast_is_temporary_value_reference<ret_type>::value, make_caster<ret_type>, overload_unused>;
+struct override_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the PYBIND11_OVERRIDE_OVERRIDE macro
+template <typename ret_type> using override_caster_t = conditional_t<
+ cast_is_temporary_value_reference<ret_type>::value, make_caster<ret_type>, override_unused>;
// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then
// store the result in the given variable. For other types, this is a no-op.
template <typename T> enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o, make_caster<T> &caster) {
return cast_op<T>(load_type(caster, o));
}
-template <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&, overload_unused &) {
+template <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&, override_unused &) {
pybind11_fail("Internal error: cast_ref fallback invoked"); }
// Trampoline use: Having a pybind11::cast with an invalid reference type is going to static_assert, even
pybind11_fail("Internal error: cast_safe fallback invoked"); }
template <> inline void cast_safe<void>(object &&) {}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
template <return_value_policy policy = return_value_policy::automatic_reference>
tuple make_tuple() { return tuple(0); }
#if !defined(NDEBUG)
, type(type_id<T>())
#endif
- { }
+ {
+ // Workaround! See:
+ // https://github.com/pybind/pybind11/issues/2336
+ // https://github.com/pybind/pybind11/pull/2685#issuecomment-731286700
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
+ }
+ }
public:
/// Direct construction with name, default, and description
#endif
};
+/// \ingroup annotations
+/// Annotation indicating that all following arguments are keyword-only; the is the equivalent of an
+/// unnamed '*' argument (in Python 3)
+struct kw_only {};
+
+/// \ingroup annotations
+/// Annotation indicating that all previous arguments are positional-only; the is the equivalent of an
+/// unnamed '/' argument (in Python 3.8)
+struct pos_only {};
+
template <typename T>
arg_v arg::operator=(T &&value) const { return {std::move(*this), std::forward<T>(value)}; }
String literal version of `arg`
\endrst */
constexpr arg operator"" _a(const char *name, size_t) { return arg(name); }
-}
+} // namespace literals
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// forward declaration (definition in attr.h)
struct function_record;
template <size_t... Is>
bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
+#ifdef __cpp_fold_expressions
+ if ((... || !std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])))
+ return false;
+#else
for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...})
if (!r)
return false;
+#endif
return true;
}
template <typename Return, typename Func, size_t... Is, typename Guard>
- Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) {
+ Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) && {
return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
}
}
void process(list &args_list, detail::args_proxy ap) {
- for (const auto &a : ap)
+ for (auto a : ap)
args_list.append(a);
}
void process(list &/*args_list*/, detail::kwargs_proxy kp) {
if (!kp)
return;
- for (const auto &k : reinterpret_borrow<dict>(kp)) {
+ for (auto k : reinterpret_borrow<dict>(kp)) {
if (m_kwargs.contains(k.first)) {
#if defined(NDEBUG)
multiple_values_error();
dict m_kwargs;
};
+// [workaround(intel)] Separate function required here
+// We need to put this into a separate function because the Intel compiler
+// fails to compile enable_if_t<!all_of<is_positional<Args>...>::value>
+// (tested with ICC 2021.1 Beta 20200827).
+template <typename... Args>
+constexpr bool args_are_all_positional()
+{
+ return all_of<is_positional<Args>...>::value;
+}
+
/// Collect only positional arguments for a Python function call
template <return_value_policy policy, typename... Args,
- typename = enable_if_t<all_of<is_positional<Args>...>::value>>
+ typename = enable_if_t<args_are_all_positional<Args...>()>>
simple_collector<policy> collect_arguments(Args &&...args) {
return simple_collector<policy>(std::forward<Args>(args)...);
}
/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
template <return_value_policy policy, typename... Args,
- typename = enable_if_t<!all_of<is_positional<Args>...>::value>>
+ typename = enable_if_t<!args_are_all_positional<Args...>()>>
unpacking_collector<policy> collect_arguments(Args &&...args) {
// Following argument order rules for generalized unpacking according to PEP 448
static_assert(
return operator()<policy>(std::forward<Args>(args)...);
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
+
+
+template<typename T>
+handle type::handle_of() {
+ static_assert(
+ std::is_base_of<detail::type_caster_generic, detail::make_caster<T>>::value,
+ "py::type::of<T> only supports the case where T is a registered C++ types."
+ );
+
+ return detail::get_type_handle(typeid(T), true);
+}
+
#define PYBIND11_MAKE_OPAQUE(...) \
namespace pybind11 { namespace detail { \
}}
/// Lets you pass a type containing a `,` through a macro parameter without needing a separate
-/// typedef, e.g.: `PYBIND11_OVERLOAD(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
+/// typedef, e.g.: `PYBIND11_OVERRIDE(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
#define PYBIND11_TYPE(...) __VA_ARGS__
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds)
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename type> class duration_caster {
public:
- typedef typename type::rep rep;
- typedef typename type::period period;
+ using rep = typename type::rep;
+ using period = typename type::period;
- typedef std::chrono::duration<uint_fast32_t, std::ratio<86400>> days;
+ using days = std::chrono::duration<uint_fast32_t, std::ratio<86400>>;
bool load(handle src, bool) {
using namespace std::chrono;
// This is for casting times on the system clock into datetime.datetime instances
template <typename Duration> class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
public:
- typedef std::chrono::time_point<std::chrono::system_clock, Duration> type;
+ using type = std::chrono::time_point<std::chrono::system_clock, Duration>;
bool load(handle src, bool) {
using namespace std::chrono;
}
else return false;
- value = system_clock::from_time_t(std::mktime(&cal)) + msecs;
+ value = time_point_cast<Duration>(system_clock::from_time_t(std::mktime(&cal)) + msecs);
return true;
}
// Lazy initialise the PyDateTime import
if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
- std::time_t tt = system_clock::to_time_t(time_point_cast<system_clock::duration>(src));
+ // Get out microseconds, and make sure they are positive, to avoid bug in eastern hemisphere time zones
+ // (cfr. https://github.com/pybind/pybind11/issues/2417)
+ using us_t = duration<int, std::micro>;
+ auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
+ if (us.count() < 0)
+ us += seconds(1);
+
+ // Subtract microseconds BEFORE `system_clock::to_time_t`, because:
+ // > If std::time_t has lower precision, it is implementation-defined whether the value is rounded or truncated.
+ // (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
+ std::time_t tt = system_clock::to_time_t(time_point_cast<system_clock::duration>(src - us));
// this function uses static memory so it's best to copy it out asap just in case
// otherwise other code that is using localtime may break this (not just python code)
std::tm localtime = *std::localtime(&tt);
- // Declare these special duration types so the conversions happen with the correct primitive types (int)
- using us_t = duration<int, std::micro>;
-
return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
localtime.tm_mon + 1,
localtime.tm_mday,
localtime.tm_hour,
localtime.tm_min,
localtime.tm_sec,
- (duration_cast<us_t>(src.time_since_epoch() % seconds(1))).count());
+ us.count());
}
PYBIND11_TYPE_CASTER(type, _("datetime.datetime"));
};
: public duration_caster<std::chrono::duration<Rep, Period>> {
};
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
# undef I
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
template <typename T> struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
static constexpr const char c = format_descriptor<T>::c;
#endif
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T> struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
static constexpr bool value = true;
PYBIND11_TYPE_CASTER(std::complex<T>, _("complex"));
};
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "../attr.h"
#include "../options.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
-#if PY_VERSION_HEX >= 0x03030000
+#if PY_VERSION_HEX >= 0x03030000 && !defined(PYPY_VERSION)
# define PYBIND11_BUILTIN_QUALNAME
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
#else
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) setattr((PyObject *) obj, "__qualname__", nameobj)
#endif
+inline std::string get_fully_qualified_tp_name(PyTypeObject *type) {
+#if !defined(PYPY_VERSION)
+ return type->tp_name;
+#else
+ auto module_name = handle((PyObject *) type).attr("__module__").cast<std::string>();
+ if (module_name == PYBIND11_BUILTINS_MODULE)
+ return type->tp_name;
+ else
+ return std::move(module_name) + "." + type->tp_name;
+#endif
+}
+
inline PyTypeObject *type_incref(PyTypeObject *type) {
Py_INCREF(type);
return type;
// 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop`
// 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment
const auto static_prop = (PyObject *) get_internals().static_property_type;
- const auto call_descr_set = descr && PyObject_IsInstance(descr, static_prop)
+ const auto call_descr_set = descr && value && PyObject_IsInstance(descr, static_prop)
&& !PyObject_IsInstance(value, static_prop);
if (call_descr_set) {
// Call `static_property.__set__()` instead of replacing the `static_property`.
}
#endif
+/// metaclass `__call__` function that is used to create all pybind11 objects.
+extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {
+
+ // use the default metaclass call to create/initialize the object
+ PyObject *self = PyType_Type.tp_call(type, args, kwargs);
+ if (self == nullptr) {
+ return nullptr;
+ }
+
+ // This must be a pybind11 instance
+ auto instance = reinterpret_cast<detail::instance *>(self);
+
+ // Ensure that the base __init__ function(s) were called
+ for (const auto &vh : values_and_holders(instance)) {
+ if (!vh.holder_constructed()) {
+ PyErr_Format(PyExc_TypeError, "%.200s.__init__() must be called when overriding __init__",
+ get_fully_qualified_tp_name(vh.type->type).c_str());
+ Py_DECREF(self);
+ return nullptr;
+ }
+ }
+
+ return self;
+}
+
+/// Cleanup the type-info for a pybind11-registered type.
+extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
+ auto *type = (PyTypeObject *) obj;
+ auto &internals = get_internals();
+
+ // A pybind11-registered type will:
+ // 1) be found in internals.registered_types_py
+ // 2) have exactly one associated `detail::type_info`
+ auto found_type = internals.registered_types_py.find(type);
+ if (found_type != internals.registered_types_py.end() &&
+ found_type->second.size() == 1 &&
+ found_type->second[0]->type == type) {
+
+ auto *tinfo = found_type->second[0];
+ auto tindex = std::type_index(*tinfo->cpptype);
+ internals.direct_conversions.erase(tindex);
+
+ if (tinfo->module_local)
+ registered_local_types_cpp().erase(tindex);
+ else
+ internals.registered_types_cpp.erase(tindex);
+ internals.registered_types_py.erase(tinfo->type);
+
+ // Actually just `std::erase_if`, but that's only available in C++20
+ auto &cache = internals.inactive_override_cache;
+ for (auto it = cache.begin(), last = cache.end(); it != last; ) {
+ if (it->first == (PyObject *) tinfo->type)
+ it = cache.erase(it);
+ else
+ ++it;
+ }
+
+ delete tinfo;
+ }
+
+ PyType_Type.tp_dealloc(obj);
+}
+
/** This metaclass is assigned by default to all pybind11 types and is required in order
for static properties to function correctly. Users may override this using `py::metaclass`.
Return value: New reference. */
type->tp_base = type_incref(&PyType_Type);
type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+ type->tp_call = pybind11_meta_call;
+
type->tp_setattro = pybind11_meta_setattro;
#if PY_MAJOR_VERSION >= 3
type->tp_getattro = pybind11_meta_getattro;
#endif
+ type->tp_dealloc = pybind11_meta_dealloc;
+
if (PyType_Ready(type) < 0)
pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
auto ®istered_instances = get_internals().registered_instances;
auto range = registered_instances.equal_range(ptr);
for (auto it = range.first; it != range.second; ++it) {
- if (Py_TYPE(self) == Py_TYPE(it->second)) {
+ if (self == it->second) {
registered_instances.erase(it);
return true;
}
// Allocate the value/holder internals:
inst->allocate_layout();
- inst->owned = true;
-
return self;
}
/// following default function will be used which simply throws an exception.
extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
PyTypeObject *type = Py_TYPE(self);
- std::string msg;
-#if defined(PYPY_VERSION)
- msg += handle((PyObject *) type).attr("__module__").cast<std::string>() + ".";
-#endif
- msg += type->tp_name;
- msg += ": No constructor defined!";
+ std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!";
PyErr_SetString(PyExc_TypeError, msg.c_str());
return -1;
}
auto type = Py_TYPE(self);
type->tp_free(self);
+#if PY_VERSION_HEX < 0x03080000
// `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
// as part of a derived type's dealloc, in which case we're not allowed to decref
// the type here. For cross-module compatibility, we shouldn't compare directly
auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
Py_DECREF(type);
+#else
+ // This was not needed before Python 3.8 (Python issue 35810)
+ // https://github.com/pybind/pybind11/issues/1946
+ Py_DECREF(type);
+#endif
}
/** Create the type which can be used as a common base for all classes. This is
extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) {
if (!PyDict_Check(new_dict)) {
PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, not a '%.200s'",
- Py_TYPE(new_dict)->tp_name);
+ get_fully_qualified_tp_name(Py_TYPE(new_dict)).c_str());
return -1;
}
PyObject *&dict = *_PyObject_GetDictPtr(self);
/// Give instances of this type a `__dict__` and opt into garbage collection.
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
auto type = &heap_type->ht_type;
-#if defined(PYPY_VERSION)
- pybind11_fail(std::string(type->tp_name) + ": dynamic attributes are "
- "currently not supported in "
- "conjunction with PyPy!");
-#endif
type->tp_flags |= Py_TPFLAGS_HAVE_GC;
type->tp_dictoffset = type->tp_basicsize; // place dict at the end
type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it
}
std::memset(view, 0, sizeof(Py_buffer));
buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
+ if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
+ delete info;
+ // view->obj = nullptr; // Was just memset to 0, so not necessary
+ PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage");
+ return -1;
+ }
view->obj = obj;
view->ndim = 1;
view->internal = info;
view->len = view->itemsize;
for (auto s : info->shape)
view->len *= s;
+ view->readonly = info->readonly;
if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT)
view->format = const_cast<char *>(info->format.c_str());
if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
#endif
}
- object module;
+ object module_;
if (rec.scope) {
if (hasattr(rec.scope, "__module__"))
- module = rec.scope.attr("__module__");
+ module_ = rec.scope.attr("__module__");
else if (hasattr(rec.scope, "__name__"))
- module = rec.scope.attr("__name__");
+ module_ = rec.scope.attr("__name__");
}
auto full_name = c_str(
#if !defined(PYPY_VERSION)
- module ? str(module).cast<std::string>() + "." + rec.name :
+ module_ ? str(module_).cast<std::string>() + "." + rec.name :
#endif
rec.name);
auto &internals = get_internals();
auto bases = tuple(rec.bases);
- auto base = (bases.size() == 0) ? internals.instance_base
+ auto base = (bases.empty()) ? internals.instance_base
: bases[0].ptr();
/* Danger zone: from now (and until PyType_Ready), make sure to
type->tp_doc = tp_doc;
type->tp_base = type_incref((PyTypeObject *)base);
type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
- if (bases.size() > 0)
+ if (!bases.empty())
type->tp_bases = bases.release().ptr();
/* Don't inherit base __init__ */
#endif
/* Flags */
- type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+ type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
#if PY_MAJOR_VERSION < 3
type->tp_flags |= Py_TPFLAGS_CHECKTYPES;
#endif
+ if (!rec.is_final)
+ type->tp_flags |= Py_TPFLAGS_BASETYPE;
if (rec.dynamic_attr)
enable_dynamic_attributes(heap_type);
else
Py_INCREF(type); // Keep it alive forever (reference leak)
- if (module) // Needed by pydoc
- setattr((PyObject *) type, "__module__", module);
+ if (module_) // Needed by pydoc
+ setattr((PyObject *) type, "__module__", module_);
PYBIND11_SET_OLDPY_QUALNAME(type, qualname);
return (PyObject *) type;
}
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#pragma once
-#if !defined(NAMESPACE_BEGIN)
-# define NAMESPACE_BEGIN(name) namespace name {
-#endif
-#if !defined(NAMESPACE_END)
-# define NAMESPACE_END(name) }
-#endif
+#define PYBIND11_VERSION_MAJOR 2
+#define PYBIND11_VERSION_MINOR 6
+#define PYBIND11_VERSION_PATCH 2
+
+#define PYBIND11_NAMESPACE_BEGIN(name) namespace name {
+#define PYBIND11_NAMESPACE_END(name) }
// Robust support for some features and loading modules compiled against different pybind versions
// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute on
# endif
#endif
-#if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER)
+#if !(defined(_MSC_VER) && __cplusplus == 199711L)
# if __cplusplus >= 201402L
# define PYBIND11_CPP14
# if __cplusplus >= 201703L
// Compiler version assertions
#if defined(__INTEL_COMPILER)
-# if __INTEL_COMPILER < 1700
-# error pybind11 requires Intel C++ compiler v17 or newer
+# if __INTEL_COMPILER < 1800
+# error pybind11 requires Intel C++ compiler v18 or newer
+# elif __INTEL_COMPILER < 1900 && defined(PYBIND11_CPP14)
+# error pybind11 supports only C++11 with Intel C++ compiler v18. Use v19 or newer for C++14.
# endif
#elif defined(__clang__) && !defined(__apple_build_version__)
# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
#endif
-#define PYBIND11_VERSION_MAJOR 2
-#define PYBIND11_VERSION_MINOR 4
-#define PYBIND11_VERSION_PATCH 1
+#if defined(PYBIND11_CPP17)
+# define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
+#elif defined(_MSC_VER) && !defined(__clang__)
+# define PYBIND11_MAYBE_UNUSED
+#else
+# define PYBIND11_MAYBE_UNUSED __attribute__ ((__unused__))
+#endif
+
+/* Don't let Python.h #define (v)snprintf as macro because they are implemented
+ properly in Visual Studio since 2015. */
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+# define HAVE_SNPRINTF 1
+#endif
/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
#if defined(_MSC_VER)
# endif
# pragma warning(push)
# pragma warning(disable: 4510 4610 4512 4005)
-# if defined(_DEBUG)
+# if defined(_DEBUG) && !defined(Py_DEBUG)
# define PYBIND11_DEBUG_MARKER
# undef _DEBUG
# endif
#include <frameobject.h>
#include <pythread.h>
+/* Python #defines overrides on all sorts of core functions, which
+ tends to wreak havoc in C++ codebases that expect these to work
+ like regular functions (potentially with several overloads) */
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef toupper
#endif
+#if defined(copysign)
+# undef copysign
+#endif
+
#if defined(_MSC_VER)
# if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG
#include <vector>
#include <string>
#include <stdexcept>
+#include <exception>
#include <unordered_set>
#include <unordered_map>
#include <memory>
#define PYBIND11_STR_TYPE ::pybind11::str
#define PYBIND11_BOOL_ATTR "__bool__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
-// Providing a separate declaration to make Clang's -Wmissing-prototypes happy
+#define PYBIND11_BUILTINS_MODULE "builtins"
+// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
+// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
#define PYBIND11_PLUGIN_IMPL(name) \
- extern "C" PYBIND11_EXPORT PyObject *PyInit_##name(); \
+ extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
#else
#define PYBIND11_STR_TYPE ::pybind11::bytes
#define PYBIND11_BOOL_ATTR "__nonzero__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)
-// Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy
+#define PYBIND11_BUILTINS_MODULE "__builtin__"
+// Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy.
+// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
#define PYBIND11_PLUGIN_IMPL(name) \
- static PyObject *pybind11_init_wrapper(); \
- extern "C" PYBIND11_EXPORT void init##name(); \
- extern "C" PYBIND11_EXPORT void init##name() { \
- (void)pybind11_init_wrapper(); \
- } \
+ static PyObject *pybind11_init_wrapper(); \
+ extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT void init##name(); \
+ extern "C" PYBIND11_EXPORT void init##name() { \
+ (void)pybind11_init_wrapper(); \
+ } \
PyObject *pybind11_init_wrapper()
#endif
#define PYBIND11_STRINGIFY(x) #x
#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
#define PYBIND11_CONCAT(first, second) first##second
+#define PYBIND11_ENSURE_INTERNALS_READY \
+ pybind11::detail::get_internals();
#define PYBIND11_CHECK_PYTHON_VERSION \
{ \
***Deprecated in favor of PYBIND11_MODULE***
This macro creates the entry point that will be invoked when the Python interpreter
- imports a plugin library. Please create a `module` in the function body and return
+ imports a plugin library. Please create a `module_` in the function body and return
the pointer to its underlying Python object at the end.
.. code-block:: cpp
PYBIND11_PLUGIN(example) {
- pybind11::module m("example", "pybind11 example plugin");
+ pybind11::module_ m("example", "pybind11 example plugin");
/// Set up bindings here
return m.ptr();
}
static PyObject *pybind11_init(); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
+ PYBIND11_ENSURE_INTERNALS_READY \
try { \
return pybind11_init(); \
} PYBIND11_CATCH_INIT_EXCEPTIONS \
This macro creates the entry point that will be invoked when the Python interpreter
imports an extension module. The module name is given as the first argument and it
should not be in quotes. The second macro argument defines a variable of type
- `py::module` which can be used to initialize the module.
+ `py::module_` which can be used to initialize the module.
+
+ The entry point is marked as "maybe unused" to aid dead-code detection analysis:
+ since the entry point is typically only looked up at runtime and not referenced
+ during translation, it would otherwise appear as unused ("dead") code.
.. code-block:: cpp
}
\endrst */
#define PYBIND11_MODULE(name, variable) \
- static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &); \
+ static ::pybind11::module_::module_def \
+ PYBIND11_CONCAT(pybind11_module_def_, name) PYBIND11_MAYBE_UNUSED; \
+ PYBIND11_MAYBE_UNUSED \
+ static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
PYBIND11_PLUGIN_IMPL(name) { \
PYBIND11_CHECK_PYTHON_VERSION \
- auto m = pybind11::module(PYBIND11_TOSTRING(name)); \
+ PYBIND11_ENSURE_INTERNALS_READY \
+ auto m = ::pybind11::module_::create_extension_module( \
+ PYBIND11_TOSTRING(name), nullptr, \
+ &PYBIND11_CONCAT(pybind11_module_def_, name)); \
try { \
PYBIND11_CONCAT(pybind11_init_, name)(m); \
return m.ptr(); \
} PYBIND11_CATCH_INIT_EXCEPTIONS \
} \
- void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable)
+ void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &variable)
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
using ssize_t = Py_ssize_t;
using size_t = std::size_t;
reference_internal
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
inline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); }
#else
template<size_t ...> struct index_sequence { };
template<size_t N, size_t ...S> struct make_index_sequence_impl : make_index_sequence_impl <N - 1, N - 1, S...> { };
-template<size_t ...S> struct make_index_sequence_impl <0, S...> { typedef index_sequence<S...> type; };
+template<size_t ...S> struct make_index_sequence_impl <0, S...> { using type = index_sequence<S...>; };
template<size_t N> using make_index_sequence = typename make_index_sequence_impl<N>::type;
#endif
template <bool B> using bool_constant = std::integral_constant<bool, B>;
template <typename T> struct negation : bool_constant<!T::value> { };
+// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so
+// using the new one (C++14 defect, so generally works on newer compilers, even
+// if not in C++17 mode)
+#if defined(__PGIC__) || defined(__INTEL_COMPILER)
+template<typename... > using void_t = void;
+#else
template <typename...> struct void_t_impl { using type = void; };
template <typename... Ts> using void_t = typename void_t_impl<Ts...>::type;
+#endif
+
/// Compile-time all/any/none of that check the boolean value of all template types
#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
/// Strip the class from a method type
template <typename T> struct remove_class { };
-template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...)> { typedef R type(A...); };
-template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...) const> { typedef R type(A...); };
+template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...)> { using type = R (A...); };
+template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...) const> { using type = R (A...); };
/// Helper template to strip away type modifiers
-template <typename T> struct intrinsic_type { typedef T type; };
-template <typename T> struct intrinsic_type<const T> { typedef typename intrinsic_type<T>::type type; };
-template <typename T> struct intrinsic_type<T*> { typedef typename intrinsic_type<T>::type type; };
-template <typename T> struct intrinsic_type<T&> { typedef typename intrinsic_type<T>::type type; };
-template <typename T> struct intrinsic_type<T&&> { typedef typename intrinsic_type<T>::type type; };
-template <typename T, size_t N> struct intrinsic_type<const T[N]> { typedef typename intrinsic_type<T>::type type; };
-template <typename T, size_t N> struct intrinsic_type<T[N]> { typedef typename intrinsic_type<T>::type type; };
+template <typename T> struct intrinsic_type { using type = T; };
+template <typename T> struct intrinsic_type<const T> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T*> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T&> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T&&> { using type = typename intrinsic_type<T>::type; };
+template <typename T, size_t N> struct intrinsic_type<const T[N]> { using type = typename intrinsic_type<T>::type; };
+template <typename T, size_t N> struct intrinsic_type<T[N]> { using type = typename intrinsic_type<T>::type; };
template <typename T> using intrinsic_t = typename intrinsic_type<T>::type;
/// Helper type to replace 'void' in some expressions
constexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); }
#endif
-NAMESPACE_BEGIN(constexpr_impl)
+PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
/// Implementation details for constexpr functions
constexpr int first(int i) { return i; }
template <typename T, typename... Ts>
constexpr int last(int /*i*/, int result) { return result; }
template <typename T, typename... Ts>
constexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? i : result, vs...); }
-NAMESPACE_END(constexpr_impl)
+PYBIND11_NAMESPACE_END(constexpr_impl)
/// Return the index of the first type in Ts which satisfies Predicate<T>. Returns sizeof...(Ts) if
/// none match.
/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived pointer
/// can be converted to a Base pointer)
+/// For unions, `is_base_of<T, T>::value` is False, so we need to check `is_same` as well.
template <typename Base, typename Derived> using is_accessible_base_of = bool_constant<
- std::is_base_of<Base, Derived>::value && std::is_convertible<Derived *, Base *>::value>;
+ (std::is_same<Base, Derived>::value || std::is_base_of<Base, Derived>::value) && std::is_convertible<Derived *, Base *>::value>;
template <template<typename...> class Base>
struct is_template_base_of_impl {
std::is_pointer<T>::value && std::is_function<typename std::remove_pointer<T>::type>::value>;
template <typename F> struct strip_function_object {
+ // If you are encountering an
+ // 'error: name followed by "::" must be a class or namespace name'
+ // with the Intel compiler and a noexcept function here,
+ // try to use noexcept(true) instead of plain noexcept.
using type = typename remove_class<decltype(&F::operator())>::type;
};
/// Ignore that a variable is unused in compiler warnings
inline void ignore_unused(const int *) { }
+// [workaround(intel)] Internal error on fold expression
/// Apply a function over each element of a parameter pack
-#ifdef __cpp_fold_expressions
+#if defined(__cpp_fold_expressions) && !defined(__INTEL_COMPILER)
+// Intel compiler produces an internal error on this fold expression (tested with ICC 19.0.2)
#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
#else
using expand_side_effects = bool[];
-#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false }
+#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (void)pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false }
#endif
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// C++ bindings of builtin Python exceptions
class builtin_exception : public std::runtime_error {
PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
+PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error
PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
template <typename T, typename SFINAE = void> struct format_descriptor { };
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the index of the given type in the type char array below, and in the list in numpy.h
// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
// complex float,double,long double. Note that the long double types only participate when long
std::is_integral<T>::value ? detail::log2(sizeof(T))*2 + std::is_unsigned<T>::value : 8 + (
std::is_same<T, double>::value ? 1 : std::is_same<T, long double>::value ? 2 : 0));
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
template <typename T> struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
/// Dummy destructor wrapper that can be used to expose classes with a private destructor
struct nodelete { template <typename T> void operator()(T*) { } };
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename... Args>
struct overload_cast_impl {
- constexpr overload_cast_impl() {} // MSVC 2015 needs this
+ constexpr overload_cast_impl() {}; // NOLINT(modernize-use-equals-default): MSVC 2015 needs this
template <typename Return>
constexpr auto operator()(Return (*pf)(Args...)) const noexcept
constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
-> decltype(pmf) { return pmf; }
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
// overload_cast requires variable templates: C++14
#if defined(PYBIND11_CPP14)
};
#endif // overload_cast
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
// any standard container (or C-style array) supporting std::begin/std::end, any singleton
const std::vector<T> *operator->() const { return &v; }
};
-NAMESPACE_END(detail)
-
-
+// Forward-declaration; see detail/class.h
+std::string get_fully_qualified_tp_name(PyTypeObject*);
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "common.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
#if !defined(_MSC_VER)
# define PYBIND11_DESCR_CONSTEXPR static constexpr
return _("{") + descr + _("}");
}
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "class.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <>
class type_caster<value_and_holder> {
value_and_holder *value = nullptr;
};
-NAMESPACE_BEGIN(initimpl)
+PYBIND11_NAMESPACE_BEGIN(initimpl)
inline void no_nullptr(void *ptr) {
if (!ptr) throw type_error("pybind11::init(): factory function returned nullptr");
template <typename Class>
void construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {
auto *ptr = holder_helper<Holder<Class>>::get(holder);
+ no_nullptr(ptr);
// If we need an alias, check that the held pointer is actually an alias instance
if (Class::has_alias && need_alias && !is_alias<Class>(ptr))
throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance "
}
};
-NAMESPACE_END(initimpl)
-NAMESPACE_END(detail)
-NAMESPACE_END(pybind11)
+PYBIND11_NAMESPACE_END(initimpl)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(pybind11)
#include "../pytypes.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Forward declarations
inline PyTypeObject *make_static_property_type();
inline PyTypeObject *make_default_metaclass();
# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value))
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)
+# define PYBIND11_TLS_FREE(key) PyThread_tss_free(key)
#else
// Usually an int but a long on Cygwin64 with Python 3.x
# define PYBIND11_TLS_KEY_INIT(var) decltype(PyThread_create_key()) var = 0
# define PYBIND11_TLS_REPLACE_VALUE(key, value) \
PyThread_set_key_value((key), (value))
# endif
+# define PYBIND11_TLS_FREE(key) (void)key
#endif
// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly
template <typename value_type>
using type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;
-struct overload_hash {
+struct override_hash {
inline size_t operator()(const std::pair<const PyObject *, const char *>& v) const {
size_t value = std::hash<const void *>()(v.first);
- value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value<<6) + (value>>2);
+ value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value<<6) + (value>>2);
return value;
}
};
type_map<type_info *> registered_types_cpp; // std::type_index -> pybind11's type information
std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py; // PyTypeObject* -> base type_info(s)
std::unordered_multimap<const void *, instance*> registered_instances; // void * -> instance*
- std::unordered_set<std::pair<const PyObject *, const char *>, overload_hash> inactive_overload_cache;
+ std::unordered_set<std::pair<const PyObject *, const char *>, override_hash> inactive_override_cache;
type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;
std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;
std::forward_list<void (*) (std::exception_ptr)> registered_exception_translators;
#if defined(WITH_THREAD)
PYBIND11_TLS_KEY_INIT(tstate);
PyInterpreterState *istate = nullptr;
+ ~internals() {
+ // This destructor is called *after* Py_Finalize() in finalize_interpreter().
+ // That *SHOULD BE* fine. The following details what happens when PyThread_tss_free is called.
+ // PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does nothing.
+ // PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree.
+ // PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX). Neither
+ // of those have anything to do with CPython internals.
+ // PyMem_RawFree *requires* that the `tstate` be allocated with the CPython allocator.
+ PYBIND11_TLS_FREE(tstate);
+ }
#endif
};
};
/// Tracks the `internals` and `type_info` ABI version independent of the main library version
-#define PYBIND11_INTERNALS_VERSION 3
+#define PYBIND11_INTERNALS_VERSION 4
/// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
-# define PYBIND11_BUILD_TYPE "_debug"
+# define PYBIND11_BUILD_TYPE "_debug"
#else
-# define PYBIND11_BUILD_TYPE ""
+# define PYBIND11_BUILD_TYPE ""
#endif
/// Let's assume that different compilers are ABI-incompatible.
-#if defined(_MSC_VER)
-# define PYBIND11_COMPILER_TYPE "_msvc"
-#elif defined(__INTEL_COMPILER)
-# define PYBIND11_COMPILER_TYPE "_icc"
-#elif defined(__clang__)
-# define PYBIND11_COMPILER_TYPE "_clang"
-#elif defined(__PGI)
-# define PYBIND11_COMPILER_TYPE "_pgi"
-#elif defined(__MINGW32__)
-# define PYBIND11_COMPILER_TYPE "_mingw"
-#elif defined(__CYGWIN__)
-# define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
-#elif defined(__GNUC__)
-# define PYBIND11_COMPILER_TYPE "_gcc"
-#else
-# define PYBIND11_COMPILER_TYPE "_unknown"
+/// A user can manually set this string if they know their
+/// compiler is compatible.
+#ifndef PYBIND11_COMPILER_TYPE
+# if defined(_MSC_VER)
+# define PYBIND11_COMPILER_TYPE "_msvc"
+# elif defined(__INTEL_COMPILER)
+# define PYBIND11_COMPILER_TYPE "_icc"
+# elif defined(__clang__)
+# define PYBIND11_COMPILER_TYPE "_clang"
+# elif defined(__PGI)
+# define PYBIND11_COMPILER_TYPE "_pgi"
+# elif defined(__MINGW32__)
+# define PYBIND11_COMPILER_TYPE "_mingw"
+# elif defined(__CYGWIN__)
+# define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
+# elif defined(__GNUC__)
+# define PYBIND11_COMPILER_TYPE "_gcc"
+# else
+# define PYBIND11_COMPILER_TYPE "_unknown"
+# endif
#endif
-#if defined(_LIBCPP_VERSION)
-# define PYBIND11_STDLIB "_libcpp"
-#elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
-# define PYBIND11_STDLIB "_libstdcpp"
-#else
-# define PYBIND11_STDLIB ""
+/// Also standard libs
+#ifndef PYBIND11_STDLIB
+# if defined(_LIBCPP_VERSION)
+# define PYBIND11_STDLIB "_libcpp"
+# elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
+# define PYBIND11_STDLIB "_libstdcpp"
+# else
+# define PYBIND11_STDLIB ""
+# endif
#endif
/// On Linux/OSX, changes in __GXX_ABI_VERSION__ indicate ABI incompatibility.
-#if defined(__GXX_ABI_VERSION)
-# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
-#else
-# define PYBIND11_BUILD_ABI ""
+#ifndef PYBIND11_BUILD_ABI
+# if defined(__GXX_ABI_VERSION)
+# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
+# else
+# define PYBIND11_BUILD_ABI ""
+# endif
#endif
-#if defined(WITH_THREAD)
-# define PYBIND11_INTERNALS_KIND ""
-#else
-# define PYBIND11_INTERNALS_KIND "_without_thread"
+#ifndef PYBIND11_INTERNALS_KIND
+# if defined(WITH_THREAD)
+# define PYBIND11_INTERNALS_KIND ""
+# else
+# define PYBIND11_INTERNALS_KIND "_without_thread"
+# endif
#endif
#define PYBIND11_INTERNALS_ID "__pybind11_internals_v" \
} catch (const std::length_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
} catch (const std::out_of_range &e) { PyErr_SetString(PyExc_IndexError, e.what()); return;
} catch (const std::range_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
+ } catch (const std::overflow_error &e) { PyErr_SetString(PyExc_OverflowError, e.what()); return;
} catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); return;
} catch (...) {
PyErr_SetString(PyExc_RuntimeError, "Caught an unknown exception!");
const PyGILState_STATE state;
} gil;
- constexpr auto *id = PYBIND11_INTERNALS_ID;
+ PYBIND11_STR_TYPE id(PYBIND11_INTERNALS_ID);
auto builtins = handle(PyEval_GetBuiltins());
if (builtins.contains(id) && isinstance<capsule>(builtins[id])) {
internals_pp = static_cast<internals **>(capsule(builtins[id]));
auto *&internals_ptr = *internals_pp;
internals_ptr = new internals();
#if defined(WITH_THREAD)
- PyEval_InitThreads();
+
+ #if PY_VERSION_HEX < 0x03090000
+ PyEval_InitThreads();
+ #endif
PyThreadState *tstate = PyThreadState_Get();
#if PY_VERSION_HEX >= 0x03070000
internals_ptr->tstate = PyThread_tss_alloc();
return strings.front().c_str();
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Returns a named pointer that is shared among all extension modules (using the same
/// pybind11 version) running in the current interpreter. Names starting with underscores
return *ptr;
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "common.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Erase all occurrences of a substring
inline void erase_all(std::string &string, const std::string &search) {
for (size_t pos = 0;;) {
#endif
detail::erase_all(name, "pybind11::");
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Return a string representation of a C++ type
template <typename T> static std::string type_id() {
return name;
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7");
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
#if EIGEN_VERSION_AT_LEAST(3,3,0)
using EigenIndex = Eigen::Index;
if (!need_copy) {
// We don't need a converting copy, but we also need to check whether the strides are
// compatible with the Ref's stride requirements
- Array aref = reinterpret_borrow<Array>(src);
+ auto aref = reinterpret_borrow<Array>(src);
if (aref && (!need_writeable || aref.writeable())) {
fits = props::conformable(aref);
template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
- typedef typename Type::Scalar Scalar;
- typedef remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())> StorageIndex;
- typedef typename Type::Index Index;
+ using Scalar = typename Type::Scalar;
+ using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
+ using Index = typename Type::Index;
static constexpr bool rowMajor = Type::IsRowMajor;
bool load(handle src, bool) {
return false;
auto obj = reinterpret_borrow<object>(src);
- object sparse_module = module::import("scipy.sparse");
+ object sparse_module = module_::import("scipy.sparse");
object matrix_type = sparse_module.attr(
rowMajor ? "csr_matrix" : "csc_matrix");
- if (!obj.get_type().is(matrix_type)) {
+ if (!type::handle_of(obj).is(matrix_type)) {
try {
obj = matrix_type(obj);
} catch (const error_already_set &) {
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
const_cast<Type&>(src).makeCompressed();
- object matrix_type = module::import("scipy.sparse").attr(
+ object matrix_type = module_::import("scipy.sparse").attr(
rowMajor ? "csr_matrix" : "csc_matrix");
array data(src.nonZeros(), src.valuePtr());
+ npy_format_descriptor<Scalar>::name + _("]"));
};
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#if defined(__GNUG__) || defined(__clang__)
# pragma GCC diagnostic pop
#if PY_MAJOR_VERSION >= 3
# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ extern "C" PyObject *pybind11_init_impl_##name(); \
extern "C" PyObject *pybind11_init_impl_##name() { \
return pybind11_init_wrapper_##name(); \
}
#else
# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ extern "C" void pybind11_init_impl_##name(); \
extern "C" void pybind11_init_impl_##name() { \
pybind11_init_wrapper_##name(); \
}
});
}
\endrst */
-#define PYBIND11_EMBEDDED_MODULE(name, variable) \
- static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &); \
- static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() { \
- auto m = pybind11::module(PYBIND11_TOSTRING(name)); \
- try { \
- PYBIND11_CONCAT(pybind11_init_, name)(m); \
- return m.ptr(); \
- } catch (pybind11::error_already_set &e) { \
- PyErr_SetString(PyExc_ImportError, e.what()); \
- return nullptr; \
- } catch (const std::exception &e) { \
- PyErr_SetString(PyExc_ImportError, e.what()); \
- return nullptr; \
- } \
- } \
- PYBIND11_EMBEDDED_MODULE_IMPL(name) \
- pybind11::detail::embedded_module name(PYBIND11_TOSTRING(name), \
- PYBIND11_CONCAT(pybind11_init_impl_, name)); \
- void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable)
-
-
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+#define PYBIND11_EMBEDDED_MODULE(name, variable) \
+ static ::pybind11::module_::module_def \
+ PYBIND11_CONCAT(pybind11_module_def_, name); \
+ static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
+ static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() { \
+ auto m = ::pybind11::module_::create_extension_module( \
+ PYBIND11_TOSTRING(name), nullptr, \
+ &PYBIND11_CONCAT(pybind11_module_def_, name)); \
+ try { \
+ PYBIND11_CONCAT(pybind11_init_, name)(m); \
+ return m.ptr(); \
+ } PYBIND11_CATCH_INIT_EXCEPTIONS \
+ } \
+ PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ ::pybind11::detail::embedded_module PYBIND11_CONCAT(pybind11_module_, name) \
+ (PYBIND11_TOSTRING(name), \
+ PYBIND11_CONCAT(pybind11_init_impl_, name)); \
+ void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &variable)
+
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks.
struct embedded_module {
}
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/** \rst
Initialize the Python interpreter. No other pybind11 or CPython API functions can be
Py_InitializeEx(init_signal_handlers ? 1 : 0);
// Make .py files in the working directory available by default
- module::import("sys").attr("path").cast<list>().append(".");
+ module_::import("sys").attr("path").cast<list>().append(".");
}
/** \rst
bool is_valid = true;
};
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "pybind11.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+inline void ensure_builtins_in_globals(object &global) {
+ #if PY_VERSION_HEX < 0x03080000
+ // Running exec and eval on Python 2 and 3 adds `builtins` module under
+ // `__builtins__` key to globals if not yet present.
+ // Python 3.8 made PyRun_String behave similarly. Let's also do that for
+ // older versions, for consistency.
+ if (!global.contains("__builtins__"))
+ global["__builtins__"] = module_::import(PYBIND11_BUILTINS_MODULE);
+ #else
+ (void) global;
+ #endif
+}
+
+PYBIND11_NAMESPACE_END(detail)
enum eval_mode {
/// Evaluate a string containing an isolated expression
if (!local)
local = global;
+ detail::ensure_builtins_in_globals(global);
+
/* PyRun_String does not accept a PyObject / encoding specifier,
this seems to be the only alternative */
std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr;
template <eval_mode mode = eval_expr, size_t N>
object eval(const char (&s)[N], object global = globals(), object local = object()) {
/* Support raw string literals by removing common leading whitespace */
- auto expr = (s[0] == '\n') ? str(module::import("textwrap").attr("dedent")(s))
+ auto expr = (s[0] == '\n') ? str(module_::import("textwrap").attr("dedent")(s))
: str(s);
return eval<mode>(expr, global, local);
}
eval<eval_statements>(s, global, local);
}
+#if defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03000000
+template <eval_mode mode = eval_statements>
+object eval_file(str, object, object) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+template <eval_mode mode = eval_statements>
+object eval_file(str, object) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+template <eval_mode mode = eval_statements>
+object eval_file(str) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+#else
template <eval_mode mode = eval_statements>
object eval_file(str fname, object global = globals(), object local = object()) {
if (!local)
local = global;
+ detail::ensure_builtins_in_globals(global);
+
int start;
switch (mode) {
case eval_expr: start = Py_eval_input; break;
throw error_already_set();
return reinterpret_steal<object>(result);
}
+#endif
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include "pybind11.h"
#include <functional>
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename Return, typename... Args>
struct type_caster<std::function<Return(Args...)>> {
struct func_handle {
function f;
func_handle(function&& f_) : f(std::move(f_)) {}
- func_handle(const func_handle&) = default;
+ func_handle(const func_handle& f_) {
+ gil_scoped_acquire acq;
+ f = f_.f;
+ }
~func_handle() {
gil_scoped_acquire acq;
function kill_f(std::move(f));
+ make_caster<retval_type>::name + _("]"));
};
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include <memory>
#include <iostream>
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Buffer that writes to Python instead of C++
class pythonbuf : public std::streambuf {
object pywrite;
object pyflush;
- int overflow(int c) {
+ int overflow(int c) override {
if (!traits_type::eq_int_type(c, traits_type::eof())) {
*pptr() = traits_type::to_char_type(c);
pbump(1);
return sync() == 0 ? traits_type::not_eof(c) : traits_type::eof();
}
- int sync() {
+ // This function must be non-virtual to be called in a destructor. If the
+ // rare MSVC test failure shows up with this version, then this should be
+ // simplified to a fully qualified call.
+ int _sync() {
if (pbase() != pptr()) {
- // This subtraction cannot be negative, so dropping the sign
- str line(pbase(), static_cast<size_t>(pptr() - pbase()));
{
gil_scoped_acquire tmp;
+
+ // This subtraction cannot be negative, so dropping the sign.
+ str line(pbase(), static_cast<size_t>(pptr() - pbase()));
+
pywrite(line);
pyflush();
+
+                // Placed inside gil_scoped_acquire as a mutex to avoid a race
+ setp(pbase(), epptr());
}
- setp(pbase(), epptr());
}
return 0;
}
+ int sync() override {
+ return _sync();
+ }
+
public:
pythonbuf(object pyostream, size_t buffer_size = 1024)
pythonbuf(pythonbuf&&) = default;
/// Sync before destroy
- ~pythonbuf() {
- sync();
+ ~pythonbuf() override {
+ _sync();
}
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/** \rst
{
py::scoped_ostream_redirect output{std::cerr, py::module::import("sys").attr("stderr")};
- std::cerr << "Hello, World!";
+ std::cout << "Hello, World!";
}
\endrst */
class scoped_ostream_redirect {
public:
scoped_ostream_redirect(
std::ostream &costream = std::cout,
- object pyostream = module::import("sys").attr("stdout"))
+ object pyostream = module_::import("sys").attr("stdout"))
: costream(costream), buffer(pyostream) {
old = costream.rdbuf(&buffer);
}
public:
scoped_estream_redirect(
std::ostream &costream = std::cerr,
- object pyostream = module::import("sys").attr("stderr"))
+ object pyostream = module_::import("sys").attr("stderr"))
: scoped_ostream_redirect(costream,pyostream) {}
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Class to redirect output as a context manager. C++ backend.
class OstreamRedirect {
}
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/** \rst
This is a helper function to add a C++ redirect context manager to Python
m.noisy_function_with_error_printing()
\endrst */
-inline class_<detail::OstreamRedirect> add_ostream_redirect(module m, std::string name = "ostream_redirect") {
+inline class_<detail::OstreamRedirect> add_ostream_redirect(module_ m, std::string name = "ostream_redirect") {
return class_<detail::OstreamRedirect>(m, name.c_str(), module_local())
.def(init<bool,bool>(), arg("stdout")=true, arg("stderr")=true)
.def("__enter__", &detail::OstreamRedirect::enter)
.def("__exit__", [](detail::OstreamRedirect &self_, args) { self_.exit(); });
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#include <sstream>
#include <string>
#include <functional>
+#include <type_traits>
#include <utility>
#include <vector>
#include <typeindex>
whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
and dimension types (e.g. shape, strides, indexing), instead of inflicting this
upon the library user. */
-static_assert(sizeof(ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
+static_assert(sizeof(::pybind11::ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
+static_assert(std::is_signed<Py_intptr_t>::value, "Py_intptr_t must be signed");
+// We now can reinterpret_cast between py::ssize_t and Py_intptr_t (MSVC + PyPy cares)
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
class array; // Forward declaration
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <> struct handle_type_name<array> { static constexpr auto name = _("numpy.ndarray"); };
+
template <typename type, typename SFINAE = void> struct npy_format_descriptor;
struct PyArrayDescr_Proxy {
template <typename U> using as = bool_constant<sizeof(T) == sizeof(U)>;
};
+template <typename Concrete> constexpr int platform_lookup() { return -1; }
+
// Lookup a type according to its size, and return a value corresponding to the NumPy typenum.
-template <typename Concrete, typename... Check, typename... Int>
-constexpr int platform_lookup(Int... codes) {
- using code_index = std::integral_constant<int, constexpr_first<same_size<Concrete>::template as, Check...>()>;
- static_assert(code_index::value != sizeof...(Check), "Unable to match type on this platform");
- return std::get<code_index::value>(std::make_tuple(codes...));
+template <typename Concrete, typename T, typename... Ts, typename... Ints>
+constexpr int platform_lookup(int I, Ints... Is) {
+ return sizeof(Concrete) == sizeof(T) ? I : platform_lookup<Concrete, Ts...>(Is...);
}
struct npy_api {
unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
PyObject *(*PyArray_DescrFromType_)(int);
PyObject *(*PyArray_NewFromDescr_)
- (PyTypeObject *, PyObject *, int, Py_intptr_t *,
- Py_intptr_t *, void *, int, PyObject *);
+ (PyTypeObject *, PyObject *, int, Py_intptr_t const *,
+ Py_intptr_t const *, void *, int, PyObject *);
+ // Unused. Not removed because that affects ABI of the class.
PyObject *(*PyArray_DescrNewFromType_)(int);
int (*PyArray_CopyInto_)(PyObject *, PyObject *);
PyObject *(*PyArray_NewCopy_)(PyObject *, int);
PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *);
int (*PyArray_DescrConverter_) (PyObject *, PyObject **);
bool (*PyArray_EquivTypes_) (PyObject *, PyObject *);
- int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, char, PyObject **, int *,
- Py_ssize_t *, PyObject **, PyObject *);
+ int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, unsigned char, PyObject **, int *,
+ Py_intptr_t *, PyObject **, PyObject *);
PyObject *(*PyArray_Squeeze_)(PyObject *);
+ // Unused. Not removed because that affects ABI of the class.
int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int);
private:
API_PyArray_CopyInto = 82,
API_PyArray_NewCopy = 85,
API_PyArray_NewFromDescr = 94,
- API_PyArray_DescrNewFromType = 9,
+ API_PyArray_DescrNewFromType = 96,
API_PyArray_DescrConverter = 174,
API_PyArray_EquivTypes = 182,
API_PyArray_GetArrayParamsFromObject = 278,
};
static npy_api lookup() {
- module m = module::import("numpy.core.multiarray");
+ module_ m = module_::import("numpy.core.multiarray");
auto c = m.attr("_ARRAY_API");
#if PY_MAJOR_VERSION >= 3
void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL);
template <typename T> struct is_complex<std::complex<T>> : std::true_type { };
template <typename T> struct array_info_scalar {
- typedef T type;
+ using type = T;
static constexpr bool is_array = false;
static constexpr bool is_empty = false;
static constexpr auto extents = _("");
satisfies_none_of<T, std::is_reference, std::is_array, is_std_array, std::is_arithmetic, is_complex, std::is_enum>
>;
+// Replacement for std::is_pod (deprecated in C++20)
+template <typename T> using is_pod = all_of<
+ std::is_standard_layout<T>,
+ std::is_trivial<T>
+>;
+
template <ssize_t Dim = 0, typename Strides> ssize_t byte_offset_unsafe(const Strides &) { return 0; }
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
using ConstBase::ConstBase;
using ConstBase::Dynamic;
public:
+ // Bring in const-qualified versions from base class
+ using ConstBase::operator();
+ using ConstBase::operator[];
+
/// Mutable, unchecked access to data at the given indices.
template <typename... Ix> T& operator()(Ix... index) {
static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
template <typename T, ssize_t Dim>
struct type_caster<unchecked_mutable_reference<T, Dim>> : type_caster<unchecked_reference<T, Dim>> {};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
class dtype : public object {
public:
private:
static object _dtype_from_pep3118() {
- static PyObject *obj = module::import("numpy.core._internal")
+ static PyObject *obj = module_::import("numpy.core._internal")
.attr("_dtype_from_pep3118").cast<object>().release().ptr();
return reinterpret_borrow<object>(obj);
}
forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
};
- array() : array({{0}}, static_cast<const double *>(nullptr)) {}
+ array() : array(0, static_cast<const double *>(nullptr)) {}
using ShapeContainer = detail::any_container<ssize_t>;
using StridesContainer = detail::any_container<ssize_t>;
const void *ptr = nullptr, handle base = handle()) {
if (strides->empty())
- *strides = c_strides(*shape, dt.itemsize());
+ *strides = detail::c_strides(*shape, dt.itemsize());
auto ndim = shape->size();
if (ndim != strides->size())
auto &api = detail::npy_api::get();
auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
- api.PyArray_Type_, descr.release().ptr(), (int) ndim, shape->data(), strides->data(),
+ api.PyArray_Type_, descr.release().ptr(), (int) ndim,
+ // Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
+ reinterpret_cast<Py_intptr_t*>(shape->data()),
+ reinterpret_cast<Py_intptr_t*>(strides->data()),
const_cast<void *>(ptr), flags, nullptr));
if (!tmp)
throw error_already_set();
template <typename T>
explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { }
- explicit array(const buffer_info &info)
- : array(pybind11::dtype(info), info.shape, info.strides, info.ptr) { }
+ explicit array(const buffer_info &info, handle base = handle())
+ : array(pybind11::dtype(info), info.shape, info.strides, info.ptr, base) { }
/// Array descriptor (dtype)
pybind11::dtype dtype() const {
/// then resize will succeed only if it makes a reshape, i.e. original size doesn't change
void resize(ShapeContainer new_shape, bool refcheck = true) {
detail::npy_api::PyArray_Dims d = {
- new_shape->data(), int(new_shape->size())
+ // Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
+ reinterpret_cast<Py_intptr_t*>(new_shape->data()),
+ int(new_shape->size())
};
    // try to resize, set ordering param to -1 because it's not used anyway
- object new_array = reinterpret_steal<object>(
+ auto new_array = reinterpret_steal<object>(
detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1)
);
if (!new_array) throw error_already_set();
throw std::domain_error("array is not writeable");
}
- // Default, C-style strides
- static std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
- auto ndim = shape.size();
- std::vector<ssize_t> strides(ndim, itemsize);
- if (ndim > 0)
- for (size_t i = ndim - 1; i > 0; --i)
- strides[i - 1] = strides[i] * shape[i];
- return strides;
- }
-
- // F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
- static std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
- auto ndim = shape.size();
- std::vector<ssize_t> strides(ndim, itemsize);
- for (size_t i = 1; i < ndim; ++i)
- strides[i] = strides[i - 1] * shape[i - 1];
- return strides;
- }
-
template<typename... Ix> void check_dimensions(Ix... index) const {
check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
}
if (!m_ptr) throw error_already_set();
}
- explicit array_t(const buffer_info& info) : array(info) { }
+ explicit array_t(const buffer_info& info, handle base = handle()) : array(info, base) { }
array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle())
: array(std::move(shape), std::move(strides), ptr, base) { }
explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
: array_t(private_ctor{}, std::move(shape),
- ExtraFlags & f_style ? f_strides(*shape, itemsize()) : c_strides(*shape, itemsize()),
+ ExtraFlags & f_style
+ ? detail::f_strides(*shape, itemsize())
+ : detail::c_strides(*shape, itemsize()),
ptr, base) { }
- explicit array_t(size_t count, const T *ptr = nullptr, handle base = handle())
+ explicit array_t(ssize_t count, const T *ptr = nullptr, handle base = handle())
: array({count}, {}, ptr, base) { }
constexpr ssize_t itemsize() const {
static bool check_(handle h) {
const auto &api = detail::npy_api::get();
return api.PyArray_Check_(h.ptr())
- && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr());
+ && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr())
+ && detail::check_flags(h.ptr(), ExtraFlags & (array::c_style | array::f_style));
}
protected:
}
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename T, int ExtraFlags>
struct pyobject_caster<array_t<T, ExtraFlags>> {
using type = array_t<T, ExtraFlags>;
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
static constexpr auto name = _<std::is_same<T, bool>::value>(
- _("bool"), _<std::is_signed<T>::value>("int", "uint") + _<sizeof(T)*8>()
+ _("bool"), _<std::is_signed<T>::value>("numpy.int", "numpy.uint") + _<sizeof(T)*8>()
);
};
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
static constexpr auto name = _<std::is_same<T, float>::value || std::is_same<T, double>::value>(
- _("float") + _<sizeof(T)*8>(), _("longdouble")
+ _("numpy.float") + _<sizeof(T)*8>(), _("numpy.longdouble")
);
};
struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
static constexpr auto name = _<std::is_same<typename T::value_type, float>::value
|| std::is_same<typename T::value_type, double>::value>(
- _("complex") + _<sizeof(typename T::value_type)*16>(), _("longcomplex")
+ _("numpy.complex") + _<sizeof(typename T::value_type)*16>(), _("numpy.longcomplex")
);
};
#define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
#define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0)
#define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next)
-#ifdef _MSC_VER // MSVC is not as eager to expand macros, hence this workaround
+#if defined(_MSC_VER) && !defined(__clang__) // MSVC is not as eager to expand macros, hence this workaround
#define PYBIND11_MAP_LIST_NEXT1(test, next) \
PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
#else
(::std::vector<::pybind11::detail::field_descriptor> \
{PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__clang__)
#define PYBIND11_MAP2_LIST_NEXT1(test, next) \
PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
#else
#endif // __CLION_IDE__
-template <class T>
-using array_iterator = typename std::add_pointer<T>::type;
-
-template <class T>
-array_iterator<T> array_begin(const buffer_info& buffer) {
- return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr));
-}
-
-template <class T>
-array_iterator<T> array_end(const buffer_info& buffer) {
- return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr) + buffer.size);
-}
-
class common_iterator {
public:
using container_type = std::vector<ssize_t>;
m_strides.back() = static_cast<value_type>(strides.back());
for (size_type i = m_strides.size() - 1; i != 0; --i) {
size_type j = i - 1;
- value_type s = static_cast<value_type>(shape[i]);
+ auto s = static_cast<value_type>(shape[i]);
m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;
}
}
using call_type = remove_reference_t<T>;
// Is this a vectorized argument?
static constexpr bool vectorize =
- satisfies_any_of<call_type, std::is_arithmetic, is_complex, std::is_pod>::value &&
+ satisfies_any_of<call_type, std::is_arithmetic, is_complex, is_pod>::value &&
satisfies_none_of<call_type, std::is_pointer, std::is_array, is_std_array, std::is_enum>::value &&
(!std::is_reference<T>::value ||
(std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
};
+
+// py::vectorize when a return type is present
+template <typename Func, typename Return, typename... Args>
+struct vectorize_returned_array {
+ using Type = array_t<Return>;
+
+ static Type create(broadcast_trivial trivial, const std::vector<ssize_t> &shape) {
+ if (trivial == broadcast_trivial::f_trivial)
+ return array_t<Return, array::f_style>(shape);
+ else
+ return array_t<Return>(shape);
+ }
+
+ static Return *mutable_data(Type &array) {
+ return array.mutable_data();
+ }
+
+ static Return call(Func &f, Args &... args) {
+ return f(args...);
+ }
+
+ static void call(Return *out, size_t i, Func &f, Args &... args) {
+ out[i] = f(args...);
+ }
+};
+
+// py::vectorize when a return type is not present
+template <typename Func, typename... Args>
+struct vectorize_returned_array<Func, void, Args...> {
+ using Type = none;
+
+ static Type create(broadcast_trivial, const std::vector<ssize_t> &) {
+ return none();
+ }
+
+ static void *mutable_data(Type &) {
+ return nullptr;
+ }
+
+ static detail::void_type call(Func &f, Args &... args) {
+ f(args...);
+ return {};
+ }
+
+ static void call(void *, size_t, Func &f, Args &... args) {
+ f(args...);
+ }
+};
+
+
template <typename Func, typename Return, typename... Args>
struct vectorize_helper {
+
+// NVCC for some reason breaks if NVectorized is private
+#ifdef __CUDACC__
+public:
+#else
private:
+#endif
+
static constexpr size_t N = sizeof...(Args);
static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
static_assert(NVectorized >= 1,
using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
template <size_t Index> using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;
+ using returned_array = vectorize_returned_array<Func, Return, Args...>;
+
// Runs a vectorized function given arguments tuple and three index sequences:
// - Index is the full set of 0 ... (N-1) argument indices;
// - VIndex is the subset of argument indices with vectorized parameters, letting us access
ssize_t nd = 0;
std::vector<ssize_t> shape(0);
auto trivial = broadcast(buffers, nd, shape);
- size_t ndim = (size_t) nd;
+ auto ndim = (size_t) nd;
size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());
// not wrapped in an array).
if (size == 1 && ndim == 0) {
PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
- return cast(f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...));
+ return cast(returned_array::call(f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...));
}
- array_t<Return> result;
- if (trivial == broadcast_trivial::f_trivial) result = array_t<Return, array::f_style>(shape);
- else result = array_t<Return>(shape);
+ auto result = returned_array::create(trivial, shape);
if (size == 0) return std::move(result);
/* Call the function */
+ auto mutable_data = returned_array::mutable_data(result);
if (trivial == broadcast_trivial::non_trivial)
- apply_broadcast(buffers, params, result, i_seq, vi_seq, bi_seq);
+ apply_broadcast(buffers, params, mutable_data, size, shape, i_seq, vi_seq, bi_seq);
else
- apply_trivial(buffers, params, result.mutable_data(), size, i_seq, vi_seq, bi_seq);
+ apply_trivial(buffers, params, mutable_data, size, i_seq, vi_seq, bi_seq);
return std::move(result);
}
}};
for (size_t i = 0; i < size; ++i) {
- out[i] = f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...);
+ returned_array::call(out, i, f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...);
for (auto &x : vecparams) x.first += x.second;
}
}
template <size_t... Index, size_t... VIndex, size_t... BIndex>
void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
std::array<void *, N> ¶ms,
- array_t<Return> &output_array,
+ Return *out,
+ size_t size,
+ const std::vector<ssize_t> &output_shape,
index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {
- buffer_info output = output_array.request();
- multi_array_iterator<NVectorized> input_iter(buffers, output.shape);
+ multi_array_iterator<NVectorized> input_iter(buffers, output_shape);
- for (array_iterator<Return> iter = array_begin<Return>(output), end = array_end<Return>(output);
- iter != end;
- ++iter, ++input_iter) {
+ for (size_t i = 0; i < size; ++i, ++input_iter) {
PYBIND11_EXPAND_SIDE_EFFECTS((
params[VIndex] = input_iter.template data<BIndex>()
));
- *iter = f(*reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
+ returned_array::call(out, i, f, *reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
}
}
};
static constexpr auto name = _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]");
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
// Vanilla pointer vectorizer:
template <typename Return, typename... Args>
return Helper(std::mem_fn(f));
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#if defined(_MSC_VER)
#pragma warning(pop)
# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Enumeration with all supported operator types
enum op_id : int {
PYBIND11_INPLACE_OPERATOR(ior, operator|=, l |= r)
PYBIND11_UNARY_OPERATOR(neg, operator-, -l)
PYBIND11_UNARY_OPERATOR(pos, operator+, +l)
+// WARNING: This usage of `abs` should only be done for existing STL overloads.
+// Adding overloads directly in to the `std::` namespace is advised against:
+// https://en.cppreference.com/w/cpp/language/extending_std
PYBIND11_UNARY_OPERATOR(abs, abs, std::abs(l))
PYBIND11_UNARY_OPERATOR(hash, hash, std::hash<L>()(l))
PYBIND11_UNARY_OPERATOR(invert, operator~, (~l))
#undef PYBIND11_BINARY_OPERATOR
#undef PYBIND11_INPLACE_OPERATOR
#undef PYBIND11_UNARY_OPERATOR
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
using detail::self;
+// Add named operators so that they are accessible via `py::`.
+using detail::hash;
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#if defined(_MSC_VER)
# pragma warning(pop)
#include "detail/common.h"
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
class options {
public:
state previous_state;
};
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
*/
#pragma once
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunused-value"
-#pragma clang diagnostic warning "-Wrange-loop-analysis"
-#endif
#if defined(__INTEL_COMPILER)
# pragma warning push
# pragma warning(disable: 4996) // warning C4996: The POSIX name for this item is deprecated. Instead, use the ISO C and C++ conformant name
# pragma warning(disable: 4702) // warning C4702: unreachable code
# pragma warning(disable: 4522) // warning C4522: multiple assignment operators specified
+# pragma warning(disable: 4505) // warning C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
#elif defined(__GNUG__) && !defined(__clang__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
#include "detail/class.h"
#include "detail/init.h"
+#include <memory>
+#include <vector>
+#include <string>
+#include <utility>
+
#if defined(__GNUG__) && !defined(__clang__)
# include <cxxabi.h>
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object
class cpp_function : public function {
public:
- cpp_function() { }
+ cpp_function() = default;
cpp_function(std::nullptr_t) { }
/// Construct a cpp_function from a vanilla function pointer
(detail::function_signature_t<Func> *) nullptr, extra...);
}
- /// Construct a cpp_function from a class method (non-const)
+ /// Construct a cpp_function from a class method (non-const, no ref-qualifier)
template <typename Return, typename Class, typename... Arg, typename... Extra>
cpp_function(Return (Class::*f)(Arg...), const Extra&... extra) {
+ initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
+ (Return (*) (Class *, Arg...)) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (non-const, lvalue ref-qualifier)
+ /// A copy of the overload for non-const functions without explicit ref-qualifier
+ /// but with an added `&`.
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...)&, const Extra&... extra) {
initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(args...); },
(Return (*) (Class *, Arg...)) nullptr, extra...);
}
- /// Construct a cpp_function from a class method (const)
+ /// Construct a cpp_function from a class method (const, no ref-qualifier)
template <typename Return, typename Class, typename... Arg, typename... Extra>
cpp_function(Return (Class::*f)(Arg...) const, const Extra&... extra) {
+ initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
+ (Return (*)(const Class *, Arg ...)) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (const, lvalue ref-qualifier)
+ /// A copy of the overload for const functions without explicit ref-qualifier
+ /// but with an added `&`.
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...) const&, const Extra&... extra) {
initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(args...); },
(Return (*)(const Class *, Arg ...)) nullptr, extra...);
}
object name() const { return attr("__name__"); }
protected:
+ struct InitializingFunctionRecordDeleter {
+ // `destruct(function_record, false)`: `initialize_generic` copies strings and
+ // takes care of cleaning up in case of exceptions. So pass `false` to `free_strings`.
+ void operator()(detail::function_record * rec) { destruct(rec, false); }
+ };
+ using unique_function_record = std::unique_ptr<detail::function_record, InitializingFunctionRecordDeleter>;
+
/// Space optimization: don't inline this frequently instantiated fragment
- PYBIND11_NOINLINE detail::function_record *make_function_record() {
- return new detail::function_record();
+ PYBIND11_NOINLINE unique_function_record make_function_record() {
+ return unique_function_record(new detail::function_record());
}
/// Special internal constructor for functors, lambda functions, etc.
struct capture { remove_reference_t<Func> f; };
/* Store the function including any extra state it might have (e.g. a lambda capture object) */
- auto rec = make_function_record();
+ // The unique_ptr makes sure nothing is leaked in case of an exception.
+ auto unique_rec = make_function_record();
+ auto rec = unique_rec.get();
/* Store the capture object directly in the function record if there is enough space */
if (sizeof(capture) <= sizeof(rec->data)) {
/* Get a pointer to the capture object */
auto data = (sizeof(capture) <= sizeof(call.func.data)
? &call.func.data : call.func.data[0]);
- capture *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));
+ auto *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));
/* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */
return_value_policy policy = return_value_policy_override<Return>::policy(call.func.policy);
/* Process any user-provided function attributes */
process_attributes<Extra...>::init(extra..., rec);
+ {
+ constexpr bool has_kw_only_args = any_of<std::is_same<kw_only, Extra>...>::value,
+ has_pos_only_args = any_of<std::is_same<pos_only, Extra>...>::value,
+ has_args = any_of<std::is_same<args, Args>...>::value,
+ has_arg_annotations = any_of<is_keyword<Extra>...>::value;
+ static_assert(has_arg_annotations || !has_kw_only_args, "py::kw_only requires the use of argument annotations");
+ static_assert(has_arg_annotations || !has_pos_only_args, "py::pos_only requires the use of argument annotations (for docstrings and aligning the annotations to the argument)");
+ static_assert(!(has_args && has_kw_only_args), "py::kw_only cannot be combined with a py::args argument");
+ }
+
/* Generate a readable signature describing the function's arguments and return value types */
static constexpr auto signature = _("(") + cast_in::arg_names + _(") -> ") + cast_out::name;
PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types();
/* Register the function with Python from generic (non-templated) code */
- initialize_generic(rec, signature.text, types.data(), sizeof...(Args));
+ // Pass on the ownership over the `unique_rec` to `initialize_generic`. `rec` stays valid.
+ initialize_generic(std::move(unique_rec), signature.text, types.data(), sizeof...(Args));
if (cast_in::has_args) rec->has_args = true;
if (cast_in::has_kwargs) rec->has_kwargs = true;
}
}
+ // Utility class that keeps track of all duplicated strings, and cleans them up in its destructor,
+ // unless they are released. Basically a RAII-solution to deal with exceptions along the way.
+ class strdup_guard {
+ public:
+ ~strdup_guard() {
+ for (auto s : strings)
+ std::free(s);
+ }
+ char *operator()(const char *s) {
+ auto t = strdup(s);
+ strings.push_back(t);
+ return t;
+ }
+ void release() {
+ strings.clear();
+ }
+ private:
+ std::vector<char *> strings;
+ };
+
/// Register a function call with Python (generic non-templated code goes here)
- void initialize_generic(detail::function_record *rec, const char *text,
+ void initialize_generic(unique_function_record &&unique_rec, const char *text,
const std::type_info *const *types, size_t args) {
+ // Do NOT receive `unique_rec` by value. If this function fails to move out the unique_ptr,
+ // we do not want this to destuct the pointer. `initialize` (the caller) still relies on the
+ // pointee being alive after this call. Only move out if a `capsule` is going to keep it alive.
+ auto rec = unique_rec.get();
+
+ // Keep track of strdup'ed strings, and clean them up as long as the function's capsule
+ // has not taken ownership yet (when `unique_rec.release()` is called).
+ // Note: This cannot easily be fixed by a `unique_ptr` with custom deleter, because the strings
+ // are only referenced before strdup'ing. So only *after* the following block could `destruct`
+ // safely be called, but even then, `repr` could still throw in the middle of copying all strings.
+ strdup_guard guarded_strdup;
/* Create copies of all referenced C-style strings */
- rec->name = strdup(rec->name ? rec->name : "");
- if (rec->doc) rec->doc = strdup(rec->doc);
+ rec->name = guarded_strdup(rec->name ? rec->name : "");
+ if (rec->doc) rec->doc = guarded_strdup(rec->doc);
for (auto &a: rec->args) {
if (a.name)
- a.name = strdup(a.name);
+ a.name = guarded_strdup(a.name);
if (a.descr)
- a.descr = strdup(a.descr);
+ a.descr = guarded_strdup(a.descr);
else if (a.value)
- a.descr = strdup(a.value.attr("__repr__")().cast<std::string>().c_str());
+ a.descr = guarded_strdup(repr(a.value).cast<std::string>().c_str());
}
rec->is_constructor = !strcmp(rec->name, "__init__") || !strcmp(rec->name, "__setstate__");
#if !defined(NDEBUG) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING)
if (rec->is_constructor && !rec->is_new_style_constructor) {
- const auto class_name = std::string(((PyTypeObject *) rec->scope.ptr())->tp_name);
+ const auto class_name = detail::get_fully_qualified_tp_name((PyTypeObject *) rec->scope.ptr());
const auto func_name = std::string(rec->name);
PyErr_WarnEx(
PyExc_FutureWarning,
// Write arg name for everything except *args and **kwargs.
if (*(pc + 1) == '*')
continue;
-
+ // Separator for keyword-only arguments, placed before the kw
+ // arguments start
+ if (rec->nargs_kw_only > 0 && arg_index + rec->nargs_kw_only == args)
+ signature += "*, ";
if (arg_index < rec->args.size() && rec->args[arg_index].name) {
signature += rec->args[arg_index].name;
} else if (arg_index == 0 && rec->is_method) {
signature += " = ";
signature += rec->args[arg_index].descr;
}
+ // Separator for positional-only arguments (placed after the
+ // argument, rather than before like *
+ if (rec->nargs_pos_only > 0 && (arg_index + 1) == rec->nargs_pos_only)
+ signature += ", /";
arg_index++;
} else if (c == '%') {
const std::type_info *t = types[type_index++];
signature += c;
}
}
+
if (arg_index != args || types[type_index] != nullptr)
pybind11_fail("Internal error while parsing type signature (2)");
#if PY_MAJOR_VERSION < 3
if (strcmp(rec->name, "__next__") == 0) {
std::free(rec->name);
- rec->name = strdup("next");
+ rec->name = guarded_strdup("next");
} else if (strcmp(rec->name, "__bool__") == 0) {
std::free(rec->name);
- rec->name = strdup("__nonzero__");
+ rec->name = guarded_strdup("__nonzero__");
}
#endif
- rec->signature = strdup(signature.c_str());
+ rec->signature = guarded_strdup(signature.c_str());
rec->args.shrink_to_fit();
rec->nargs = (std::uint16_t) args;
rec->def->ml_meth = reinterpret_cast<PyCFunction>(reinterpret_cast<void (*) (void)>(*dispatcher));
rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS;
- capsule rec_capsule(rec, [](void *ptr) {
+ capsule rec_capsule(unique_rec.release(), [](void *ptr) {
destruct((detail::function_record *) ptr);
});
+ guarded_strdup.release();
object scope_module;
if (rec->scope) {
if (!m_ptr)
pybind11_fail("cpp_function::cpp_function(): Could not allocate function object");
} else {
- /* Append at the end of the overload chain */
+ /* Append at the beginning or end of the overload chain */
m_ptr = rec->sibling.ptr();
inc_ref();
- chain_start = chain;
if (chain->is_method != rec->is_method)
pybind11_fail("overloading a method with both static and instance methods is not supported; "
#if defined(NDEBUG)
std::string(pybind11::str(rec->scope.attr("__name__"))) + "." + std::string(rec->name) + signature
#endif
);
- while (chain->next)
- chain = chain->next;
- chain->next = rec;
+
+ if (rec->prepend) {
+ // Beginning of chain; we need to replace the capsule's current head-of-the-chain
+ // pointer with this one, then make this one point to the previous head of the
+ // chain.
+ chain_start = rec;
+ rec->next = chain;
+ auto rec_capsule = reinterpret_borrow<capsule>(((PyCFunctionObject *) m_ptr)->m_self);
+ rec_capsule.set_pointer(unique_rec.release());
+ guarded_strdup.release();
+ } else {
+ // Or end of chain (normal behavior)
+ chain_start = chain;
+ while (chain->next)
+ chain = chain->next;
+ chain->next = unique_rec.release();
+ guarded_strdup.release();
+ }
}
std::string signatures;
}
/* Install docstring */
- PyCFunctionObject *func = (PyCFunctionObject *) m_ptr;
- if (func->m_ml->ml_doc)
- std::free(const_cast<char *>(func->m_ml->ml_doc));
- func->m_ml->ml_doc = strdup(signatures.c_str());
+ auto *func = (PyCFunctionObject *) m_ptr;
+ std::free(const_cast<char *>(func->m_ml->ml_doc));
+ // Install docstring if it's non-empty (when at least one option is enabled)
+ func->m_ml->ml_doc = signatures.empty() ? nullptr : strdup(signatures.c_str());
if (rec->is_method) {
m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr());
}
/// When a cpp_function is GCed, release any memory allocated by pybind11
- static void destruct(detail::function_record *rec) {
+ static void destruct(detail::function_record *rec, bool free_strings = true) {
+ // If on Python 3.9, check the interpreter "MICRO" (patch) version.
+ // If this is running on 3.9.0, we have to work around a bug.
+ #if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
+ static bool is_zero = Py_GetVersion()[4] == '0';
+ #endif
+
while (rec) {
detail::function_record *next = rec->next;
if (rec->free_data)
rec->free_data(rec);
- std::free((char *) rec->name);
- std::free((char *) rec->doc);
- std::free((char *) rec->signature);
- for (auto &arg: rec->args) {
- std::free(const_cast<char *>(arg.name));
- std::free(const_cast<char *>(arg.descr));
- arg.value.dec_ref();
+ // During initialization, these strings might not have been copied yet,
+ // so they cannot be freed. Once the function has been created, they can.
+ // Check `make_function_record` for more details.
+ if (free_strings) {
+ std::free((char *) rec->name);
+ std::free((char *) rec->doc);
+ std::free((char *) rec->signature);
+ for (auto &arg: rec->args) {
+ std::free(const_cast<char *>(arg.name));
+ std::free(const_cast<char *>(arg.descr));
+ }
}
+ for (auto &arg: rec->args)
+ arg.value.dec_ref();
if (rec->def) {
std::free(const_cast<char *>(rec->def->ml_doc));
- delete rec->def;
+ // Python 3.9.0 decref's these in the wrong order; rec->def
+ // If loaded on 3.9.0, let these leak (use Python 3.9.1 at runtime to fix)
+ // See https://github.com/python/cpython/pull/22670
+ #if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
+ if (!is_zero)
+ delete rec->def;
+ #else
+ delete rec->def;
+ #endif
}
delete rec;
rec = next;
*it = overloads;
/* Need to know how many arguments + keyword arguments there are to pick the right overload */
- const size_t n_args_in = (size_t) PyTuple_GET_SIZE(args_in);
+ const auto n_args_in = (size_t) PyTuple_GET_SIZE(args_in);
handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr,
result = PYBIND11_TRY_NEXT_OVERLOAD;
auto self_value_and_holder = value_and_holder();
if (overloads->is_constructor) {
- const auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());
- const auto pi = reinterpret_cast<instance *>(parent.ptr());
- self_value_and_holder = pi->get_value_and_holder(tinfo, false);
-
- if (!self_value_and_holder.type || !self_value_and_holder.inst) {
+ if (!PyObject_TypeCheck(parent.ptr(), (PyTypeObject *) overloads->scope.ptr())) {
PyErr_SetString(PyExc_TypeError, "__init__(self, ...) called with invalid `self` argument");
return nullptr;
}
+ const auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());
+ const auto pi = reinterpret_cast<instance *>(parent.ptr());
+ self_value_and_holder = pi->get_value_and_holder(tinfo, true);
+
// If this value is already registered it must mean __init__ is invoked multiple times;
// we really can't support that in C++, so just ignore the second __init__.
if (self_value_and_holder.instance_registered())
*/
const function_record &func = *it;
- size_t pos_args = func.nargs; // Number of positional arguments that we need
- if (func.has_args) --pos_args; // (but don't count py::args
- if (func.has_kwargs) --pos_args; // or py::kwargs)
+ size_t num_args = func.nargs; // Number of positional arguments that we need
+ if (func.has_args) --num_args; // (but don't count py::args
+ if (func.has_kwargs) --num_args; // or py::kwargs)
+ size_t pos_args = num_args - func.nargs_kw_only;
if (!func.has_args && n_args_in > pos_args)
- continue; // Too many arguments for this overload
+ continue; // Too many positional arguments for this overload
if (n_args_in < pos_args && func.args.size() < pos_args)
- continue; // Not enough arguments given, and not enough defaults to fill in the blanks
+ continue; // Not enough positional arguments given, and not enough defaults to fill in the blanks
function_call call(func, parent);
self_value_and_holder.type->dealloc(self_value_and_holder);
call.init_self = PyTuple_GET_ITEM(args_in, 0);
- call.args.push_back(reinterpret_cast<PyObject *>(&self_value_and_holder));
+ call.args.emplace_back(reinterpret_cast<PyObject *>(&self_value_and_holder));
call.args_convert.push_back(false);
++args_copied;
}
// We'll need to copy this if we steal some kwargs for defaults
dict kwargs = reinterpret_borrow<dict>(kwargs_in);
+ // 1.5. Fill in any missing pos_only args from defaults if they exist
+ if (args_copied < func.nargs_pos_only) {
+ for (; args_copied < func.nargs_pos_only; ++args_copied) {
+ const auto &arg_rec = func.args[args_copied];
+ handle value;
+
+ if (arg_rec.value) {
+ value = arg_rec.value;
+ }
+ if (value) {
+ call.args.push_back(value);
+ call.args_convert.push_back(arg_rec.convert);
+ } else
+ break;
+ }
+
+ if (args_copied < func.nargs_pos_only)
+ continue; // Not enough defaults to fill the positional arguments
+ }
+
// 2. Check kwargs and, failing that, defaults that may help complete the list
- if (args_copied < pos_args) {
+ if (args_copied < num_args) {
bool copied_kwargs = false;
- for (; args_copied < pos_args; ++args_copied) {
- const auto &arg = func.args[args_copied];
+ for (; args_copied < num_args; ++args_copied) {
+ const auto &arg_rec = func.args[args_copied];
handle value;
- if (kwargs_in && arg.name)
- value = PyDict_GetItemString(kwargs.ptr(), arg.name);
+ if (kwargs_in && arg_rec.name)
+ value = PyDict_GetItemString(kwargs.ptr(), arg_rec.name);
if (value) {
// Consume a kwargs value
kwargs = reinterpret_steal<dict>(PyDict_Copy(kwargs.ptr()));
copied_kwargs = true;
}
- PyDict_DelItemString(kwargs.ptr(), arg.name);
- } else if (arg.value) {
- value = arg.value;
+ PyDict_DelItemString(kwargs.ptr(), arg_rec.name);
+ } else if (arg_rec.value) {
+ value = arg_rec.value;
+ }
+
+ if (!arg_rec.none && value.is_none()) {
+ break;
}
if (value) {
call.args.push_back(value);
- call.args_convert.push_back(arg.convert);
+ call.args_convert.push_back(arg_rec.convert);
}
else
break;
}
- if (args_copied < pos_args)
+ if (args_copied < num_args)
continue; // Not enough arguments, defaults, or kwargs to fill the positional arguments
}
// 3. Check everything was consumed (unless we have a kwargs arg)
- if (kwargs && kwargs.size() > 0 && !func.has_kwargs)
+ if (kwargs && !kwargs.empty() && !func.has_kwargs)
continue; // Unconsumed kwargs, but no py::kwargs argument to accept them
// 4a. If we have a py::args argument, create a new tuple with leftovers
} catch (error_already_set &e) {
e.restore();
return nullptr;
-#if defined(__GNUG__) && !defined(__clang__)
+#ifdef __GLIBCXX__
} catch ( abi::__forced_unwind& ) {
throw;
#endif
for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) {
if (!some_args) some_args = true;
else msg += ", ";
- msg += pybind11::repr(args_[ti]);
+ try {
+ msg += pybind11::repr(args_[ti]);
+ } catch (const error_already_set&) {
+ msg += "<repr raised Error>";
+ }
}
if (kwargs_in) {
auto kwargs = reinterpret_borrow<dict>(kwargs_in);
- if (kwargs.size() > 0) {
+ if (!kwargs.empty()) {
if (some_args) msg += "; ";
msg += "kwargs: ";
bool first = true;
for (auto kwarg : kwargs) {
if (first) first = false;
else msg += ", ";
- msg += pybind11::str("{}={!r}").format(kwarg.first, kwarg.second);
+ msg += pybind11::str("{}=").format(kwarg.first);
+ try {
+ msg += pybind11::repr(kwarg.second);
+ } catch (const error_already_set&) {
+ msg += "<repr raised Error>";
+ }
}
}
}
};
/// Wrapper for Python extension modules
-class module : public object {
+class module_ : public object {
public:
- PYBIND11_OBJECT_DEFAULT(module, object, PyModule_Check)
+ PYBIND11_OBJECT_DEFAULT(module_, object, PyModule_Check)
/// Create a new top-level Python module with the given name and docstring
- explicit module(const char *name, const char *doc = nullptr) {
- if (!options::show_user_defined_docstrings()) doc = nullptr;
+ PYBIND11_DEPRECATED("Use PYBIND11_MODULE or module_::create_extension_module instead")
+ explicit module_(const char *name, const char *doc = nullptr) {
#if PY_MAJOR_VERSION >= 3
- PyModuleDef *def = new PyModuleDef();
- std::memset(def, 0, sizeof(PyModuleDef));
- def->m_name = name;
- def->m_doc = doc;
- def->m_size = -1;
- Py_INCREF(def);
- m_ptr = PyModule_Create(def);
+ *this = create_extension_module(name, doc, new PyModuleDef());
#else
- m_ptr = Py_InitModule3(name, nullptr, doc);
+ *this = create_extension_module(name, doc, nullptr);
#endif
- if (m_ptr == nullptr)
- pybind11_fail("Internal error in module::module()");
- inc_ref();
}
/** \rst
details on the ``Extra&& ... extra`` argument, see section :ref:`extras`.
\endrst */
template <typename Func, typename... Extra>
- module &def(const char *name_, Func &&f, const Extra& ... extra) {
+ module_ &def(const char *name_, Func &&f, const Extra& ... extra) {
cpp_function func(std::forward<Func>(f), name(name_), scope(*this),
sibling(getattr(*this, name_, none())), extra...);
// NB: allow overwriting here because cpp_function sets up a chain with the intention of
.. code-block:: cpp
- py::module m("example", "pybind11 example plugin");
- py::module m2 = m.def_submodule("sub", "A submodule of 'example'");
- py::module m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'");
+ py::module_ m("example", "pybind11 example plugin");
+ py::module_ m2 = m.def_submodule("sub", "A submodule of 'example'");
+ py::module_ m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'");
\endrst */
- module def_submodule(const char *name, const char *doc = nullptr) {
+ module_ def_submodule(const char *name, const char *doc = nullptr) {
std::string full_name = std::string(PyModule_GetName(m_ptr))
+ std::string(".") + std::string(name);
- auto result = reinterpret_borrow<module>(PyImport_AddModule(full_name.c_str()));
+ auto result = reinterpret_borrow<module_>(PyImport_AddModule(full_name.c_str()));
if (doc && options::show_user_defined_docstrings())
result.attr("__doc__") = pybind11::str(doc);
attr(name) = result;
}
/// Import and return a module or throws `error_already_set`.
- static module import(const char *name) {
+ static module_ import(const char *name) {
PyObject *obj = PyImport_ImportModule(name);
if (!obj)
throw error_already_set();
- return reinterpret_steal<module>(obj);
+ return reinterpret_steal<module_>(obj);
}
/// Reload the module or throws `error_already_set`.
PyObject *obj = PyImport_ReloadModule(ptr());
if (!obj)
throw error_already_set();
- *this = reinterpret_steal<module>(obj);
+ *this = reinterpret_steal<module_>(obj);
}
- // Adds an object to the module using the given name. Throws if an object with the given name
- // already exists.
- //
- // overwrite should almost always be false: attempting to overwrite objects that pybind11 has
- // established will, in most cases, break things.
+ /** \rst
+ Adds an object to the module using the given name. Throws if an object with the given name
+ already exists.
+
+ ``overwrite`` should almost always be false: attempting to overwrite objects that pybind11 has
+ established will, in most cases, break things.
+ \endrst */
PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) {
if (!overwrite && hasattr(*this, name))
pybind11_fail("Error during initialization: multiple incompatible definitions with name \"" +
PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */);
}
+
+#if PY_MAJOR_VERSION >= 3
+ using module_def = PyModuleDef;
+#else
+ struct module_def {};
+#endif
+
+ /** \rst
+ Create a new top-level module that can be used as the main module of a C extension.
+
+ For Python 3, ``def`` should point to a statically allocated module_def.
+ For Python 2, ``def`` can be a nullptr and is completely ignored.
+ \endrst */
+ static module_ create_extension_module(const char *name, const char *doc, module_def *def) {
+#if PY_MAJOR_VERSION >= 3
+ // module_def is PyModuleDef
+ def = new (def) PyModuleDef { // Placement new (not an allocation).
+ /* m_base */ PyModuleDef_HEAD_INIT,
+ /* m_name */ name,
+ /* m_doc */ options::show_user_defined_docstrings() ? doc : nullptr,
+ /* m_size */ -1,
+ /* m_methods */ nullptr,
+ /* m_slots */ nullptr,
+ /* m_traverse */ nullptr,
+ /* m_clear */ nullptr,
+ /* m_free */ nullptr
+ };
+ auto m = PyModule_Create(def);
+#else
+ // Ignore module_def *def; only necessary for Python 3
+ (void) def;
+ auto m = Py_InitModule3(name, nullptr, options::show_user_defined_docstrings() ? doc : nullptr);
+#endif
+ // Propagate a pending Python error if one caused the failure; otherwise it is an internal error.
+ if (m == nullptr) {
+ if (PyErr_Occurred())
+ throw error_already_set();
+ pybind11_fail("Internal error in module_::create_extension_module()");
+ }
+ // TODO: Should be reinterpret_steal for Python 3, but Python also steals it again when returned from PyInit_...
+ // For Python 2, reinterpret_borrow is correct.
+ return reinterpret_borrow<module_>(m);
+ }
};
+// When inside a namespace (or anywhere as long as it's not the first item on a line),
+// C++20 allows "module" to be used. This is provided for backward compatibility, and for
+// simplicity, if someone wants to use py::module for example, that is perfectly safe.
+using module = module_;
+
/// \ingroup python_builtins
/// Return a dictionary representing the global variables in the current execution frame,
/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded).
inline dict globals() {
PyObject *p = PyEval_GetGlobals();
- return reinterpret_borrow<dict>(p ? p : module::import("__main__").attr("__dict__").ptr());
+ return reinterpret_borrow<dict>(p ? p : module_::import("__main__").attr("__dict__").ptr());
}
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic support for creating new Python heap types
class generic_type : public object {
- template <typename...> friend class class_;
public:
PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check)
protected:
void initialize(const type_record &rec) {
- if (rec.scope && hasattr(rec.scope, rec.name))
+ if (rec.scope && hasattr(rec.scope, "__dict__") && rec.scope.attr("__dict__").contains(rec.name))
pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name) +
"\": an object with that name is already defined");
void install_buffer_funcs(
buffer_info *(*get_buffer)(PyObject *, void *),
void *get_buffer_data) {
- PyHeapTypeObject *type = (PyHeapTypeObject*) m_ptr;
+ auto *type = (PyHeapTypeObject*) m_ptr;
auto tinfo = detail::get_type_info(&type->ht_type);
if (!type->ht_type.tp_as_buffer)
pybind11_fail(
"To be able to register buffer protocol support for the type '" +
- std::string(tinfo->type->tp_name) +
+ get_fully_qualified_tp_name(tinfo->type) +
"' the associated class<>(..) invocation must "
"include the pybind11::buffer_protocol() annotation!");
inline void call_operator_delete(void *p, size_t s, size_t a) {
(void)s; (void)a;
-#if defined(PYBIND11_CPP17)
- if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
- ::operator delete(p, s, std::align_val_t(a));
- else
+ #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
+ if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+ #ifdef __cpp_sized_deallocation
+ ::operator delete(p, s, std::align_val_t(a));
+ #else
+ ::operator delete(p, std::align_val_t(a));
+ #endif
+ return;
+ }
+ #endif
+ #ifdef __cpp_sized_deallocation
::operator delete(p, s);
-#else
- ::operator delete(p);
-#endif
+ #else
+ ::operator delete(p);
+ #endif
+}
+
+// Registers a bound method on `cls` under the cpp_function's own name. If the method being
+// added is `__eq__` and the class itself does not define `__hash__`, set `__hash__` to None,
+// matching Python's own rule that a class overriding `__eq__` without `__hash__` is unhashable.
+inline void add_class_method(object& cls, const char *name_, const cpp_function &cf) {
+ cls.attr(cf.name()) = cf;
+ if (strcmp(name_, "__eq__") == 0 && !cls.attr("__dict__").contains("__hash__")) {
+ cls.attr("__hash__") = none();
+ }
+}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Given a pointer to a member function, cast it to its `Derived` version.
/// Forward everything else unchanged.
class_ &def(const char *name_, Func&& f, const Extra&... extra) {
cpp_function cf(method_adaptor<type>(std::forward<Func>(f)), name(name_), is_method(*this),
sibling(getattr(*this, name_, none())), extra...);
- attr(cf.name()) = cf;
+ add_class_method(*this, name_, cf);
return *this;
}
return *this;
}
- template <typename Func> class_& def_buffer(Func &&func) {
+ template <typename Func>
+ class_& def_buffer(Func &&func) {
struct capture { Func func; };
- capture *ptr = new capture { std::forward<Func>(func) };
+ auto *ptr = new capture { std::forward<Func>(func) };
install_buffer_funcs([](PyObject *obj, void *ptr) -> buffer_info* {
detail::make_caster<type> caster;
if (!caster.load(obj, false))
return nullptr;
return new buffer_info(((capture *) ptr)->func(caster));
}, ptr);
+ weakref(m_ptr, cpp_function([ptr](handle wr) {
+ delete ptr;
+ wr.dec_ref();
+ })).release();
return *this;
}
/// Deallocates an instance; via holder, if constructed; otherwise via operator delete.
static void dealloc(detail::value_and_holder &v_h) {
+ // We could be deallocating because we are cleaning up after a Python exception.
+ // If so, the Python error indicator will be set. We need to clear that before
+ // running the destructor, in case the destructor code calls more Python.
+ // If we don't, the Python API will exit with an exception, and pybind11 will
+ // throw error_already_set from the C++ destructor which is forbidden and triggers
+ // std::terminate().
+ error_scope scope;
if (v_h.holder_constructed()) {
v_h.holder<holder_type>().~holder_type();
v_h.set_holder_constructed(false);
return {std::forward<GetState>(g), std::forward<SetState>(s)};
}
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Looks up the Python-side name of an enum value by scanning the enum type's `__entries`
+// dict (maps name -> (value, ...)); returns "???" if the value has no registered entry.
+inline str enum_name(handle arg) {
+ dict entries = arg.get_type().attr("__entries");
+ for (auto kv : entries) {
+ // kv.second[0] holds the enum value for entry name kv.first.
+ if (handle(kv.second[int_(0)]).equal(arg))
+ return pybind11::str(kv.first);
+ }
+ return "???";
+}
+
struct enum_base {
enum_base(handle base, handle parent) : m_base(base), m_parent(parent) { }
auto static_property = handle((PyObject *) get_internals().static_property_type);
m_base.attr("__repr__") = cpp_function(
- [](handle arg) -> str {
- handle type = arg.get_type();
+ [](object arg) -> str {
+ handle type = type::handle_of(arg);
object type_name = type.attr("__name__");
- dict entries = type.attr("__entries");
- for (const auto &kv : entries) {
- object other = kv.second[int_(0)];
- if (other.equal(arg))
- return pybind11::str("{}.{}").format(type_name, kv.first);
- }
- return pybind11::str("{}.???").format(type_name);
- }, is_method(m_base)
+ return pybind11::str("<{}.{}: {}>").format(type_name, enum_name(arg), int_(arg));
+ }, name("__repr__"), is_method(m_base)
);
- m_base.attr("name") = property(cpp_function(
+ m_base.attr("name") = property(cpp_function(&enum_name, name("name"), is_method(m_base)));
+
+ m_base.attr("__str__") = cpp_function(
[](handle arg) -> str {
- dict entries = arg.get_type().attr("__entries");
- for (const auto &kv : entries) {
- if (handle(kv.second[int_(0)]).equal(arg))
- return pybind11::str(kv.first);
- }
- return "???";
- }, is_method(m_base)
- ));
+ object type_name = type::handle_of(arg).attr("__name__");
+ return pybind11::str("{}.{}").format(type_name, enum_name(arg));
+ }, name("name"), is_method(m_base)
+ );
m_base.attr("__doc__") = static_property(cpp_function(
[](handle arg) -> std::string {
if (((PyTypeObject *) arg.ptr())->tp_doc)
docstring += std::string(((PyTypeObject *) arg.ptr())->tp_doc) + "\n\n";
docstring += "Members:";
- for (const auto &kv : entries) {
+ for (auto kv : entries) {
auto key = std::string(pybind11::str(kv.first));
auto comment = kv.second[int_(1)];
docstring += "\n\n " + key;
docstring += " : " + (std::string) pybind11::str(comment);
}
return docstring;
- }
+ }, name("__doc__")
), none(), none(), "");
m_base.attr("__members__") = static_property(cpp_function(
[](handle arg) -> dict {
dict entries = arg.attr("__entries"), m;
- for (const auto &kv : entries)
+ for (auto kv : entries)
m[kv.first] = kv.second[int_(0)];
return m;
- }), none(), none(), ""
+ }, name("__members__")), none(), none(), ""
);
#define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior) \
m_base.attr(op) = cpp_function( \
[](object a, object b) { \
- if (!a.get_type().is(b.get_type())) \
+ if (!type::handle_of(a).is(type::handle_of(b))) \
strict_behavior; \
return expr; \
}, \
- is_method(m_base))
+ name(op), is_method(m_base), arg("other"))
#define PYBIND11_ENUM_OP_CONV(op, expr) \
m_base.attr(op) = cpp_function( \
int_ a(a_), b(b_); \
return expr; \
}, \
- is_method(m_base))
+ name(op), is_method(m_base), arg("other"))
#define PYBIND11_ENUM_OP_CONV_LHS(op, expr) \
m_base.attr(op) = cpp_function( \
int_ a(a_); \
return expr; \
}, \
- is_method(m_base))
+ name(op), is_method(m_base), arg("other"))
if (is_convertible) {
PYBIND11_ENUM_OP_CONV_LHS("__eq__", !b.is_none() && a.equal(b));
PYBIND11_ENUM_OP_CONV("__xor__", a ^ b);
PYBIND11_ENUM_OP_CONV("__rxor__", a ^ b);
m_base.attr("__invert__") = cpp_function(
- [](object arg) { return ~(int_(arg)); }, is_method(m_base));
+ [](object arg) { return ~(int_(arg)); }, name("__invert__"), is_method(m_base));
}
} else {
PYBIND11_ENUM_OP_STRICT("__eq__", int_(a).equal(int_(b)), return false);
#undef PYBIND11_ENUM_OP_CONV
#undef PYBIND11_ENUM_OP_STRICT
- object getstate = cpp_function(
- [](object arg) { return int_(arg); }, is_method(m_base));
+ m_base.attr("__getstate__") = cpp_function(
+ [](object arg) { return int_(arg); }, name("__getstate__"), is_method(m_base));
- m_base.attr("__getstate__") = getstate;
- m_base.attr("__hash__") = getstate;
+ m_base.attr("__hash__") = cpp_function(
+ [](object arg) { return int_(arg); }, name("__hash__"), is_method(m_base));
}
PYBIND11_NOINLINE void value(char const* name_, object value, const char *doc = nullptr) {
PYBIND11_NOINLINE void export_values() {
dict entries = m_base.attr("__entries");
- for (const auto &kv : entries)
+ for (auto kv : entries)
m_parent.attr(kv.first) = kv.second[int_(0)];
}
handle m_parent;
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Binds C++ enumerations and enumeration classes to Python
template <typename Type> class enum_ : public class_<Type> {
constexpr bool is_convertible = std::is_convertible<Type, Scalar>::value;
m_base.init(is_arithmetic, is_convertible);
- def(init([](Scalar i) { return static_cast<Type>(i); }));
+ def(init([](Scalar i) { return static_cast<Type>(i); }), arg("value"));
+ def_property_readonly("value", [](Type value) { return (Scalar) value; });
def("__int__", [](Type value) { return (Scalar) value; });
#if PY_MAJOR_VERSION < 3
def("__long__", [](Type value) { return (Scalar) value; });
#endif
- #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 8
+ #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 8)
def("__index__", [](Type value) { return (Scalar) value; });
#endif
- cpp_function setstate(
- [](Type &value, Scalar arg) { value = static_cast<Type>(arg); },
- is_method(*this));
- attr("__setstate__") = setstate;
+ attr("__setstate__") = cpp_function(
+ [](detail::value_and_holder &v_h, Scalar arg) {
+ detail::initimpl::setstate<Base>(v_h, static_cast<Type>(arg),
+ Py_TYPE(v_h.inst) != v_h.type->type); },
+ detail::is_new_style_constructor(),
+ pybind11::name("__setstate__"), is_method(*this), arg("state"));
}
/// Export enumeration entries into the parent scope
detail::enum_base m_base;
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
inline void keep_alive_impl(handle nurse, handle patient) {
bool first_or_done;
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Makes a python iterator from a first and past-the-end C++ InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
typename ValueType = decltype(*std::declval<Iterator>()),
typename... Extra>
iterator make_iterator(Iterator first, Sentinel last, Extra &&... extra) {
- typedef detail::iterator_state<Iterator, Sentinel, false, Policy> state;
+ using state = detail::iterator_state<Iterator, Sentinel, false, Policy>;
if (!detail::get_type_info(typeid(state), false)) {
class_<state>(handle(), "iterator", pybind11::module_local())
typename KeyType = decltype((*std::declval<Iterator>()).first),
typename... Extra>
iterator make_key_iterator(Iterator first, Sentinel last, Extra &&... extra) {
- typedef detail::iterator_state<Iterator, Sentinel, true, Policy> state;
+ using state = detail::iterator_state<Iterator, Sentinel, true, Policy>;
if (!detail::get_type_info(typeid(state), false)) {
class_<state>(handle(), "iterator", pybind11::module_local())
template <typename InputType, typename OutputType> void implicitly_convertible() {
struct set_flag {
bool &flag;
- set_flag(bool &flag) : flag(flag) { flag = true; }
+ set_flag(bool &flag_) : flag(flag_) { flag_ = true; }
~set_flag() { flag = false; }
};
auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {
class exception : public object {
public:
exception() = default;
- exception(handle scope, const char *name, PyObject *base = PyExc_Exception) {
+ exception(handle scope, const char *name, handle base = PyExc_Exception) {
std::string full_name = scope.attr("__name__").cast<std::string>() +
std::string(".") + name;
- m_ptr = PyErr_NewException(const_cast<char *>(full_name.c_str()), base, NULL);
- if (hasattr(scope, name))
+ m_ptr = PyErr_NewException(const_cast<char *>(full_name.c_str()), base.ptr(), NULL);
+ if (hasattr(scope, "__dict__") && scope.attr("__dict__").contains(name))
pybind11_fail("Error during initialization: multiple incompatible "
"definitions with name \"" + std::string(name) + "\"");
scope.attr(name) = *this;
}
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Returns a reference to a function-local static exception object used in the simple
// register_exception approach below. (It would be simpler to have the static local variable
// directly in register_exception, but that makes clang <3.5 segfault - issue #1349).
template <typename CppException>
exception<CppException> &get_exception_object() { static exception<CppException> ex; return ex; }
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/**
* Registers a Python exception in `m` of the given `name` and installs an exception translator to
template <typename CppException>
exception<CppException> ®ister_exception(handle scope,
const char *name,
- PyObject *base = PyExc_Exception) {
+ handle base = PyExc_Exception) {
auto &ex = detail::get_exception_object<CppException>();
if (!ex) ex = exception<CppException>(scope, name, base);
return ex;
}
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
PYBIND11_NOINLINE inline void print(tuple args, dict kwargs) {
auto strings = tuple(args.size());
for (size_t i = 0; i < args.size(); ++i) {
file = kwargs["file"].cast<object>();
} else {
try {
- file = module::import("sys").attr("stdout");
+ file = module_::import("sys").attr("stdout");
} catch (const error_already_set &) {
/* If print() is called from code that is executed as
part of garbage collection during interpreter shutdown,
if (kwargs.contains("flush") && kwargs["flush"].cast<bool>())
file.attr("flush")();
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
void print(Args &&...args) {
}
if (release) {
- /* Work around an annoying assertion in PyThreadState_Swap */
- #if defined(Py_DEBUG)
- PyInterpreterState *interp = tstate->interp;
- tstate->interp = nullptr;
- #endif
PyEval_AcquireThread(tstate);
- #if defined(Py_DEBUG)
- tstate->interp = interp;
- #endif
}
inc_ref();
pybind11_fail("scoped_acquire::dec_ref(): internal error!");
#endif
PyThreadState_Clear(tstate);
- PyThreadState_DeleteCurrent();
+ if (active)
+ PyThreadState_DeleteCurrent();
PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate);
release = false;
}
}
+ /// This method will disable the PyThreadState_DeleteCurrent call and the
+ /// GIL won't be acquired. This method should be used if the interpreter
+ /// could be shutting down when this is called, as thread deletion is not
+ /// allowed during shutdown. Check _Py_IsFinalizing() on Python 3.7+, and
+ /// protect subsequent code.
+ PYBIND11_NOINLINE void disarm() {
+ active = false;
+ }
+
PYBIND11_NOINLINE ~gil_scoped_acquire() {
dec_ref();
if (release)
private:
PyThreadState *tstate = nullptr;
bool release = true;
+ bool active = true;
};
class gil_scoped_release {
PYBIND11_TLS_DELETE_VALUE(key);
}
}
+
+ /// This method will disable the PyThreadState_DeleteCurrent call and the
+ /// GIL won't be acquired. This method should be used if the interpreter
+ /// could be shutting down when this is called, as thread deletion is not
+ /// allowed during shutdown. Check _Py_IsFinalizing() on Python 3.7+, and
+ /// protect subsequent code.
+ PYBIND11_NOINLINE void disarm() {
+ active = false;
+ }
+
~gil_scoped_release() {
if (!tstate)
return;
- PyEval_RestoreThread(tstate);
+ // `PyEval_RestoreThread()` should not be called if runtime is finalizing
+ if (active)
+ PyEval_RestoreThread(tstate);
if (disassoc) {
auto key = detail::get_internals().tstate;
PYBIND11_TLS_REPLACE_VALUE(key, tstate);
private:
PyThreadState *tstate;
bool disassoc;
+ bool active = true;
};
#elif defined(PYPY_VERSION)
class gil_scoped_acquire {
public:
gil_scoped_acquire() { state = PyGILState_Ensure(); }
~gil_scoped_acquire() { PyGILState_Release(state); }
+ void disarm() {}
};
class gil_scoped_release {
public:
gil_scoped_release() { state = PyEval_SaveThread(); }
~gil_scoped_release() { PyEval_RestoreThread(state); }
+ void disarm() {}
};
#else
-class gil_scoped_acquire { };
-class gil_scoped_release { };
+class gil_scoped_acquire {
+ void disarm() {}
+};
+class gil_scoped_release {
+ void disarm() {}
+};
#endif
error_already_set::~error_already_set() {
}
}
-inline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) {
- handle self = detail::get_object_handle(this_ptr, this_type);
+PYBIND11_NAMESPACE_BEGIN(detail)
+inline function get_type_override(const void *this_ptr, const type_info *this_type, const char *name) {
+ handle self = get_object_handle(this_ptr, this_type);
if (!self)
return function();
- handle type = self.get_type();
+ handle type = type::handle_of(self);
auto key = std::make_pair(type.ptr(), name);
- /* Cache functions that aren't overloaded in Python to avoid
+ /* Cache functions that aren't overridden in Python to avoid
many costly Python dictionary lookups below */
- auto &cache = detail::get_internals().inactive_overload_cache;
+ auto &cache = get_internals().inactive_override_cache;
if (cache.find(key) != cache.end())
return function();
- function overload = getattr(self, name, function());
- if (overload.is_cpp_function()) {
+ function override = getattr(self, name, function());
+ if (override.is_cpp_function()) {
cache.insert(key);
return function();
}
Py_DECREF(result);
#endif
- return overload;
+ return override;
}
+PYBIND11_NAMESPACE_END(detail)
/** \rst
Try to retrieve a python method by the provided name from the instance pointed to by the this_ptr.
- :this_ptr: The pointer to the object the overload should be retrieved for. This should be the first
- non-trampoline class encountered in the inheritance chain.
- :name: The name of the overloaded Python method to retrieve.
+ :this_ptr: The pointer to the object the overridden method should be retrieved for. This should be
+ the first non-trampoline class encountered in the inheritance chain.
+ :name: The name of the overridden Python method to retrieve.
:return: The Python method by this name from the object or an empty function wrapper.
\endrst */
-template <class T> function get_overload(const T *this_ptr, const char *name) {
+template <class T> function get_override(const T *this_ptr, const char *name) {
auto tinfo = detail::get_type_info(typeid(T));
- return tinfo ? get_type_overload(this_ptr, tinfo, name) : function();
+ return tinfo ? detail::get_type_override(this_ptr, tinfo, name) : function();
}
-#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) { \
+#define PYBIND11_OVERRIDE_IMPL(ret_type, cname, name, ...) \
+ do { \
pybind11::gil_scoped_acquire gil; \
- pybind11::function overload = pybind11::get_overload(static_cast<const cname *>(this), name); \
- if (overload) { \
- auto o = overload(__VA_ARGS__); \
+ pybind11::function override = pybind11::get_override(static_cast<const cname *>(this), name); \
+ if (override) { \
+ auto o = override(__VA_ARGS__); \
if (pybind11::detail::cast_is_temporary_value_reference<ret_type>::value) { \
- static pybind11::detail::overload_caster_t<ret_type> caster; \
+ static pybind11::detail::override_caster_t<ret_type> caster; \
return pybind11::detail::cast_ref<ret_type>(std::move(o), caster); \
} \
else return pybind11::detail::cast_safe<ret_type>(std::move(o)); \
} \
- }
+ } while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up a method named 'fn'
.. code-block:: cpp
std::string toString() override {
- PYBIND11_OVERLOAD_NAME(
+ PYBIND11_OVERRIDE_NAME(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
- toString, // Name of function in C++ (name)
- "__str__", // Name of method in Python (fn)
+ "__str__", // Name of method in Python (name)
+ toString, // Name of function in C++ (fn)
);
}
\endrst */
-#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \
- PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \
- return cname::fn(__VA_ARGS__)
+#define PYBIND11_OVERRIDE_NAME(ret_type, cname, name, fn, ...) \
+ do { \
+ PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
+ return cname::fn(__VA_ARGS__); \
+ } while (false)
/** \rst
- Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERLOAD_NAME`, except that it
- throws if no overload can be found.
+ Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERRIDE_NAME`, except that it
+ throws if no override can be found.
\endrst */
-#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \
- PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \
- pybind11::pybind11_fail("Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\"");
+#define PYBIND11_OVERRIDE_PURE_NAME(ret_type, cname, name, fn, ...) \
+ do { \
+ PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
+ pybind11::pybind11_fail("Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\""); \
+ } while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up the method
// Trampoline (need one for each virtual function)
std::string go(int n_times) override {
- PYBIND11_OVERLOAD_PURE(
+ PYBIND11_OVERRIDE_PURE(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
go, // Name of function in C++ (must match Python name) (fn)
}
};
\endrst */
-#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \
- PYBIND11_OVERLOAD_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
+#define PYBIND11_OVERRIDE(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
/** \rst
- Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERLOAD`, except that it throws
- if no overload can be found.
+ Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERRIDE`, except that it throws
+ if no override can be found.
\endrst */
+#define PYBIND11_OVERRIDE_PURE(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
+
+
+// Deprecated versions
+
+PYBIND11_DEPRECATED("get_type_overload has been deprecated")
+inline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) {
+    return detail::get_type_override(this_ptr, this_type, name);
+}
+
+/// Deprecated alias for get_override(); kept for backward compatibility.
+template <class T>
+inline function get_overload(const T *this_ptr, const char *name) {
+    return get_override(this_ptr, name);
+}
+
+// Deprecated OVERLOAD macros simply forward to the new OVERRIDE macros.
+// Note: no trailing semicolons inside the macro bodies — the expansions must
+// behave exactly like the OVERRIDE macros (callers supply their own `;`),
+// and a stray `;` here trips -Wextra-semi / pedantic builds.
+#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) \
+    PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__)
+#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \
+    PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__)
+#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \
+    PYBIND11_OVERRIDE_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__)
+#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \
+    PYBIND11_OVERRIDE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__)
#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \
-    PYBIND11_OVERLOAD_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
+    PYBIND11_OVERRIDE_PURE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
# pragma warning(pop)
#elif defined(__GNUG__) && !defined(__clang__)
# pragma GCC diagnostic pop
#endif
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
#include <utility>
#include <type_traits>
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/* A few forward declarations */
class handle; class object;
class str; class iterator;
+class type;
struct arg; struct arg_v;
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
class args_proxy;
inline bool isinstance_generic(handle obj, const std::type_info &tp);
struct sequence_item;
struct list_item;
struct tuple_item;
-}
+} // namespace accessor_policies
using obj_attr_accessor = accessor<accessor_policies::obj_attr>;
using str_attr_accessor = accessor<accessor_policies::str_attr>;
using item_accessor = accessor<accessor_policies::generic_item>;
/// Return the object's current reference count
int ref_count() const { return static_cast<int>(Py_REFCNT(derived().ptr())); }
- /// Return a handle to the Python type object underlying the instance
+
+ // TODO PYBIND11_DEPRECATED("Call py::type::handle_of(h) or py::type::of(h) instead of h.get_type()")
handle get_type() const;
private:
bool rich_compare(object_api const &other, int value) const;
};
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/** \rst
Holds a reference to a Python object (no reference counting)
~object() { dec_ref(); }
/** \rst
- Resets the internal pointer to ``nullptr`` without without decreasing the
+ Resets the internal pointer to ``nullptr`` without decreasing the
object's reference count. The function returns a raw handle to the original
Python object.
\endrst */
\endrst */
template <typename T> T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; }
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
inline std::string error_string();
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
/// Fetch and hold an error which was already set in Python. An instance of this is typically
/// thrown to propagate python-side errors back through C++ which can either be caught manually or
error_already_set(const error_already_set &) = default;
error_already_set(error_already_set &&) = default;
- inline ~error_already_set();
+ inline ~error_already_set() override;
/// Give the currently-held error back to Python, if any. If there is currently a Python error
/// already set it is cleared first. After this call, the current object no longer stores the
/// error variables (but the `.what()` string is still available).
void restore() { PyErr_Restore(m_type.release().ptr(), m_value.release().ptr(), m_trace.release().ptr()); }
+ /// If it is impossible to raise the currently-held error, such as in destructor, we can write
+ /// it out using Python's unraisable hook (sys.unraisablehook). The error context should be
+ /// some object whose repr() helps identify the location of the error. Python already knows the
+ /// type and value of the error, so there is no need to repeat that. For example, __func__ could
+ /// be helpful. After this call, the current object no longer stores the error variables,
+ /// and neither does Python.
+ void discard_as_unraisable(object err_context) {
+ restore();
+ PyErr_WriteUnraisable(err_context.ptr());
+ }
+ void discard_as_unraisable(const char *err_context) {
+ discard_as_unraisable(reinterpret_steal<object>(PYBIND11_FROM_STRING(err_context)));
+ }
+
// Does nothing; provided for backwards compatibility.
PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated")
void clear() {}
template <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>
bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); }
-template <> inline bool isinstance<handle>(handle obj) = delete;
+template <> inline bool isinstance<handle>(handle) = delete;
template <> inline bool isinstance<object>(handle obj) { return obj.ptr() != nullptr; }
/// \ingroup python_builtins
/// @} python_builtins
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
inline handle get_function(handle value) {
if (value) {
#if PY_MAJOR_VERSION >= 3
mutable object cache;
};
-NAMESPACE_BEGIN(accessor_policies)
+PYBIND11_NAMESPACE_BEGIN(accessor_policies)
struct obj_attr {
using key_type = object;
static object get(handle obj, handle key) { return getattr(obj, key); }
}
}
};
-NAMESPACE_END(accessor_policies)
+PYBIND11_NAMESPACE_END(accessor_policies)
/// STL iterator template used for tuple, list, sequence and dict
template <typename Policy>
friend bool operator<=(const It &a, const It &b) { return !(a > b); }
};
-NAMESPACE_BEGIN(iterator_policies)
+PYBIND11_NAMESPACE_BEGIN(iterator_policies)
/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers
template <typename T>
struct arrow_proxy {
PyObject *key = nullptr, *value = nullptr;
ssize_t pos = -1;
};
-NAMESPACE_END(iterator_policies)
+PYBIND11_NAMESPACE_END(iterator_policies)
#if !defined(PYPY_VERSION)
using tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
}
inline bool PyNone_Check(PyObject *o) { return o == Py_None; }
-#if PY_MAJOR_VERSION >= 3
inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }
-#endif
inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); }
template <return_value_policy policy = return_value_policy::automatic_reference>
class unpacking_collector;
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
// TODO: After the deprecated constructors are removed, this macro can be simplified by
// inheriting ctors: `using Parent::Parent`. It's not an option right now because
Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \
PYBIND11_DEPRECATED("Use py::isinstance<py::python_type>(obj) instead") \
bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \
- static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); }
+ static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \
+ template <typename Policy_> \
+ Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) { }
#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
{ if (!m_ptr) throw error_already_set(); } \
Name(object &&o) \
: Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \
- { if (!m_ptr) throw error_already_set(); } \
- template <typename Policy_> \
- Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) { }
+ { if (!m_ptr) throw error_already_set(); }
+
+#define PYBIND11_OBJECT_CHECK_FAILED(Name, o_ptr) \
+ ::pybind11::type_error("Object of type '" + \
+ ::pybind11::detail::get_fully_qualified_tp_name(Py_TYPE(o_ptr)) + \
+ "' is not an instance of '" #Name "'")
#define PYBIND11_OBJECT(Name, Parent, CheckFun) \
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
/* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
- Name(const object &o) : Parent(o) { } \
- Name(object &&o) : Parent(std::move(o)) { }
+ Name(const object &o) : Parent(o) \
+ { if (m_ptr && !check_(m_ptr)) throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr); } \
+ Name(object &&o) : Parent(std::move(o)) \
+ { if (m_ptr && !check_(m_ptr)) throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr); }
#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \
PYBIND11_OBJECT(Name, Parent, CheckFun) \
object value = {};
};
+
+
+/// Wrapper around a Python type object (membership checked with PyType_Check).
+class type : public object {
+public:
+    PYBIND11_OBJECT(type, object, PyType_Check)
+
+    /// Return a type handle from a handle or an object
+    static handle handle_of(handle h) { return handle((PyObject*) Py_TYPE(h.ptr())); }
+
+    /// Return a type object from a handle or an object
+    static type of(handle h) { return type(type::handle_of(h), borrowed_t{}); }
+
+    // Defined in pybind11/cast.h
+    /// Convert C++ type to handle if previously registered. Does not convert
+    /// standard types, like int, float. etc. yet.
+    /// See https://github.com/pybind/pybind11/issues/2486
+    template<typename T>
+    static handle handle_of();
+
+    /// Convert C++ type to type if previously registered. Does not convert
+    /// standard types, like int, float. etc. yet.
+    /// See https://github.com/pybind/pybind11/issues/2486
+    template<typename T>
+    static type of() {return type(type::handle_of<T>(), borrowed_t{}); }
+};
+
class iterable : public object {
public:
PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)
Return a string representation of the object. This is analogous to
the ``str()`` function in Python.
\endrst */
- explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { }
+ explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { if (!m_ptr) throw error_already_set(); }
operator std::string() const {
object temp = *this;
String literal version of `str`
\endrst */
inline str operator"" _s(const char *s, size_t size) { return {s, size}; }
-}
+} // namespace literals
/// \addtogroup pytypes
/// @{
return std::string(buffer, (size_t) length);
}
};
+// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
+// are included in the doxygen group; close here and reopen after as a workaround
+/// @} pytypes
inline bytes::bytes(const pybind11::str &s) {
object temp = s;
m_ptr = obj.release().ptr();
}
+/// \addtogroup pytypes
+/// @{
class none : public object {
public:
PYBIND11_OBJECT(none, object, detail::PyNone_Check)
none() : object(Py_None, borrowed_t{}) { }
};
-#if PY_MAJOR_VERSION >= 3
class ellipsis : public object {
public:
PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)
ellipsis() : object(Py_Ellipsis, borrowed_t{}) { }
};
-#endif
class bool_ : public object {
public:
}
};
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1;
// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).
// (The distinction is critically important when casting a returned -1 error value to some other
return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
}
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
class int_ : public object {
public:
}
template <typename T> operator T *() const {
+ return get_pointer<T>();
+ }
+
+ /// Get the pointer the capsule holds.
+ template<typename T = void>
+ T* get_pointer() const {
auto name = this->name();
- T * result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
+ T *result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
if (!result) pybind11_fail("Unable to extract capsule contents!");
return result;
}
+ /// Replaces a capsule's pointer *without* calling the destructor on the existing one.
+ void set_pointer(const void *value) {
+ if (PyCapsule_SetPointer(m_ptr, const_cast<void *>(value)) != 0)
+ pybind11_fail("Could not set capsule pointer");
+ }
+
const char *name() const { return PyCapsule_GetName(m_ptr); }
};
detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }
};
+// We need to put this into a separate function because the Intel compiler
+// fails to compile enable_if_t<all_of<is_keyword_or_ds<Args>...>::value> part below
+// (tested with ICC 2021.1 Beta 20200827).
+// Returns true iff every Args satisfies detail::is_keyword_or_ds
+// (presumably "keyword or double-star" argument forms — confirm against
+// the is_keyword_or_ds trait, which is defined elsewhere in the header).
+template <typename... Args>
+constexpr bool args_are_all_keyword_or_ds()
+{
+    return detail::all_of<detail::is_keyword_or_ds<Args>...>::value;
+}
+
class dict : public object {
public:
PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)
if (!m_ptr) pybind11_fail("Could not allocate dict object!");
}
template <typename... Args,
- typename = detail::enable_if_t<detail::all_of<detail::is_keyword_or_ds<Args>...>::value>,
+ typename = detail::enable_if_t<args_are_all_keyword_or_ds<Args...>()>,
// MSVC workaround: it can't compile an out-of-line definition, so defer the collector
typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>
explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) { }
class sequence : public object {
public:
PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)
- size_t size() const { return (size_t) PySequence_Size(m_ptr); }
+ size_t size() const {
+ ssize_t result = PySequence_Size(m_ptr);
+ if (result == -1)
+ throw error_already_set();
+ return (size_t) result;
+ }
bool empty() const { return size() == 0; }
detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }
detail::item_accessor operator[](handle h) const { return object::operator[](h); }
buffer_info request(bool writable = false) const {
int flags = PyBUF_STRIDES | PyBUF_FORMAT;
if (writable) flags |= PyBUF_WRITABLE;
- Py_buffer *view = new Py_buffer();
+ auto *view = new Py_buffer();
if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {
delete view;
throw error_already_set();
class memoryview : public object {
public:
- explicit memoryview(const buffer_info& info) {
- static Py_buffer buf { };
- // Py_buffer uses signed sizes, strides and shape!..
- static std::vector<Py_ssize_t> py_strides { };
- static std::vector<Py_ssize_t> py_shape { };
- buf.buf = info.ptr;
- buf.itemsize = info.itemsize;
- buf.format = const_cast<char *>(info.format.c_str());
- buf.ndim = (int) info.ndim;
- buf.len = info.size;
- py_strides.clear();
- py_shape.clear();
- for (size_t i = 0; i < (size_t) info.ndim; ++i) {
- py_strides.push_back(info.strides[i]);
- py_shape.push_back(info.shape[i]);
- }
- buf.strides = py_strides.data();
- buf.shape = py_shape.data();
- buf.suboffsets = nullptr;
- buf.readonly = false;
- buf.internal = nullptr;
+ PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)
- m_ptr = PyMemoryView_FromBuffer(&buf);
+ /** \rst
+ Creates ``memoryview`` from ``buffer_info``.
+
+ ``buffer_info`` must be created from ``buffer::request()``. Otherwise
+ throws an exception.
+
+ For creating a ``memoryview`` from objects that support buffer protocol,
+ use ``memoryview(const object& obj)`` instead of this constructor.
+ \endrst */
+ explicit memoryview(const buffer_info& info) {
+ if (!info.view())
+ pybind11_fail("Prohibited to create memoryview without Py_buffer");
+ // Note: PyMemoryView_FromBuffer never increments obj reference.
+ m_ptr = (info.view()->obj) ?
+ PyMemoryView_FromObject(info.view()->obj) :
+ PyMemoryView_FromBuffer(info.view());
if (!m_ptr)
pybind11_fail("Unable to create memoryview from buffer descriptor");
}
- PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)
+ /** \rst
+ Creates ``memoryview`` from static buffer.
+
+ This method is meant for providing a ``memoryview`` for C/C++ buffer not
+ managed by Python. The caller is responsible for managing the lifetime
+ of ``ptr`` and ``format``, which MUST outlive the memoryview constructed
+ here.
+
+ See also: Python C API documentation for `PyMemoryView_FromBuffer`_.
+
+ .. _PyMemoryView_FromBuffer: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer
+
+ :param ptr: Pointer to the buffer.
+ :param itemsize: Byte size of an element.
+ :param format: Pointer to the null-terminated format string. For
+ homogeneous Buffers, this should be set to
+ ``format_descriptor<T>::value``.
+ :param shape: Shape of the tensor (1 entry per dimension).
+    :param strides: Number of bytes between adjacent entries (for each
+        dimension).
+ :param readonly: Flag to indicate if the underlying storage may be
+ written to.
+ \endrst */
+ static memoryview from_buffer(
+ void *ptr, ssize_t itemsize, const char *format,
+ detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides, bool readonly = false);
+
+ static memoryview from_buffer(
+ const void *ptr, ssize_t itemsize, const char *format,
+ detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides) {
+ return memoryview::from_buffer(
+ const_cast<void*>(ptr), itemsize, format, shape, strides, true);
+ }
+
+ template<typename T>
+ static memoryview from_buffer(
+ T *ptr, detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides, bool readonly = false) {
+ return memoryview::from_buffer(
+ reinterpret_cast<void*>(ptr), sizeof(T),
+ format_descriptor<T>::value, shape, strides, readonly);
+ }
+
+ template<typename T>
+ static memoryview from_buffer(
+ const T *ptr, detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides) {
+ return memoryview::from_buffer(
+ const_cast<T*>(ptr), shape, strides, true);
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ /** \rst
+ Creates ``memoryview`` from static memory.
+
+ This method is meant for providing a ``memoryview`` for C/C++ buffer not
+ managed by Python. The caller is responsible for managing the lifetime
+ of ``mem``, which MUST outlive the memoryview constructed here.
+
+ This method is not available in Python 2.
+
+    See also: Python C API documentation for `PyMemoryView_FromMemory`_.
+
+ .. _PyMemoryView_FromMemory: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory
+ \endrst */
+ static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) {
+ PyObject* ptr = PyMemoryView_FromMemory(
+ reinterpret_cast<char*>(mem), size,
+ (readonly) ? PyBUF_READ : PyBUF_WRITE);
+ if (!ptr)
+ pybind11_fail("Could not allocate memoryview object!");
+ return memoryview(object(ptr, stolen_t{}));
+ }
+
+ static memoryview from_memory(const void *mem, ssize_t size) {
+ return memoryview::from_memory(const_cast<void*>(mem), size, true);
+ }
+#endif
};
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Out-of-line definition of the raw-pointer from_buffer() overload declared
+// in the class body: builds a temporary Py_buffer describing caller-owned
+// memory and wraps it via PyMemoryView_FromBuffer.
+inline memoryview memoryview::from_buffer(
+    void *ptr, ssize_t itemsize, const char* format,
+    detail::any_container<ssize_t> shape,
+    detail::any_container<ssize_t> strides, bool readonly) {
+    size_t ndim = shape->size();
+    if (ndim != strides->size())
+        pybind11_fail("memoryview: shape length doesn't match strides length");
+    // Total element count; a 0-dimensional shape yields length 0, not 1.
+    ssize_t size = ndim ? 1 : 0;
+    for (size_t i = 0; i < ndim; ++i)
+        size *= (*shape)[i];
+    Py_buffer view;
+    view.buf = ptr;
+    view.obj = nullptr;  // no owning object: the caller manages the memory's lifetime
+    view.len = size * itemsize;
+    view.readonly = static_cast<int>(readonly);
+    view.itemsize = itemsize;
+    view.format = const_cast<char*>(format);
+    view.ndim = static_cast<int>(ndim);
+    view.shape = shape->data();
+    view.strides = strides->data();
+    view.suboffsets = nullptr;
+    view.internal = nullptr;
+    PyObject* obj = PyMemoryView_FromBuffer(&view);
+    if (!obj)
+        throw error_already_set();
+    return memoryview(object(obj, stolen_t{}));
+}
+#endif // DOXYGEN_SHOULD_SKIP_THIS
/// @} pytypes
/// \addtogroup python_builtins
/// @{
+
+/// Get the length of a Python object.
inline size_t len(handle h) {
ssize_t result = PyObject_Length(h.ptr());
if (result < 0)
- pybind11_fail("Unable to compute length of object");
+ throw error_already_set();
return (size_t) result;
}
+/// Get the length hint of a Python object.
+/// Returns 0 when this cannot be determined.
inline size_t len_hint(handle h) {
#if PY_VERSION_HEX >= 0x03040000
ssize_t result = PyObject_LengthHint(h.ptr(), 0);
}
/// @} python_builtins
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <typename D> iterator object_api<D>::begin() const { return iter(derived()); }
template <typename D> iterator object_api<D>::end() const { return iterator::sentinel(); }
template <typename D> item_accessor object_api<D>::operator[](handle key) const {
str_attr_accessor object_api<D>::doc() const { return attr("__doc__"); }
template <typename D>
-handle object_api<D>::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); }
+handle object_api<D>::get_type() const { return type::handle_of(derived()); }
template <typename D>
bool object_api<D>::rich_compare(object_api const &other, int value) const {
#undef PYBIND11_MATH_OPERATOR_UNARY
#undef PYBIND11_MATH_OPERATOR_BINARY
-NAMESPACE_END(detail)
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
# define PYBIND11_HAS_VARIANT 1
#endif
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/// Extracts an const lvalue reference or rvalue reference for U based on the type of T (e.g. for
/// forwarding a container element). Typically used indirect via forwarded_type(), below.
static handle cast(T_ &&src, return_value_policy policy, handle parent) {
if (!src)
return none().inc_ref();
- policy = return_value_policy_override<typename T::value_type>::policy(policy);
+ if (!std::is_lvalue_reference<T>::value) {
+ policy = return_value_policy_override<T>::policy(policy);
+ }
return value_conv::cast(*std::forward<T_>(src), policy, parent);
}
PYBIND11_TYPE_CASTER(T, _("Optional[") + value_conv::name + _("]"));
};
-#ifdef PYBIND11_HAS_OPTIONAL
+#if defined(PYBIND11_HAS_OPTIONAL)
template<typename T> struct type_caster<std::optional<T>>
: public optional_caster<std::optional<T>> {};
: public void_caster<std::nullopt_t> {};
#endif
-#ifdef PYBIND11_HAS_EXP_OPTIONAL
+#if defined(PYBIND11_HAS_EXP_OPTIONAL)
template<typename T> struct type_caster<std::experimental::optional<T>>
: public optional_caster<std::experimental::optional<T>> {};
PYBIND11_TYPE_CASTER(Type, _("Union[") + detail::concat(make_caster<Ts>::name...) + _("]"));
};
-#ifdef PYBIND11_HAS_VARIANT
+#if defined(PYBIND11_HAS_VARIANT)
template <typename... Ts>
struct type_caster<std::variant<Ts...>> : variant_caster<std::variant<Ts...>> { };
#endif
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
inline std::ostream &operator<<(std::ostream &os, const handle &obj) {
os << (std::string) str(obj);
return os;
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
#if defined(_MSC_VER)
#pragma warning(pop)
#include <algorithm>
#include <sstream>
-NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
/* SFINAE helper class used by 'is_comparable */
template <typename T> struct container_traits {
return v.release();
}));
+ cl.def("clear",
+ [](Vector &v) {
+ v.clear();
+ },
+ "Clear the contents"
+ );
+
cl.def("extend",
[](Vector &v, const Vector &src) {
v.insert(v.end(), src.begin(), src.end());
if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))
throw error_already_set();
- Vector *seq = new Vector();
+ auto *seq = new Vector();
seq->reserve((size_t) slicelength);
for (size_t i=0; i<slicelength; ++i) {
template <typename Vector>
struct vector_has_data_and_format<Vector, enable_if_t<std::is_same<decltype(format_descriptor<typename Vector::value_type>::format(), std::declval<Vector>().data()), typename Vector::value_type*>::value>> : std::true_type {};
+// [workaround(intel)] Separate function required here
+// Workaround as the Intel compiler does not compile the enable_if_t part below
+// (tested with icc (ICC) 2021.1 Beta 20200827)
+template <typename... Args>
+constexpr bool args_any_are_buffer() {
+ return detail::any_of<std::is_same<Args, buffer_protocol>...>::value;
+}
+
+// [workaround(intel)] Separate function required here
+// [workaround(msvc)] Can't use constexpr bool in return type
+
// Add the buffer interface to a vector
template <typename Vector, typename Class_, typename... Args>
-enable_if_t<detail::any_of<std::is_same<Args, buffer_protocol>...>::value>
-vector_buffer(Class_& cl) {
+void vector_buffer_impl(Class_& cl, std::true_type) {
using T = typename Vector::value_type;
static_assert(vector_has_data_and_format<Vector>::value, "There is not an appropriate format descriptor for this vector");
if (!detail::compare_buffer_info<T>::compare(info) || (ssize_t) sizeof(T) != info.itemsize)
throw type_error("Format mismatch (Python: " + info.format + " C++: " + format_descriptor<T>::format() + ")");
- auto vec = std::unique_ptr<Vector>(new Vector());
- vec->reserve((size_t) info.shape[0]);
T *p = static_cast<T*>(info.ptr);
ssize_t step = info.strides[0] / static_cast<ssize_t>(sizeof(T));
T *end = p + info.shape[0] * step;
- for (; p != end; p += step)
- vec->push_back(*p);
- return vec.release();
+ if (step == 1) {
+ return Vector(p, end);
+ }
+ else {
+ Vector vec;
+ vec.reserve((size_t) info.shape[0]);
+ for (; p != end; p += step)
+ vec.push_back(*p);
+ return vec;
+ }
}));
return;
}
template <typename Vector, typename Class_, typename... Args>
-enable_if_t<!detail::any_of<std::is_same<Args, buffer_protocol>...>::value> vector_buffer(Class_&) {}
+void vector_buffer_impl(Class_&, std::false_type) {}
+
+template <typename Vector, typename Class_, typename... Args>
+void vector_buffer(Class_& cl) {
+ vector_buffer_impl<Vector, Class_, Args...>(cl, detail::any_of<std::is_same<Args, buffer_protocol>...>{});
+}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
//
// std::vector
// std::map, std::unordered_map
//
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(detail)
/* Fallback functions */
template <typename, typename, typename... Args> void map_if_insertion_operator(const Args &...) { }
// Map assignment when copy-assignable: just copy the value
template <typename Map, typename Class_>
-void map_assignment(enable_if_t<std::is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {
+void map_assignment(enable_if_t<is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {
using KeyType = typename Map::key_type;
using MappedType = typename Map::mapped_type;
// Not copy-assignable, but still copy-constructible: we can update the value by erasing and reinserting
template<typename Map, typename Class_>
void map_assignment(enable_if_t<
- !std::is_copy_assignable<typename Map::mapped_type>::value &&
+ !is_copy_assignable<typename Map::mapped_type>::value &&
is_copy_constructible<typename Map::mapped_type>::value,
Class_> &cl) {
using KeyType = typename Map::key_type;
}
-NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(detail)
template <typename Map, typename holder_type = std::unique_ptr<Map>, typename... Args>
class_<Map, holder_type> bind_map(handle scope, const std::string &name, Args&&... args) {
return cl;
}
-NAMESPACE_END(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
-from ._version import version_info, __version__ # noqa: F401 imported but unused
+# -*- coding: utf-8 -*-
+from ._version import version_info, __version__
+from .commands import get_include, get_cmake_dir
-def get_include(user=False):
- from distutils.dist import Distribution
- import os
- import sys
- # Are we running in a virtual environment?
- virtualenv = hasattr(sys, 'real_prefix') or \
- sys.prefix != getattr(sys, "base_prefix", sys.prefix)
-
- # Are we running in a conda environment?
- conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
-
- if virtualenv:
- return os.path.join(sys.prefix, 'include', 'site',
- 'python' + sys.version[:3])
- elif conda:
- if os.name == 'nt':
- return os.path.join(sys.prefix, 'Library', 'include')
- else:
- return os.path.join(sys.prefix, 'include')
- else:
- dist = Distribution({'name': 'pybind11'})
- dist.parse_config_files()
-
- dist_cobj = dist.get_command_obj('install', create=True)
-
- # Search for packages in user's home directory?
- if user:
- dist_cobj.user = user
- dist_cobj.prefix = ""
- dist_cobj.finalize_options()
-
- return os.path.dirname(dist_cobj.install_headers)
+__all__ = (
+ "version_info",
+ "__version__",
+ "get_include",
+ "get_cmake_dir",
+)
+# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import sys
import sysconfig
-from . import get_include
+from .commands import get_include, get_cmake_dir
def print_includes():
-    dirs = [sysconfig.get_path('include'),
-            sysconfig.get_path('platinclude'),
-            get_include(),
-            get_include(True)]
+    # type: () -> None
+    # Gather the interpreter's include dirs plus pybind11's own headers.
+    dirs = [
+        sysconfig.get_path("include"),
+        sysconfig.get_path("platinclude"),
+        get_include(),
+    ]
# Make unique but preserve order
unique_dirs = []
for d in dirs:
-        if d not in unique_dirs:
+        if d and d not in unique_dirs:
unique_dirs.append(d)
-    print(' '.join('-I' + d for d in unique_dirs))
+    print(" ".join("-I" + d for d in unique_dirs))
def main():
-    parser = argparse.ArgumentParser(prog='python -m pybind11')
-    parser.add_argument('--includes', action='store_true',
-                        help='Include flags for both pybind11 and Python headers.')
+    # type: () -> None
+    # CLI entry point: prints compiler include flags and/or the CMake dir.
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--includes",
+        action="store_true",
+        help="Include flags for both pybind11 and Python headers.",
+    )
+    parser.add_argument(
+        "--cmakedir",
+        action="store_true",
+        help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
+    )
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
if args.includes:
print_includes()
+    if args.cmakedir:
+        print(get_cmake_dir())
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
-version_info = (2, 4, 1)
-__version__ = '.'.join(map(str, version_info))
+# -*- coding: utf-8 -*-
+
+
+def _to_int(s):
+    """Convert one version component to int; keep non-numeric parts as str.
+
+    Pre-release suffixes (e.g. "dev1") fail int() and are returned verbatim,
+    so version_info may mix ints and strings.
+    """
+    try:
+        return int(s)
+    except ValueError:
+        return s
+
+
+# Single source of truth for the package version; version_info is derived.
+__version__ = "2.6.2"
+version_info = tuple(_to_int(s) for s in __version__.split("."))
--- /dev/null
+from typing import Union, Tuple
+
+def _to_int(s: str) -> Union[int, str]: ...
+
+__version__: str
+version_info: Tuple[Union[int, str], ...]
--- /dev/null
+# -*- coding: utf-8 -*-
+import os
+
+
+DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+def get_include(user=False):
+    # type: (bool) -> str
+    """Return the path to the pybind11 headers.
+
+    Prefers the headers bundled inside an installed package directory and
+    falls back to the in-tree ``include`` directory for a source checkout.
+    The ``user`` argument is accepted for backward compatibility only and
+    is not used by the body.
+    """
+    installed_path = os.path.join(DIR, "include")
+    source_path = os.path.join(os.path.dirname(DIR), "include")
+    return installed_path if os.path.exists(installed_path) else source_path
+
+
+def get_cmake_dir():
+    # type: () -> str
+    """Return the directory containing pybind11's CMake package files.
+
+    Raises ImportError when that directory does not exist (e.g. a plain
+    source checkout), since the CMake files are only laid out by an install.
+    """
+    cmake_installed_path = os.path.join(DIR, "share", "cmake", "pybind11")
+    if os.path.exists(cmake_installed_path):
+        return cmake_installed_path
+    else:
+        msg = "pybind11 not installed, installation required to access the CMake files"
+        raise ImportError(msg)
--- /dev/null
+# -*- coding: utf-8 -*-
+
+"""
+This module provides helpers for C++11+ projects using pybind11.
+
+LICENSE:
+
+Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+# IMPORTANT: If you change this file in the pybind11 repo, also review
+# setup_helpers.pyi for matching changes.
+#
+# If you copy this file in, you don't
+# need the .pyi file; it's just an interface file for static type checkers.
+
+import contextlib
+import os
+import shutil
+import sys
+import tempfile
+import threading
+import platform
+import warnings
+
+try:
+ from setuptools.command.build_ext import build_ext as _build_ext
+ from setuptools import Extension as _Extension
+except ImportError:
+ from distutils.command.build_ext import build_ext as _build_ext
+ from distutils.extension import Extension as _Extension
+
+import distutils.errors
+import distutils.ccompiler
+
+
+# Platform/interpreter detection used to select compiler flags below.
+WIN = sys.platform.startswith("win32")
+PY2 = sys.version_info[0] < 3
+MACOS = sys.platform.startswith("darwin")
+# MSVC spells the C++ standard flag differently from GCC/Clang.
+STD_TMPL = "/std:c++{}" if WIN else "-std=c++{}"
+
+
+# It is recommended to use PEP 518 builds if using this module. However, this
+# file explicitly supports being copied into a user's project directory
+# standalone, and pulling pybind11 with the deprecated setup_requires feature.
+# If you copy the file, remember to add it to your MANIFEST.in, and add the current
+# directory into your path if it sits beside your setup.py.
+
+
+class Pybind11Extension(_Extension):
+    """
+    Build a C++11+ Extension module with pybind11. This automatically adds the
+    recommended flags when you init the extension and assumes C++ sources - you
+    can further modify the options yourself.
+
+    The customizations are:
+
+    * ``/EHsc`` and ``/bigobj`` on Windows
+    * ``stdlib=libc++`` on macOS
+    * ``visibility=hidden`` and ``-g0`` on Unix
+
+    Finally, you can set ``cxx_std`` via constructor or afterwards to enable
+    flags for C++ std, and a few extra helper flags related to the C++ standard
+    level. It is _highly_ recommended you either set this, or use the provided
+    ``build_ext``, which will search for the highest supported extension for
+    you if the ``cxx_std`` property is not set. Do not set the ``cxx_std``
+    property more than once, as flags are added when you set it. Set the
+    property to None to disable the addition of C++ standard flags.
+
+    If you want to add pybind11 headers manually, for example for an exact
+    git checkout, then set ``include_pybind11=False``.
+
+    Warning: do not use property-based access to the instance on Python 2 -
+    this is an ugly old-style class due to Distutils.
+    """
+
+    # flags are prepended, so that they can be further overridden, e.g. by
+    # ``extra_compile_args=["-g"]``.
+
+    def _add_cflags(self, flags):
+        self.extra_compile_args[:0] = flags
+
+    def _add_ldflags(self, flags):
+        self.extra_link_args[:0] = flags
+
+    def __init__(self, *args, **kwargs):
+        """Accepts the usual distutils Extension arguments, plus two extra
+        keyword options popped from kwargs: ``cxx_std`` (default 0) and
+        ``include_pybind11`` (default True).
+        """
+
+        self._cxx_level = 0
+        cxx_std = kwargs.pop("cxx_std", 0)
+
+        if "language" not in kwargs:
+            kwargs["language"] = "c++"
+
+        include_pybind11 = kwargs.pop("include_pybind11", True)
+
+        # Can't use super here because distutils has old-style classes in
+        # Python 2!
+        _Extension.__init__(self, *args, **kwargs)
+
+        # Include the installed package pybind11 headers
+        if include_pybind11:
+            # If using setup_requires, this fails the first time - that's okay
+            try:
+                import pybind11
+
+                pyinc = pybind11.get_include()
+
+                if pyinc not in self.include_dirs:
+                    self.include_dirs.append(pyinc)
+            except ImportError:
+                pass
+
+        # Have to use the accessor manually to support Python 2 distutils
+        Pybind11Extension.cxx_std.__set__(self, cxx_std)
+
+        cflags = []
+        ldflags = []
+        if WIN:
+            cflags += ["/EHsc", "/bigobj"]
+        else:
+            cflags += ["-fvisibility=hidden", "-g0"]
+            if MACOS:
+                cflags += ["-stdlib=libc++"]
+                ldflags += ["-stdlib=libc++"]
+        self._add_cflags(cflags)
+        self._add_ldflags(ldflags)
+
+    @property
+    def cxx_std(self):
+        """
+        The CXX standard level. If set, will add the required flags. If left
+        at 0, it will trigger an automatic search when pybind11's build_ext
+        is used. If None, will have no effect. Besides just the flags, this
+        may add a register warning/error fix for Python 2 or macos-min 10.9
+        or 10.14.
+        """
+        return self._cxx_level
+
+    @cxx_std.setter
+    def cxx_std(self, level):
+
+        if self._cxx_level:
+            warnings.warn("You cannot safely change the cxx_level after setting it!")
+
+        # MSVC 2015 Update 3 and later only have 14 (and later 17) modes, so
+        # force a valid flag here.
+        if WIN and level == 11:
+            level = 14
+
+        self._cxx_level = level
+
+        if not level:
+            return
+
+        cflags = [STD_TMPL.format(level)]
+        ldflags = []
+
+        if MACOS and "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
+            # C++17 requires a higher min version of macOS. An earlier version
+            # (10.12 or 10.13) can be set manually via environment variable if
+            # you are careful in your feature usage, but 10.14 is the safest
+            # setting for general use. However, never set higher than the
+            # current macOS version!
+            current_macos = tuple(int(x) for x in platform.mac_ver()[0].split(".")[:2])
+            desired_macos = (10, 9) if level < 17 else (10, 14)
+            macos_string = ".".join(str(x) for x in min(current_macos, desired_macos))
+            macosx_min = "-mmacosx-version-min=" + macos_string
+            cflags += [macosx_min]
+            ldflags += [macosx_min]
+
+        if PY2:
+            if WIN:
+                # Will be ignored on MSVC 2015, where C++17 is not supported so
+                # this flag is not valid.
+                cflags += ["/wd5033"]
+            elif level >= 17:
+                cflags += ["-Wno-register"]
+            elif level >= 14:
+                cflags += ["-Wno-deprecated-register"]
+
+        self._add_cflags(cflags)
+        self._add_ldflags(ldflags)
+
+
+# Just in case someone clever tries to multithread
+tmp_chdir_lock = threading.Lock()
+cpp_cache_lock = threading.Lock()
+
+
+@contextlib.contextmanager
+def tmp_chdir():
+    """Create a temporary directory, chdir into it, and clean up on exit.
+
+    Yields the temporary directory path. Serialized by a module-level lock
+    because the current working directory is global to the process.
+    """
+
+    # Threadsafe
+    with tmp_chdir_lock:
+        olddir = os.getcwd()
+        try:
+            tmpdir = tempfile.mkdtemp()
+            os.chdir(tmpdir)
+            yield tmpdir
+        finally:
+            os.chdir(olddir)
+            shutil.rmtree(tmpdir)
+
+
+# cf http://bugs.python.org/issue26689
+def has_flag(compiler, flag):
+    """
+    Return True if the given flag is accepted when compiling a trivial C++
+    source file with the specified compiler, otherwise False.
+    """
+
+    with tmp_chdir():
+        fname = "flagcheck.cpp"
+        with open(fname, "w") as f:
+            # Don't trigger -Wunused-parameter.
+            f.write("int main (int, char **) { return 0; }")
+
+        try:
+            compiler.compile([fname], extra_postargs=[flag])
+        except distutils.errors.CompileError:
+            return False
+        return True
+
+
+# Cache: the first successful probe result is remembered for later calls.
+cpp_flag_cache = None
+
+
+def auto_cpp_level(compiler):
+    """
+    Return the max supported C++ std level (17, 14, or 11). Returns latest on Windows.
+    """
+
+    # MSVC accepts /std:c++latest, so no per-level probing is done on Windows.
+    if WIN:
+        return "latest"
+
+    global cpp_flag_cache
+
+    # If this has been previously calculated with the same args, return that
+    with cpp_cache_lock:
+        if cpp_flag_cache:
+            return cpp_flag_cache
+
+    levels = [17, 14, 11]
+
+    # Probe from newest to oldest; first accepted flag wins and is cached.
+    for level in levels:
+        if has_flag(compiler, STD_TMPL.format(level)):
+            with cpp_cache_lock:
+                cpp_flag_cache = level
+            return level
+
+    msg = "Unsupported compiler -- at least C++11 support is needed!"
+    raise RuntimeError(msg)
+
+
+class build_ext(_build_ext):  # noqa: N801
+    """
+    Customized build_ext that allows an auto-search for the highest supported
+    C++ level for Pybind11Extension. This is only needed for the auto-search
+    for now, and is completely optional otherwise.
+    """
+
+    def build_extensions(self):
+        """
+        Build extensions, injecting C++ std for Pybind11Extension if needed.
+        """
+
+        for ext in self.extensions:
+            # Only touch extensions that opted into auto-detection
+            # (cxx_std left at its default of 0).
+            if hasattr(ext, "_cxx_level") and ext._cxx_level == 0:
+                # Python 2 syntax - old-style distutils class
+                ext.__class__.cxx_std.__set__(ext, auto_cpp_level(self.compiler))
+
+        # Python 2 doesn't allow super here, since distutils uses old-style
+        # classes!
+        _build_ext.build_extensions(self)
+
+
+def naive_recompile(obj, src):
+    """
+    This will recompile only if the source file changes. It does not check
+    header files, so a more advanced function or Ccache is better if you have
+    editable header files in your package.
+    """
+    # Rebuild when the object file is older than its source (mtime compare).
+    return os.stat(obj).st_mtime < os.stat(src).st_mtime
+
+
+def no_recompile(obg, src):
+    # NOTE(review): the first parameter is spelled "obg" -- looks like a typo
+    # for "obj". It is only ever passed positionally here, so it is harmless,
+    # but it disagrees with the .pyi stub; confirm and align.
+    """
+    This is the safest but slowest choice (and is the default) - will always
+    recompile sources.
+    """
+    return True
+
+
+# Optional parallel compile utility
+# inspired by: http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
+# and: https://github.com/tbenthompson/cppimport/blob/stable/cppimport/build_module.py
+# and NumPy's parallel distutils module:
+# https://github.com/numpy/numpy/blob/master/numpy/distutils/ccompiler.py
+class ParallelCompile(object):
+    """
+    Make a parallel compile function. Inspired by
+    numpy.distutils.ccompiler.CCompiler_compile and cppimport.
+
+    This takes several arguments that allow you to customize the compile
+    function created:
+
+    envvar:
+        Set an environment variable to control the compilation threads, like
+        NPY_NUM_BUILD_JOBS
+    default:
+        0 will automatically multithread, or 1 will only multithread if the
+        envvar is set.
+    max:
+        The limit for automatic multithreading if non-zero
+    needs_recompile:
+        A function of (obj, src) that returns True when recompile is needed. No
+        effect in isolated mode; use ccache instead, see
+        https://github.com/matplotlib/matplotlib/issues/1507/
+
+    To use::
+
+        ParallelCompile("NPY_NUM_BUILD_JOBS").install()
+
+    or::
+
+        with ParallelCompile("NPY_NUM_BUILD_JOBS"):
+            setup(...)
+
+    By default, this assumes all files need to be recompiled. A smarter
+    function can be provided via needs_recompile. If the output has not yet
+    been generated, the compile will always run, and this function is not
+    called.
+    """
+
+    __slots__ = ("envvar", "default", "max", "_old", "needs_recompile")
+
+    def __init__(self, envvar=None, default=0, max=0, needs_recompile=no_recompile):
+        self.envvar = envvar
+        self.default = default
+        self.max = max
+        self.needs_recompile = needs_recompile
+        # Stack of saved compile functions, pushed/popped by __enter__/__exit__.
+        self._old = []
+
+    def function(self):
+        """
+        Builds a function object usable as distutils.ccompiler.CCompiler.compile.
+        """
+
+        def compile_function(
+            compiler,
+            sources,
+            output_dir=None,
+            macros=None,
+            include_dirs=None,
+            debug=0,
+            extra_preargs=None,
+            extra_postargs=None,
+            depends=None,
+        ):
+
+            # These lines are directly from distutils.ccompiler.CCompiler
+            macros, objects, extra_postargs, pp_opts, build = compiler._setup_compile(
+                output_dir, macros, include_dirs, sources, depends, extra_postargs
+            )
+            cc_args = compiler._get_cc_args(pp_opts, debug, extra_preargs)
+
+            # The number of threads; start with default.
+            threads = self.default
+
+            # Determine the number of compilation threads, unless set by an environment variable.
+            if self.envvar is not None:
+                threads = int(os.environ.get(self.envvar, self.default))
+
+            def _single_compile(obj):
+                # Compile one object file; entries without build info are skipped.
+                try:
+                    src, ext = build[obj]
+                except KeyError:
+                    return
+
+                if not os.path.exists(obj) or self.needs_recompile(obj, src):
+                    compiler._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+            try:
+                import multiprocessing
+                from multiprocessing.pool import ThreadPool
+            except ImportError:
+                # No multiprocessing support on this platform: fall back to serial.
+                threads = 1
+
+            if threads == 0:
+                try:
+                    threads = multiprocessing.cpu_count()
+                    threads = self.max if self.max and self.max < threads else threads
+                except NotImplementedError:
+                    threads = 1
+
+            if threads > 1:
+                for _ in ThreadPool(threads).imap_unordered(_single_compile, objects):
+                    pass
+            else:
+                for ob in objects:
+                    _single_compile(ob)
+
+            return objects
+
+        return compile_function
+
+    def install(self):
+        # Monkey-patch distutils' compile entry point with the parallel version.
+        distutils.ccompiler.CCompiler.compile = self.function()
+        return self
+
+    def __enter__(self):
+        # Save the current compile function so __exit__ can restore it.
+        self._old.append(distutils.ccompiler.CCompiler.compile)
+        return self.install()
+
+    def __exit__(self, *args):
+        distutils.ccompiler.CCompiler.compile = self._old.pop()
--- /dev/null
+# IMPORTANT: Should stay in sync with setup_helpers.py (mostly checked by CI /
+# pre-commit).
+
+from typing import Any, Callable, Iterator, Optional, Type, TypeVar, Union
+from types import TracebackType
+
+from distutils.command.build_ext import build_ext as _build_ext # type: ignore
+from distutils.extension import Extension as _Extension
+import distutils.ccompiler
+import contextlib
+
+WIN: bool
+PY2: bool
+MACOS: bool
+STD_TMPL: str
+
+class Pybind11Extension(_Extension):
+    # NOTE(review): setup_helpers.py defines _add_ldflags (not _add_lflags)
+    # and its methods take a single list argument rather than *flags; its
+    # __init__ also accepts an include_pybind11 keyword. This stub looks out
+    # of sync with the implementation -- confirm and align.
+    def _add_cflags(self, *flags: str) -> None: ...
+    def _add_lflags(self, *flags: str) -> None: ...
+    def __init__(
+        self, *args: Any, cxx_std: int = 0, language: str = "c++", **kwargs: Any
+    ) -> None: ...
+    @property
+    def cxx_std(self) -> int: ...
+    @cxx_std.setter
+    def cxx_std(self, level: int) -> None: ...
+
+# Typed signatures for the module-level helpers in setup_helpers.py.
+@contextlib.contextmanager
+def tmp_chdir() -> Iterator[str]: ...
+def has_flag(compiler: distutils.ccompiler.CCompiler, flag: str) -> bool: ...
+def auto_cpp_level(compiler: distutils.ccompiler.CCompiler) -> Union[int, str]: ...
+
+class build_ext(_build_ext): # type: ignore
+    def build_extensions(self) -> None: ...
+
+# NOTE(review): the implementation spells no_recompile's first parameter
+# "obg" while this stub says "obj"; positional use keeps it harmless, but
+# the names should agree.
+def no_recompile(obj: str, src: str) -> bool: ...
+def naive_recompile(obj: str, src: str) -> bool: ...
+
+T = TypeVar("T", bound="ParallelCompile")
+
+class ParallelCompile:
+    # Typed interface for setup_helpers.ParallelCompile; install/__enter__
+    # return self (typed via the T TypeVar) to allow chaining.
+    envvar: Optional[str]
+    default: int
+    max: int
+    needs_recompile: Callable[[str, str], bool]
+    def __init__(
+        self,
+        envvar: Optional[str] = None,
+        default: int = 0,
+        max: int = 0,
+        needs_recompile: Callable[[str, str], bool] = no_recompile,
+    ) -> None: ...
+    def function(self) -> Any: ...
+    def install(self: T) -> T: ...
+    def __enter__(self: T) -> T: ...
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> None: ...
--- /dev/null
+[build-system]
+requires = ["setuptools>=42", "wheel", "cmake>=3.18", "ninja"]
+build-backend = "setuptools.build_meta"
+[metadata]
+long_description = file: README.rst
+long_description_content_type = text/x-rst
+description = Seamless operability between C++11 and Python
+author = Wenzel Jakob
+author_email = wenzel.jakob@epfl.ch
+url = https://github.com/pybind/pybind11
+license = BSD
+
+classifiers =
+    Development Status :: 5 - Production/Stable
+    Intended Audience :: Developers
+    Topic :: Software Development :: Libraries :: Python Modules
+    Topic :: Utilities
+    Programming Language :: C++
+    Programming Language :: Python :: 2.7
+    Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.5
+    Programming Language :: Python :: 3.6
+    Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
+    License :: OSI Approved :: BSD License
+    Programming Language :: Python :: Implementation :: PyPy
+    Programming Language :: Python :: Implementation :: CPython
+
+keywords =
+ C++11
+ Python bindings
+
+[options]
+python_requires = >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+zip_safe = False
+
[bdist_wheel]
universal=1
+[check-manifest]
+ignore =
+ tests/**
+ docs/**
+ tools/**
+ include/**
+ .*
+ pybind11/include/**
+ pybind11/share/**
+ CMakeLists.txt
+
+
[flake8]
max-line-length = 99
show_source = True
E201, E241, W504,
# camelcase 'cPickle' imported as lowercase 'pickle'
N813
+ # Black conflict
+ W503, E203
+
+[mypy]
+files = pybind11
+python_version = 2.7
+warn_unused_configs = True
+
+# Currently (0.800) identical to --strict
+disallow_any_generics = True
+disallow_subclassing_any = True
+disallow_untyped_calls = True
+disallow_untyped_defs = True
+disallow_incomplete_defs = True
+check_untyped_defs = True
+disallow_untyped_decorators = True
+no_implicit_optional = True
+warn_redundant_casts = True
+warn_unused_ignores = True
+warn_return_any = True
+no_implicit_reexport = True
+strict_equality = True
+
+[tool:pytest]
+timeout = 300
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
# Setup script for PyPI; use CMakeFile.txt to build extension modules
-from setuptools import setup
-from distutils.command.install_headers import install_headers
-from pybind11 import __version__
+import contextlib
import os
+import re
+import shutil
+import string
+import subprocess
+import sys
+import tempfile
-# Prevent installation of pybind11 headers by setting
-# PYBIND11_USE_CMAKE.
-if os.environ.get('PYBIND11_USE_CMAKE'):
- headers = []
-else:
- headers = [
- 'include/pybind11/detail/class.h',
- 'include/pybind11/detail/common.h',
- 'include/pybind11/detail/descr.h',
- 'include/pybind11/detail/init.h',
- 'include/pybind11/detail/internals.h',
- 'include/pybind11/detail/typeid.h',
- 'include/pybind11/attr.h',
- 'include/pybind11/buffer_info.h',
- 'include/pybind11/cast.h',
- 'include/pybind11/chrono.h',
- 'include/pybind11/common.h',
- 'include/pybind11/complex.h',
- 'include/pybind11/eigen.h',
- 'include/pybind11/embed.h',
- 'include/pybind11/eval.h',
- 'include/pybind11/functional.h',
- 'include/pybind11/iostream.h',
- 'include/pybind11/numpy.h',
- 'include/pybind11/operators.h',
- 'include/pybind11/options.h',
- 'include/pybind11/pybind11.h',
- 'include/pybind11/pytypes.h',
- 'include/pybind11/stl.h',
- 'include/pybind11/stl_bind.h',
- ]
-
-
-class InstallHeaders(install_headers):
- """Use custom header installer because the default one flattens subdirectories"""
- def run(self):
- if not self.distribution.headers:
- return
-
- for header in self.distribution.headers:
- subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))
- install_dir = os.path.join(self.install_dir, subdir)
- self.mkpath(install_dir)
-
- (out, _) = self.copy_file(header, install_dir)
- self.outfiles.append(out)
-
-
-setup(
- name='pybind11',
- version=__version__,
- description='Seamless operability between C++11 and Python',
- author='Wenzel Jakob',
- author_email='wenzel.jakob@epfl.ch',
- url='https://github.com/pybind/pybind11',
- download_url='https://github.com/pybind/pybind11/tarball/v' + __version__,
- packages=['pybind11'],
- license='BSD',
- headers=headers,
- cmdclass=dict(install_headers=InstallHeaders),
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'Topic :: Software Development :: Libraries :: Python Modules',
- 'Topic :: Utilities',
- 'Programming Language :: C++',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.2',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
- 'Programming Language :: Python :: 3.6',
- 'License :: OSI Approved :: BSD License'
- ],
- keywords='C++11, Python bindings',
- long_description="""pybind11 is a lightweight header-only library that
-exposes C++ types in Python and vice versa, mainly to create Python bindings of
-existing C++ code. Its goals and syntax are similar to the excellent
-Boost.Python by David Abrahams: to minimize boilerplate code in traditional
-extension modules by inferring type information using compile-time
-introspection.
-
-The main issue with Boost.Python-and the reason for creating such a similar
-project-is Boost. Boost is an enormously large and complex suite of utility
-libraries that works with almost every C++ compiler in existence. This
-compatibility has its cost: arcane template tricks and workarounds are
-necessary to support the oldest and buggiest of compiler specimens. Now that
-C++11-compatible compilers are widely available, this heavy machinery has
-become an excessively large and unnecessary dependency.
-
-Think of this library as a tiny self-contained version of Boost.Python with
-everything stripped away that isn't relevant for binding generation. Without
-comments, the core header files only require ~4K lines of code and depend on
-Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
-compact implementation was possible thanks to some of the new C++11 language
-features (specifically: tuples, lambda functions and variadic templates). Since
-its creation, this library has grown beyond Boost.Python in many ways, leading
-to dramatically simpler binding code in many common situations.""")
+import setuptools.command.sdist
+
+DIR = os.path.abspath(os.path.dirname(__file__))
+VERSION_REGEX = re.compile(
+ r"^\s*#\s*define\s+PYBIND11_VERSION_([A-Z]+)\s+(.*)$", re.MULTILINE
+)
+
+# PYBIND11_GLOBAL_SDIST will build a different sdist, with the python-headers
+# files, and the sys.prefix files (CMake and headers).
+
+global_sdist = os.environ.get("PYBIND11_GLOBAL_SDIST", False)
+
+setup_py = "tools/setup_global.py.in" if global_sdist else "tools/setup_main.py.in"
+extra_cmd = 'cmdclass["sdist"] = SDist\n'
+
+to_src = (
+ ("pyproject.toml", "tools/pyproject.toml"),
+ ("setup.py", setup_py),
+)
+
+# Read the listed version
+with open("pybind11/_version.py") as f:
+ code = compile(f.read(), "pybind11/_version.py", "exec")
+loc = {}
+exec(code, loc)
+version = loc["__version__"]
+
+# Verify that the version matches the one in C++
+with open("include/pybind11/detail/common.h") as f:
+ matches = dict(VERSION_REGEX.findall(f.read()))
+cpp_version = "{MAJOR}.{MINOR}.{PATCH}".format(**matches)
+if version != cpp_version:
+ msg = "Python version {} does not match C++ version {}!".format(
+ version, cpp_version
+ )
+ raise RuntimeError(msg)
+
+
+def get_and_replace(filename, binary=False, **opts):
+ with open(filename, "rb" if binary else "r") as f:
+ contents = f.read()
+ # Replacement has to be done on text in Python 3 (both work in Python 2)
+ if binary:
+ return string.Template(contents.decode()).substitute(opts).encode()
+ else:
+ return string.Template(contents).substitute(opts)
+
+
+# Use our input files instead when making the SDist (and anything that depends
+# on it, like a wheel)
+class SDist(setuptools.command.sdist.sdist):
+ def make_release_tree(self, base_dir, files):
+ setuptools.command.sdist.sdist.make_release_tree(self, base_dir, files)
+
+ for to, src in to_src:
+ txt = get_and_replace(src, binary=True, version=version, extra_cmd="")
+
+ dest = os.path.join(base_dir, to)
+
+ # This is normally linked, so unlink before writing!
+ os.unlink(dest)
+ with open(dest, "wb") as f:
+ f.write(txt)
+
+
+# Backport from Python 3
+@contextlib.contextmanager
+def TemporaryDirectory(): # noqa: N802
+ "Prepare a temporary directory, cleanup when done"
+ try:
+ tmpdir = tempfile.mkdtemp()
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+
+# Remove the CMake install directory when done
+@contextlib.contextmanager
+def remove_output(*sources):
+ try:
+ yield
+ finally:
+ for src in sources:
+ shutil.rmtree(src)
+
+
+with remove_output("pybind11/include", "pybind11/share"):
+ # Generate the files if they are not present.
+ with TemporaryDirectory() as tmpdir:
+ cmd = ["cmake", "-S", ".", "-B", tmpdir] + [
+ "-DCMAKE_INSTALL_PREFIX=pybind11",
+ "-DBUILD_TESTING=OFF",
+ "-DPYBIND11_NOPYTHON=ON",
+ ]
+ cmake_opts = dict(cwd=DIR, stdout=sys.stdout, stderr=sys.stderr)
+ subprocess.check_call(cmd, **cmake_opts)
+ subprocess.check_call(["cmake", "--install", tmpdir], **cmake_opts)
+
+ txt = get_and_replace(setup_py, version=version, extra_cmd=extra_cmd)
+ code = compile(txt, setup_py, "exec")
+ exec(code, {"SDist": SDist})
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.4)
-option(PYBIND11_WERROR "Report all warnings as errors" OFF)
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
-if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
- # We're being loaded directly, i.e. not via add_subdirectory, so make this
- # work as its own project and load the pybind11Config to get the tools we need
- project(pybind11_tests CXX)
+# Only needed for CMake < 3.5 support
+include(CMakeParseArguments)
+
+# Filter out items; print an optional message if any items filtered
+#
+# Usage:
+# pybind11_filter_tests(LISTNAME file1.cpp file2.cpp ... MESSAGE "")
+#
+macro(pybind11_filter_tests LISTNAME)
+ cmake_parse_arguments(ARG "" "MESSAGE" "" ${ARGN})
+ set(PYBIND11_FILTER_TESTS_FOUND OFF)
+ foreach(filename IN LISTS ARG_UNPARSED_ARGUMENTS)
+ list(FIND ${LISTNAME} ${filename} _FILE_FOUND)
+ if(_FILE_FOUND GREATER -1)
+ list(REMOVE_AT ${LISTNAME} ${_FILE_FOUND})
+ set(PYBIND11_FILTER_TESTS_FOUND ON)
+ endif()
+ endforeach()
+ if(PYBIND11_FILTER_TESTS_FOUND AND ARG_MESSAGE)
+ message(STATUS "${ARG_MESSAGE}")
+ endif()
+endmacro()
- find_package(pybind11 REQUIRED CONFIG)
+macro(possibly_uninitialized)
+ foreach(VARNAME ${ARGN})
+ if(NOT DEFINED "${VARNAME}")
+ set("${VARNAME}" "")
+ endif()
+ endforeach()
+endmacro()
+
+# New Python support
+if(DEFINED Python_EXECUTABLE)
+ set(PYTHON_EXECUTABLE "${Python_EXECUTABLE}")
+ set(PYTHON_VERSION "${Python_VERSION}")
+endif()
+
+# There's no harm in including a project in a project
+project(pybind11_tests CXX)
+
+# Access FindCatch and more
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/../tools")
+
+option(PYBIND11_WERROR "Report all warnings as errors" OFF)
+option(DOWNLOAD_EIGEN "Download EIGEN (requires CMake 3.11+)" OFF)
+option(PYBIND11_CUDA_TESTS "Enable building CUDA tests (requires CMake 3.12+)" OFF)
+set(PYBIND11_TEST_OVERRIDE
+ ""
+ CACHE STRING "Tests from ;-separated list of *.cpp files will be built instead of all tests")
+set(PYBIND11_TEST_FILTER
+ ""
+ CACHE STRING "Tests from ;-separated list of *.cpp files will be removed from all tests")
+
+if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+ # We're being loaded directly, i.e. not via add_subdirectory, so make this
+ # work as its own project and load the pybind11Config to get the tools we need
+ find_package(pybind11 REQUIRED CONFIG)
endif()
-if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
+if(NOT CMAKE_BUILD_TYPE AND NOT DEFINED CMAKE_CONFIGURATION_TYPES)
message(STATUS "Setting tests build type to MinSizeRel as none was specified")
- set(CMAKE_BUILD_TYPE MinSizeRel CACHE STRING "Choose the type of build." FORCE)
- set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
- "MinSizeRel" "RelWithDebInfo")
+ set(CMAKE_BUILD_TYPE
+ MinSizeRel
+ CACHE STRING "Choose the type of build." FORCE)
+ set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel"
+ "RelWithDebInfo")
+endif()
+
+if(PYBIND11_CUDA_TESTS)
+ enable_language(CUDA)
+ if(DEFINED CMAKE_CXX_STANDARD)
+ set(CMAKE_CUDA_STANDARD ${CMAKE_CXX_STANDARD})
+ endif()
+ set(CMAKE_CUDA_STANDARD_REQUIRED ON)
endif()
# Full set of test files (you can override these; see below)
set(PYBIND11_TEST_FILES
- test_async.cpp
- test_buffers.cpp
- test_builtin_casters.cpp
- test_call_policies.cpp
- test_callbacks.cpp
- test_chrono.cpp
- test_class.cpp
- test_constants_and_functions.cpp
- test_copy_move.cpp
- test_docstring_options.cpp
- test_eigen.cpp
- test_enum.cpp
- test_eval.cpp
- test_exceptions.cpp
- test_factory_constructors.cpp
- test_gil_scoped.cpp
- test_iostream.cpp
- test_kwargs_and_defaults.cpp
- test_local_bindings.cpp
- test_methods_and_attributes.cpp
- test_modules.cpp
- test_multiple_inheritance.cpp
- test_numpy_array.cpp
- test_numpy_dtypes.cpp
- test_numpy_vectorize.cpp
- test_opaque_types.cpp
- test_operator_overloading.cpp
- test_pickling.cpp
- test_pytypes.cpp
- test_sequences_and_iterators.cpp
- test_smart_ptr.cpp
- test_stl.cpp
- test_stl_binders.cpp
- test_tagbased_polymorphic.cpp
- test_union.cpp
- test_virtual_functions.cpp
-)
+ test_async.cpp
+ test_buffers.cpp
+ test_builtin_casters.cpp
+ test_call_policies.cpp
+ test_callbacks.cpp
+ test_chrono.cpp
+ test_class.cpp
+ test_constants_and_functions.cpp
+ test_copy_move.cpp
+ test_custom_type_casters.cpp
+ test_docstring_options.cpp
+ test_eigen.cpp
+ test_enum.cpp
+ test_eval.cpp
+ test_exceptions.cpp
+ test_factory_constructors.cpp
+ test_gil_scoped.cpp
+ test_iostream.cpp
+ test_kwargs_and_defaults.cpp
+ test_local_bindings.cpp
+ test_methods_and_attributes.cpp
+ test_modules.cpp
+ test_multiple_inheritance.cpp
+ test_numpy_array.cpp
+ test_numpy_dtypes.cpp
+ test_numpy_vectorize.cpp
+ test_opaque_types.cpp
+ test_operator_overloading.cpp
+ test_pickling.cpp
+ test_pytypes.cpp
+ test_sequences_and_iterators.cpp
+ test_smart_ptr.cpp
+ test_stl.cpp
+ test_stl_binders.cpp
+ test_tagbased_polymorphic.cpp
+ test_union.cpp
+ test_virtual_functions.cpp)
# Invoking cmake with something like:
-# cmake -DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_picking.cpp" ..
+# cmake -DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp" ..
# lets you override the tests that get compiled and run. You can restore to all tests with:
# cmake -DPYBIND11_TEST_OVERRIDE= ..
-if (PYBIND11_TEST_OVERRIDE)
+if(PYBIND11_TEST_OVERRIDE)
set(PYBIND11_TEST_FILES ${PYBIND11_TEST_OVERRIDE})
endif()
-# Skip test_async for Python < 3.5
-list(FIND PYBIND11_TEST_FILES test_async.cpp PYBIND11_TEST_FILES_ASYNC_I)
-if((PYBIND11_TEST_FILES_ASYNC_I GREATER -1) AND ("${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}" VERSION_LESS 3.5))
- message(STATUS "Skipping test_async because Python version ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} < 3.5")
- list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_ASYNC_I})
+# You can also filter tests:
+if(PYBIND11_TEST_FILTER)
+ pybind11_filter_tests(PYBIND11_TEST_FILES ${PYBIND11_TEST_FILTER})
+endif()
+
+if(PYTHON_VERSION VERSION_LESS 3.5)
+ pybind11_filter_tests(PYBIND11_TEST_FILES test_async.cpp MESSAGE
+ "Skipping test_async on Python 2")
+endif()
+
+# Skip tests for CUDA check:
+# /pybind11/tests/test_constants_and_functions.cpp(125):
+# error: incompatible exception specifications
+if(PYBIND11_CUDA_TESTS)
+ pybind11_filter_tests(
+ PYBIND11_TEST_FILES test_constants_and_functions.cpp MESSAGE
+ "Skipping test_constants_and_functions due to incompatible exception specifications")
endif()
string(REPLACE ".cpp" ".py" PYBIND11_PYTEST_FILES "${PYBIND11_TEST_FILES}")
# Contains the set of test files that require pybind11_cross_module_tests to be
# built; if none of these are built (i.e. because TEST_OVERRIDE is used and
# doesn't include them) the second module doesn't get built.
-set(PYBIND11_CROSS_MODULE_TESTS
- test_exceptions.py
- test_local_bindings.py
- test_stl.py
- test_stl_binders.py
-)
+set(PYBIND11_CROSS_MODULE_TESTS test_exceptions.py test_local_bindings.py test_stl.py
+ test_stl_binders.py)
-set(PYBIND11_CROSS_MODULE_GIL_TESTS
- test_gil_scoped.py
-)
+set(PYBIND11_CROSS_MODULE_GIL_TESTS test_gil_scoped.py)
# Check if Eigen is available; if not, remove from PYBIND11_TEST_FILES (but
# keep it in PYBIND11_PYTEST_FILES, so that we get the "eigen is not installed"
# Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake).
# Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also
# produces a fatal error if loaded from a pre-3.0 cmake.
- if (NOT CMAKE_VERSION VERSION_LESS 3.0)
+ if(DOWNLOAD_EIGEN)
+ if(CMAKE_VERSION VERSION_LESS 3.11)
+ message(FATAL_ERROR "CMake 3.11+ required when using DOWNLOAD_EIGEN")
+ endif()
+
+ set(EIGEN3_VERSION_STRING "3.3.8")
+
+ include(FetchContent)
+ FetchContent_Declare(
+ eigen
+ GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git
+ GIT_TAG ${EIGEN3_VERSION_STRING})
+
+ FetchContent_GetProperties(eigen)
+ if(NOT eigen_POPULATED)
+ message(STATUS "Downloading Eigen")
+ FetchContent_Populate(eigen)
+ endif()
+
+ set(EIGEN3_INCLUDE_DIR ${eigen_SOURCE_DIR})
+ set(EIGEN3_FOUND TRUE)
+
+ else()
find_package(Eigen3 3.2.7 QUIET CONFIG)
- if (EIGEN3_FOUND)
- if (EIGEN3_VERSION_STRING AND NOT EIGEN3_VERSION_STRING VERSION_LESS 3.3.1)
- set(PYBIND11_EIGEN_VIA_TARGET 1)
- endif()
+
+ if(NOT EIGEN3_FOUND)
+ # Couldn't load via target, so fall back to allowing module mode finding, which will pick up
+ # tools/FindEigen3.cmake
+ find_package(Eigen3 3.2.7 QUIET)
endif()
endif()
- if (NOT EIGEN3_FOUND)
- # Couldn't load via target, so fall back to allowing module mode finding, which will pick up
- # tools/FindEigen3.cmake
- find_package(Eigen3 3.2.7 QUIET)
- endif()
if(EIGEN3_FOUND)
+ if(NOT TARGET Eigen3::Eigen)
+ add_library(Eigen3::Eigen IMPORTED INTERFACE)
+ set_property(TARGET Eigen3::Eigen PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+ "${EIGEN3_INCLUDE_DIR}")
+ endif()
+
# Eigen 3.3.1+ cmake sets EIGEN3_VERSION_STRING (and hard codes the version when installed
# rather than looking it up in the cmake script); older versions, and the
# tools/FindEigen3.cmake, set EIGEN3_VERSION instead.
message(STATUS "Building tests with Eigen v${EIGEN3_VERSION}")
else()
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
- message(STATUS "Building tests WITHOUT Eigen")
+ message(STATUS "Building tests WITHOUT Eigen, use -DDOWNLOAD_EIGEN on CMake 3.11+ to download")
endif()
endif()
# Optional dependency for some tests (boost::variant is only supported with version >= 1.56)
find_package(Boost 1.56)
+if(Boost_FOUND)
+ if(NOT TARGET Boost::headers)
+ add_library(Boost::headers IMPORTED INTERFACE)
+ if(TARGET Boost::boost)
+ # Classic FindBoost
+ set_property(TARGET Boost::headers PROPERTY INTERFACE_LINK_LIBRARIES Boost::boost)
+ else()
+ # Very old FindBoost, or newer Boost than CMake in older CMakes
+ set_property(TARGET Boost::headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+ ${Boost_INCLUDE_DIRS})
+ endif()
+ endif()
+endif()
+
# Compile with compiler warnings turned on
function(pybind11_enable_warnings target_name)
if(MSVC)
target_compile_options(${target_name} PRIVATE /W4)
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Intel|Clang)")
- target_compile_options(${target_name} PRIVATE -Wall -Wextra -Wconversion -Wcast-qual -Wdeprecated)
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Intel|Clang)" AND NOT PYBIND11_CUDA_TESTS)
+ target_compile_options(
+ ${target_name}
+ PRIVATE -Wall
+ -Wextra
+ -Wconversion
+ -Wcast-qual
+ -Wdeprecated
+ -Wundef
+ -Wnon-virtual-dtor)
endif()
if(PYBIND11_WERROR)
if(MSVC)
target_compile_options(${target_name} PRIVATE /WX)
+ elseif(PYBIND11_CUDA_TESTS)
+ target_compile_options(${target_name} PRIVATE "SHELL:-Werror all-warnings")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Intel|Clang)")
target_compile_options(${target_name} PRIVATE -Werror)
endif()
endif()
+
+ # Needs to be readded since the ordering requires these to be after the ones above
+ if(CMAKE_CXX_STANDARD
+ AND CMAKE_CXX_COMPILER_ID MATCHES "Clang"
+ AND PYTHON_VERSION VERSION_LESS 3.0)
+ if(CMAKE_CXX_STANDARD LESS 17)
+ target_compile_options(${target_name} PUBLIC -Wno-deprecated-register)
+ else()
+ target_compile_options(${target_name} PUBLIC -Wno-register)
+ endif()
+ endif()
endfunction()
set(test_targets pybind11_tests)
# Build pybind11_cross_module_tests if any test_whatever.py are being built that require it
foreach(t ${PYBIND11_CROSS_MODULE_TESTS})
list(FIND PYBIND11_PYTEST_FILES ${t} i)
- if (i GREATER -1)
+ if(i GREATER -1)
list(APPEND test_targets pybind11_cross_module_tests)
break()
endif()
foreach(t ${PYBIND11_CROSS_MODULE_GIL_TESTS})
list(FIND PYBIND11_PYTEST_FILES ${t} i)
- if (i GREATER -1)
+ if(i GREATER -1)
list(APPEND test_targets cross_module_gil_utils)
break()
endif()
endforeach()
-set(testdir ${CMAKE_CURRENT_SOURCE_DIR})
+# Support CUDA testing by forcing the target file to compile with NVCC
+if(PYBIND11_CUDA_TESTS)
+ set_property(SOURCE ${PYBIND11_TEST_FILES} PROPERTY LANGUAGE CUDA)
+endif()
+
foreach(target ${test_targets})
set(test_files ${PYBIND11_TEST_FILES})
- if(NOT target STREQUAL "pybind11_tests")
+ if(NOT "${target}" STREQUAL "pybind11_tests")
set(test_files "")
endif()
+ # Support CUDA testing by forcing the target file to compile with NVCC
+ if(PYBIND11_CUDA_TESTS)
+ set_property(SOURCE ${target}.cpp PROPERTY LANGUAGE CUDA)
+ endif()
+
# Create the binding library
pybind11_add_module(${target} THIN_LTO ${target}.cpp ${test_files} ${PYBIND11_HEADERS})
pybind11_enable_warnings(${target})
+ if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+ get_property(
+ suffix
+ TARGET ${target}
+ PROPERTY SUFFIX)
+ set(source_output "${CMAKE_CURRENT_SOURCE_DIR}/${target}${suffix}")
+ if(suffix AND EXISTS "${source_output}")
+ message(WARNING "Output file also in source directory; "
+ "please remove to avoid confusion: ${source_output}")
+ endif()
+ endif()
+
if(MSVC)
target_compile_options(${target} PRIVATE /utf-8)
endif()
if(EIGEN3_FOUND)
- if (PYBIND11_EIGEN_VIA_TARGET)
- target_link_libraries(${target} PRIVATE Eigen3::Eigen)
- else()
- target_include_directories(${target} PRIVATE ${EIGEN3_INCLUDE_DIR})
- endif()
+ target_link_libraries(${target} PRIVATE Eigen3::Eigen)
target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_EIGEN)
endif()
if(Boost_FOUND)
- target_include_directories(${target} PRIVATE ${Boost_INCLUDE_DIRS})
+ target_link_libraries(${target} PRIVATE Boost::headers)
target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_BOOST)
endif()
# Always write the output file directly into the 'tests' directory (even on MSVC)
if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)
- set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${testdir})
- foreach(config ${CMAKE_CONFIGURATION_TYPES})
- string(TOUPPER ${config} config)
- set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config} ${testdir})
- endforeach()
+ set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY
+ "${CMAKE_CURRENT_BINARY_DIR}")
+
+ if(DEFINED CMAKE_CONFIGURATION_TYPES)
+ foreach(config ${CMAKE_CONFIGURATION_TYPES})
+ string(TOUPPER ${config} config)
+ set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config}
+ "${CMAKE_CURRENT_BINARY_DIR}")
+ endforeach()
+ endif()
endif()
endforeach()
-# Make sure pytest is found or produce a fatal error
-if(NOT PYBIND11_PYTEST_FOUND)
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import pytest; print(pytest.__version__)"
- RESULT_VARIABLE pytest_not_found OUTPUT_VARIABLE pytest_version ERROR_QUIET)
- if(pytest_not_found)
- message(FATAL_ERROR "Running the tests requires pytest. Please install it manually"
- " (try: ${PYTHON_EXECUTABLE} -m pip install pytest)")
- elseif(pytest_version VERSION_LESS 3.0)
- message(FATAL_ERROR "Running the tests requires pytest >= 3.0. Found: ${pytest_version}"
- "Please update it (try: ${PYTHON_EXECUTABLE} -m pip install -U pytest)")
- endif()
- set(PYBIND11_PYTEST_FOUND TRUE CACHE INTERNAL "")
-endif()
+# Make sure pytest is found or produce a warning
+pybind11_find_import(pytest VERSION 3.1)
+
+if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+ # This is not used later in the build, so it's okay to regenerate each time.
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/pytest.ini" "${CMAKE_CURRENT_BINARY_DIR}/pytest.ini"
+ COPYONLY)
+ file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/pytest.ini"
+ "\ntestpaths = \"${CMAKE_CURRENT_SOURCE_DIR}\"")
-if(CMAKE_VERSION VERSION_LESS 3.2)
- set(PYBIND11_USES_TERMINAL "")
-else()
- set(PYBIND11_USES_TERMINAL "USES_TERMINAL")
endif()
+# cmake 3.12 added list(transform <list> prepend
+# but we can't use it yet
+string(REPLACE "test_" "${CMAKE_CURRENT_SOURCE_DIR}/test_" PYBIND11_ABS_PYTEST_FILES
+ "${PYBIND11_PYTEST_FILES}")
+
+set(PYBIND11_TEST_PREFIX_COMMAND
+ ""
+ CACHE STRING "Put this before pytest, use for checkers and such")
+
# A single command to compile and run the tests
-add_custom_target(pytest COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYBIND11_PYTEST_FILES}
- DEPENDS ${test_targets} WORKING_DIRECTORY ${testdir} ${PYBIND11_USES_TERMINAL})
+add_custom_target(
+ pytest
+ COMMAND ${PYBIND11_TEST_PREFIX_COMMAND} ${PYTHON_EXECUTABLE} -m pytest
+ ${PYBIND11_ABS_PYTEST_FILES}
+ DEPENDS ${test_targets}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
+ USES_TERMINAL)
if(PYBIND11_TEST_OVERRIDE)
- add_custom_command(TARGET pytest POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E echo "Note: not all tests run: -DPYBIND11_TEST_OVERRIDE is in effect")
+ add_custom_command(
+ TARGET pytest
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E echo
+ "Note: not all tests run: -DPYBIND11_TEST_OVERRIDE is in effect")
endif()
+# cmake-format: off
+add_custom_target(
+ memcheck
+ COMMAND
+ PYTHONMALLOC=malloc
+ valgrind
+ --leak-check=full
+ --show-leak-kinds=definite,indirect
+ --errors-for-leak-kinds=definite,indirect
+ --error-exitcode=1
+ --read-var-info=yes
+ --track-origins=yes
+ --suppressions="${CMAKE_CURRENT_SOURCE_DIR}/valgrind-python.supp"
+ --suppressions="${CMAKE_CURRENT_SOURCE_DIR}/valgrind-numpy-scipy.supp"
+ --gen-suppressions=all
+ ${PYTHON_EXECUTABLE} -m pytest ${PYBIND11_ABS_PYTEST_FILES}
+ DEPENDS ${test_targets}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
+ USES_TERMINAL)
+# cmake-format: on
+
# Add a check target to run all the tests, starting with pytest (we add dependencies to this below)
add_custom_target(check DEPENDS pytest)
# The remaining tests only apply when being built as part of the pybind11 project, but not if the
# tests are being built independently.
-if (NOT PROJECT_NAME STREQUAL "pybind11")
+if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
return()
endif()
# Add a post-build comment to show the primary test suite .so size and, if a previous size, compare it:
-add_custom_command(TARGET pybind11_tests POST_BUILD
- COMMAND ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/tools/libsize.py
- $<TARGET_FILE:pybind11_tests> ${CMAKE_CURRENT_BINARY_DIR}/sosize-$<TARGET_FILE_NAME:pybind11_tests>.txt)
-
-# Test embedding the interpreter. Provides the `cpptest` target.
-add_subdirectory(test_embed)
-
-# Test CMake build using functions and targets from subdirectory or installed location
-add_subdirectory(test_cmake_build)
+add_custom_command(
+ TARGET pybind11_tests
+ POST_BUILD
+ COMMAND
+ ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/../tools/libsize.py
+ $<TARGET_FILE:pybind11_tests>
+ ${CMAKE_CURRENT_BINARY_DIR}/sosize-$<TARGET_FILE_NAME:pybind11_tests>.txt)
+
+if(NOT PYBIND11_CUDA_TESTS)
+ # Test embedding the interpreter. Provides the `cpptest` target.
+ add_subdirectory(test_embed)
+
+ # Test CMake build using functions and targets from subdirectory or installed location
+ add_subdirectory(test_cmake_build)
+endif()
+# -*- coding: utf-8 -*-
"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
"""
-import pytest
-import textwrap
-import difflib
-import re
-import sys
import contextlib
-import platform
+import difflib
import gc
+import re
+import textwrap
+
+import pytest
-_unicode_marker = re.compile(r'u(\'[^\']*\')')
-_long_marker = re.compile(r'([0-9])L')
-_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')
+import env
-# test_async.py requires support for async and await
+# Early diagnostic for failed imports
+import pybind11_tests # noqa: F401
+
+_unicode_marker = re.compile(r"u(\'[^\']*\')")
+_long_marker = re.compile(r"([0-9])L")
+_hexadecimal = re.compile(r"0x[0-9a-fA-F]+")
+
+# Avoid collecting Python3 only files
collect_ignore = []
-if sys.version_info[:2] < (3, 5):
+if env.PY2:
collect_ignore.append("test_async.py")
def _strip_and_dedent(s):
"""For triple-quote strings"""
- return textwrap.dedent(s.lstrip('\n').rstrip())
+ return textwrap.dedent(s.lstrip("\n").rstrip())
def _split_and_sort(s):
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
- return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
+ return ["--- actual / +++ expected"] + [
+ line.strip("\n") for line in difflib.ndiff(a, b)
+ ]
class Output(object):
"""Basic output post-processing and comparison"""
+
def __init__(self, string):
self.string = string
self.explanation = []
def __eq__(self, other):
# Ignore constructor/destructor output which is prefixed with "###"
- a = [line for line in self.string.strip().splitlines() if not line.startswith("###")]
+ a = [
+ line
+ for line in self.string.strip().splitlines()
+ if not line.startswith("###")
+ ]
b = _strip_and_dedent(other).splitlines()
if a == b:
return True
class Unordered(Output):
"""Custom comparison for output without strict line ordering"""
+
def __eq__(self, other):
a = _split_and_sort(self.string)
b = _split_and_sort(other)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
"""Hook to insert custom failure explanation"""
- if hasattr(left, 'explanation'):
+ if hasattr(left, "explanation"):
return left.explanation
def gc_collect():
- ''' Run the garbage collector twice (needed when running
- reference counting tests with PyPy) '''
+ """Run the garbage collector twice (needed when running
+ reference counting tests with PyPy)"""
gc.collect()
gc.collect()
def pytest_configure():
- """Add import suppression and test requirements to `pytest` namespace"""
- try:
- import numpy as np
- except ImportError:
- np = None
- try:
- import scipy
- except ImportError:
- scipy = None
- try:
- from pybind11_tests.eigen import have_eigen
- except ImportError:
- have_eigen = False
- pypy = platform.python_implementation() == "PyPy"
-
- skipif = pytest.mark.skipif
pytest.suppress = suppress
- pytest.requires_numpy = skipif(not np, reason="numpy is not installed")
- pytest.requires_scipy = skipif(not np, reason="scipy is not installed")
- pytest.requires_eigen_and_numpy = skipif(not have_eigen or not np,
- reason="eigen and/or numpy are not installed")
- pytest.requires_eigen_and_scipy = skipif(
- not have_eigen or not scipy, reason="eigen and/or scipy are not installed")
- pytest.unsupported_on_pypy = skipif(pypy, reason="unsupported on PyPy")
- pytest.unsupported_on_py2 = skipif(sys.version_info.major < 3,
- reason="unsupported on Python 2.x")
pytest.gc_collect = gc_collect
-
-
-def _test_import_pybind11():
- """Early diagnostic for test module initialization errors
-
- When there is an error during initialization, the first import will report the
- real error while all subsequent imports will report nonsense. This import test
- is done early (in the pytest configuration file, before any tests) in order to
- avoid the noise of having all tests fail with identical error messages.
-
- Any possible exception is caught here and reported manually *without* the stack
- trace. This further reduces noise since the trace would only show pytest internals
- which are not useful for debugging pybind11 module issues.
- """
- # noinspection PyBroadException
- try:
- import pybind11_tests # noqa: F401 imported but unused
- except Exception as e:
- print("Failed to import pybind11_tests from pytest:")
- print(" {}: {}".format(type(e).__name__, e))
- sys.exit(1)
-
-
-_test_import_pybind11()
throw py::error_already_set();
Py_DECREF(result);
#else
- py::module::import("gc").attr("collect")();
+ py::module_::import("gc").attr("collect")();
#endif
}
}
}
}
- catch (const std::out_of_range &) {}
+ catch (const std::out_of_range&) {}
if (!t1) throw std::runtime_error("Unknown class passed to ConstructorStats::get()");
auto &cs1 = get(*t1);
// If we have both a t1 and t2 match, one is probably the trampoline class; return whichever
print_constr_details(inst, ":", values...);
track_values(inst, values...);
}
-
--- /dev/null
+# -*- coding: utf-8 -*-
+import platform
+import sys
+
+LINUX = sys.platform.startswith("linux")
+MACOS = sys.platform.startswith("darwin")
+WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin")
+
+CPYTHON = platform.python_implementation() == "CPython"
+PYPY = platform.python_implementation() == "PyPy"
+
+PY2 = sys.version_info.major == 2
+
+PY = sys.version_info
--- /dev/null
+# -*- coding: utf-8 -*-
+import contextlib
+import os
+import string
+import subprocess
+import sys
+import tarfile
+import zipfile
+
+# These tests must be run explicitly
+# They require CMake 3.15+ (--install)
+
+DIR = os.path.abspath(os.path.dirname(__file__))
+MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
+
+
+main_headers = {
+ "include/pybind11/attr.h",
+ "include/pybind11/buffer_info.h",
+ "include/pybind11/cast.h",
+ "include/pybind11/chrono.h",
+ "include/pybind11/common.h",
+ "include/pybind11/complex.h",
+ "include/pybind11/eigen.h",
+ "include/pybind11/embed.h",
+ "include/pybind11/eval.h",
+ "include/pybind11/functional.h",
+ "include/pybind11/iostream.h",
+ "include/pybind11/numpy.h",
+ "include/pybind11/operators.h",
+ "include/pybind11/options.h",
+ "include/pybind11/pybind11.h",
+ "include/pybind11/pytypes.h",
+ "include/pybind11/stl.h",
+ "include/pybind11/stl_bind.h",
+}
+
+detail_headers = {
+ "include/pybind11/detail/class.h",
+ "include/pybind11/detail/common.h",
+ "include/pybind11/detail/descr.h",
+ "include/pybind11/detail/init.h",
+ "include/pybind11/detail/internals.h",
+ "include/pybind11/detail/typeid.h",
+}
+
+cmake_files = {
+ "share/cmake/pybind11/FindPythonLibsNew.cmake",
+ "share/cmake/pybind11/pybind11Common.cmake",
+ "share/cmake/pybind11/pybind11Config.cmake",
+ "share/cmake/pybind11/pybind11ConfigVersion.cmake",
+ "share/cmake/pybind11/pybind11NewTools.cmake",
+ "share/cmake/pybind11/pybind11Targets.cmake",
+ "share/cmake/pybind11/pybind11Tools.cmake",
+}
+
+py_files = {
+ "__init__.py",
+ "__main__.py",
+ "_version.py",
+ "_version.pyi",
+ "commands.py",
+ "py.typed",
+ "setup_helpers.py",
+ "setup_helpers.pyi",
+}
+
+headers = main_headers | detail_headers
+src_files = headers | cmake_files
+all_files = src_files | py_files
+
+
+sdist_files = {
+ "pybind11",
+ "pybind11/include",
+ "pybind11/include/pybind11",
+ "pybind11/include/pybind11/detail",
+ "pybind11/share",
+ "pybind11/share/cmake",
+ "pybind11/share/cmake/pybind11",
+ "pyproject.toml",
+ "setup.cfg",
+ "setup.py",
+ "LICENSE",
+ "MANIFEST.in",
+ "README.rst",
+ "PKG-INFO",
+}
+
+local_sdist_files = {
+ ".egg-info",
+ ".egg-info/PKG-INFO",
+ ".egg-info/SOURCES.txt",
+ ".egg-info/dependency_links.txt",
+ ".egg-info/not-zip-safe",
+ ".egg-info/top_level.txt",
+}
+
+
+def test_build_sdist(monkeypatch, tmpdir):
+
+ monkeypatch.chdir(MAIN_DIR)
+
+ out = subprocess.check_output(
+ [
+ sys.executable,
+ "setup.py",
+ "sdist",
+ "--formats=tar",
+ "--dist-dir",
+ str(tmpdir),
+ ]
+ )
+ if hasattr(out, "decode"):
+ out = out.decode()
+
+ (sdist,) = tmpdir.visit("*.tar")
+
+ with tarfile.open(str(sdist)) as tar:
+ start = tar.getnames()[0] + "/"
+ version = start[9:-1]
+ simpler = set(n.split("/", 1)[-1] for n in tar.getnames()[1:])
+
+ with contextlib.closing(
+ tar.extractfile(tar.getmember(start + "setup.py"))
+ ) as f:
+ setup_py = f.read()
+
+ with contextlib.closing(
+ tar.extractfile(tar.getmember(start + "pyproject.toml"))
+ ) as f:
+ pyproject_toml = f.read()
+
+ files = set("pybind11/{}".format(n) for n in all_files)
+ files |= sdist_files
+ files |= set("pybind11{}".format(n) for n in local_sdist_files)
+ files.add("pybind11.egg-info/entry_points.txt")
+ files.add("pybind11.egg-info/requires.txt")
+ assert simpler == files
+
+ with open(os.path.join(MAIN_DIR, "tools", "setup_main.py.in"), "rb") as f:
+ contents = (
+ string.Template(f.read().decode())
+ .substitute(version=version, extra_cmd="")
+ .encode()
+ )
+ assert setup_py == contents
+
+ with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
+ contents = f.read()
+ assert pyproject_toml == contents
+
+
+def test_build_global_dist(monkeypatch, tmpdir):
+
+ monkeypatch.chdir(MAIN_DIR)
+ monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
+
+ out = subprocess.check_output(
+ [
+ sys.executable,
+ "setup.py",
+ "sdist",
+ "--formats=tar",
+ "--dist-dir",
+ str(tmpdir),
+ ]
+ )
+ if hasattr(out, "decode"):
+ out = out.decode()
+
+ (sdist,) = tmpdir.visit("*.tar")
+
+ with tarfile.open(str(sdist)) as tar:
+ start = tar.getnames()[0] + "/"
+ version = start[16:-1]
+ simpler = set(n.split("/", 1)[-1] for n in tar.getnames()[1:])
+
+ with contextlib.closing(
+ tar.extractfile(tar.getmember(start + "setup.py"))
+ ) as f:
+ setup_py = f.read()
+
+ with contextlib.closing(
+ tar.extractfile(tar.getmember(start + "pyproject.toml"))
+ ) as f:
+ pyproject_toml = f.read()
+
+ files = set("pybind11/{}".format(n) for n in all_files)
+ files |= sdist_files
+ files |= set("pybind11_global{}".format(n) for n in local_sdist_files)
+ assert simpler == files
+
+ with open(os.path.join(MAIN_DIR, "tools", "setup_global.py.in"), "rb") as f:
+ contents = (
+ string.Template(f.read().decode())
+ .substitute(version=version, extra_cmd="")
+ .encode()
+ )
+ assert setup_py == contents
+
+ with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
+ contents = f.read()
+ assert pyproject_toml == contents
+
+
+def tests_build_wheel(monkeypatch, tmpdir):
+ monkeypatch.chdir(MAIN_DIR)
+
+ subprocess.check_output(
+ [sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)]
+ )
+
+ (wheel,) = tmpdir.visit("*.whl")
+
+ files = set("pybind11/{}".format(n) for n in all_files)
+ files |= {
+ "dist-info/LICENSE",
+ "dist-info/METADATA",
+ "dist-info/RECORD",
+ "dist-info/WHEEL",
+ "dist-info/entry_points.txt",
+ "dist-info/top_level.txt",
+ }
+
+ with zipfile.ZipFile(str(wheel)) as z:
+ names = z.namelist()
+
+ trimmed = set(n for n in names if "dist-info" not in n)
+ trimmed |= set(
+ "dist-info/{}".format(n.split("/", 1)[-1]) for n in names if "dist-info" in n
+ )
+ assert files == trimmed
+
+
+def tests_build_global_wheel(monkeypatch, tmpdir):
+ monkeypatch.chdir(MAIN_DIR)
+ monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
+
+ subprocess.check_output(
+ [sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)]
+ )
+
+ (wheel,) = tmpdir.visit("*.whl")
+
+ files = set("data/data/{}".format(n) for n in src_files)
+ files |= set("data/headers/{}".format(n[8:]) for n in headers)
+ files |= {
+ "dist-info/LICENSE",
+ "dist-info/METADATA",
+ "dist-info/WHEEL",
+ "dist-info/top_level.txt",
+ "dist-info/RECORD",
+ }
+
+ with zipfile.ZipFile(str(wheel)) as z:
+ names = z.namelist()
+
+ beginning = names[0].split("/", 1)[0].rsplit(".", 1)[0]
+ trimmed = set(n[len(beginning) + 1 :] for n in names)
+
+ assert files == trimmed
--- /dev/null
+# -*- coding: utf-8 -*-
+import os
+import sys
+import subprocess
+from textwrap import dedent
+
+import pytest
+
+DIR = os.path.abspath(os.path.dirname(__file__))
+MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
+
+
+@pytest.mark.parametrize("parallel", [False, True])
+@pytest.mark.parametrize("std", [11, 0])
+def test_simple_setup_py(monkeypatch, tmpdir, parallel, std):
+ monkeypatch.chdir(tmpdir)
+ monkeypatch.syspath_prepend(MAIN_DIR)
+
+ (tmpdir / "setup.py").write_text(
+ dedent(
+ u"""\
+ import sys
+ sys.path.append({MAIN_DIR!r})
+
+ from setuptools import setup, Extension
+ from pybind11.setup_helpers import build_ext, Pybind11Extension
+
+ std = {std}
+
+ ext_modules = [
+ Pybind11Extension(
+ "simple_setup",
+ sorted(["main.cpp"]),
+ cxx_std=std,
+ ),
+ ]
+
+ cmdclass = dict()
+ if std == 0:
+ cmdclass["build_ext"] = build_ext
+
+
+ parallel = {parallel}
+ if parallel:
+ from pybind11.setup_helpers import ParallelCompile
+ ParallelCompile().install()
+
+ setup(
+ name="simple_setup_package",
+ cmdclass=cmdclass,
+ ext_modules=ext_modules,
+ )
+ """
+ ).format(MAIN_DIR=MAIN_DIR, std=std, parallel=parallel),
+ encoding="ascii",
+ )
+
+ (tmpdir / "main.cpp").write_text(
+ dedent(
+ u"""\
+ #include <pybind11/pybind11.h>
+
+ int f(int x) {
+ return x * 3;
+ }
+ PYBIND11_MODULE(simple_setup, m) {
+ m.def("f", &f);
+ }
+ """
+ ),
+ encoding="ascii",
+ )
+
+ subprocess.check_call(
+ [sys.executable, "setup.py", "build_ext", "--inplace"],
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ )
+
+ # Debug helper printout, normally hidden
+ for item in tmpdir.listdir():
+ print(item.basename)
+
+ assert (
+ len([f for f in tmpdir.listdir() if f.basename.startswith("simple_setup")]) == 1
+ )
+ assert len(list(tmpdir.listdir())) == 4 # two files + output + build_dir
+
+ (tmpdir / "test.py").write_text(
+ dedent(
+ u"""\
+ import simple_setup
+ assert simple_setup.f(3) == 9
+ """
+ ),
+ encoding="ascii",
+ )
+
+ subprocess.check_call(
+ [sys.executable, "test.py"], stdout=sys.stdout, stderr=sys.stderr
+ )
std::string name_;
const std::string &name() { return name_; }
};
-}
+} // namespace pets
struct MixGL { int i; MixGL(int i) : i{i} {} };
struct MixGL2 { int i; MixGL2(int i) : i{i} {} };
Instead, see the "How can I reduce the build time?" question in the "Frequently asked questions"
section of the documentation for good practice on splitting binding code over multiple files.
*/
-std::list<std::function<void(py::module &)>> &initializers() {
- static std::list<std::function<void(py::module &)>> inits;
+std::list<std::function<void(py::module_ &)>> &initializers() {
+ static std::list<std::function<void(py::module_ &)>> inits;
return inits;
}
test_initializer::test_initializer(Initializer init) {
- initializers().push_back(init);
+ initializers().emplace_back(init);
}
test_initializer::test_initializer(const char *submodule_name, Initializer init) {
- initializers().push_back([=](py::module &parent) {
+ initializers().emplace_back([=](py::module_ &parent) {
auto m = parent.def_submodule(submodule_name);
init(m);
});
}
-void bind_ConstructorStats(py::module &m) {
+void bind_ConstructorStats(py::module_ &m) {
py::class_<ConstructorStats>(m, "ConstructorStats")
.def("alive", &ConstructorStats::alive)
.def("values", &ConstructorStats::values)
for (const auto &initializer : initializers())
initializer(m);
-
- if (!py::hasattr(m, "have_eigen")) m.attr("have_eigen") = false;
}
#pragma once
#include <pybind11/pybind11.h>
+#include <pybind11/eval.h>
#if defined(_MSC_VER) && _MSC_VER < 1910
// We get some really long type names here which causes MSVC 2015 to emit warnings
using namespace pybind11::literals;
class test_initializer {
- using Initializer = void (*)(py::module &);
+ using Initializer = void (*)(py::module_ &);
public:
test_initializer(Initializer init);
};
#define TEST_SUBMODULE(name, variable) \
- void test_submodule_##name(py::module &); \
+ void test_submodule_##name(py::module_ &); \
test_initializer name(#name, test_submodule_##name); \
- void test_submodule_##name(py::module &variable)
+ void test_submodule_##name(py::module_ &variable)
/// Dummy type which is not exported anywhere -- something to trigger a conversion error
IncType &operator=(IncType &&) = delete;
};
+/// A simple union for basic testing
+union IntFloat {
+ int i;
+ float f;
+};
+
/// Custom cast-only type that casts to a string "rvalue" or "lvalue" depending on the cast context.
/// Used to test recursive casters (e.g. std::tuple, stl containers).
struct RValueCaster {};
-NAMESPACE_BEGIN(pybind11)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(pybind11)
+PYBIND11_NAMESPACE_BEGIN(detail)
template<> class type_caster<RValueCaster> {
public:
PYBIND11_TYPE_CASTER(RValueCaster, _("RValueCaster"));
static handle cast(RValueCaster &&, return_value_policy, handle) { return py::str("rvalue").release(); }
static handle cast(const RValueCaster &, return_value_policy, handle) { return py::str("lvalue").release(); }
};
-NAMESPACE_END(detail)
-NAMESPACE_END(pybind11)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(pybind11)
+
+template <typename F>
+void ignoreOldStyleInitWarnings(F &&body) {
+ py::exec(R"(
+ message = "pybind11-bound class '.+' is using an old-style placement-new '(?:__init__|__setstate__)' which has been deprecated"
+
+ import warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message=message, category=FutureWarning)
+ body()
+ )", py::dict(py::arg("body") = py::cpp_function(body)));
+}
[pytest]
-minversion = 3.0
-norecursedirs = test_cmake_build test_embed
+minversion = 3.1
+norecursedirs = test_* extra_*
+xfail_strict = True
addopts =
# show summary of skipped tests
-rs
# capture only Python print and C++ py::print, but not C output (low-level Python errors)
--capture=sys
+ # enable all warnings
+ -Wa
filterwarnings =
# make warnings into errors but ignore certain third-party extension issues
error
--- /dev/null
+--extra-index-url https://antocuni.github.io/pypy-wheels/manylinux2010/
+numpy==1.16.6; python_version<"3.6" and sys_platform!="win32"
+numpy==1.18.0; platform_python_implementation=="PyPy" and sys_platform=="darwin" and python_version>="3.6"
+numpy==1.19.3; (platform_python_implementation!="PyPy" or sys_platform=="linux") and python_version>="3.6" and python_version<"3.10"
+pytest==4.6.9; python_version<"3.5"
+pytest==6.1.2; python_version=="3.5"
+pytest==6.2.1; python_version>="3.6"
+pytest-timeout
+scipy==1.2.3; (platform_python_implementation!="PyPy" or sys_platform=="linux") and python_version<"3.6"
+scipy==1.5.4; (platform_python_implementation!="PyPy" or sys_platform=="linux") and python_version>="3.6" and python_version<"3.10"
.def(py::init<>())
.def("__await__", [](const SupportsAsync& self) -> py::object {
static_cast<void>(self);
- py::object loop = py::module::import("asyncio.events").attr("get_event_loop")();
+ py::object loop = py::module_::import("asyncio.events").attr("get_event_loop")();
py::object f = loop.attr("create_future")();
f.attr("set_result")(5);
return f.attr("__await__")();
-import asyncio
+# -*- coding: utf-8 -*-
import pytest
-from pybind11_tests import async_module as m
+
+asyncio = pytest.importorskip("asyncio")
+m = pytest.importorskip("pybind11_tests.async_module")
@pytest.fixture
#include "pybind11_tests.h"
#include "constructor_stats.h"
+#include <pybind11/stl.h>
TEST_SUBMODULE(buffers, m) {
// test_from_python / test_to_python:
class Matrix {
public:
- Matrix(ssize_t rows, ssize_t cols) : m_rows(rows), m_cols(cols) {
+ Matrix(py::ssize_t rows, py::ssize_t cols) : m_rows(rows), m_cols(cols) {
print_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
m_data = new float[(size_t) (rows*cols)];
memset(m_data, 0, sizeof(float) * (size_t) (rows * cols));
return *this;
}
- float operator()(ssize_t i, ssize_t j) const {
+ float operator()(py::ssize_t i, py::ssize_t j) const {
return m_data[(size_t) (i*m_cols + j)];
}
- float &operator()(ssize_t i, ssize_t j) {
+ float &operator()(py::ssize_t i, py::ssize_t j) {
return m_data[(size_t) (i*m_cols + j)];
}
float *data() { return m_data; }
- ssize_t rows() const { return m_rows; }
- ssize_t cols() const { return m_cols; }
+ py::ssize_t rows() const { return m_rows; }
+ py::ssize_t cols() const { return m_cols; }
private:
- ssize_t m_rows;
- ssize_t m_cols;
+ py::ssize_t m_rows;
+ py::ssize_t m_cols;
float *m_data;
};
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
- .def(py::init<ssize_t, ssize_t>())
+ .def(py::init<py::ssize_t, py::ssize_t>())
/// Construct from a buffer
.def(py::init([](py::buffer const b) {
py::buffer_info info = b.request();
.def("cols", &Matrix::cols)
/// Bare bones interface
- .def("__getitem__", [](const Matrix &m, std::pair<ssize_t, ssize_t> i) {
+ .def("__getitem__", [](const Matrix &m, std::pair<py::ssize_t, py::ssize_t> i) {
if (i.first >= m.rows() || i.second >= m.cols())
throw py::index_error();
return m(i.first, i.second);
})
- .def("__setitem__", [](Matrix &m, std::pair<ssize_t, ssize_t> i, float v) {
+ .def("__setitem__", [](Matrix &m, std::pair<py::ssize_t, py::ssize_t> i, float v) {
if (i.first >= m.rows() || i.second >= m.cols())
throw py::index_error();
m(i.first, i.second) = v;
// test_inherited_protocol
class SquareMatrix : public Matrix {
public:
- SquareMatrix(ssize_t n) : Matrix(n, n) { }
+ SquareMatrix(py::ssize_t n) : Matrix(n, n) { }
};
// Derived classes inherit the buffer protocol and the buffer access function
py::class_<SquareMatrix, Matrix>(m, "SquareMatrix")
- .def(py::init<ssize_t>());
+ .def(py::init<py::ssize_t>());
// test_pointer_to_member_fn
.def_readwrite("value", (int32_t DerivedBuffer::*) &DerivedBuffer::value)
.def_buffer(&DerivedBuffer::get_buffer_info);
+ struct BufferReadOnly {
+ const uint8_t value = 0;
+ BufferReadOnly(uint8_t value): value(value) {}
+
+ py::buffer_info get_buffer_info() {
+ return py::buffer_info(&value, 1);
+ }
+ };
+ py::class_<BufferReadOnly>(m, "BufferReadOnly", py::buffer_protocol())
+ .def(py::init<uint8_t>())
+ .def_buffer(&BufferReadOnly::get_buffer_info);
+
+ struct BufferReadOnlySelect {
+ uint8_t value = 0;
+ bool readonly = false;
+
+ py::buffer_info get_buffer_info() {
+ return py::buffer_info(&value, 1, readonly);
+ }
+ };
+ py::class_<BufferReadOnlySelect>(m, "BufferReadOnlySelect", py::buffer_protocol())
+ .def(py::init<>())
+ .def_readwrite("value", &BufferReadOnlySelect::value)
+ .def_readwrite("readonly", &BufferReadOnlySelect::readonly)
+ .def_buffer(&BufferReadOnlySelect::get_buffer_info);
+
+ // Expose buffer_info for testing.
+ py::class_<py::buffer_info>(m, "buffer_info")
+ .def(py::init<>())
+ .def_readonly("itemsize", &py::buffer_info::itemsize)
+ .def_readonly("size", &py::buffer_info::size)
+ .def_readonly("format", &py::buffer_info::format)
+ .def_readonly("ndim", &py::buffer_info::ndim)
+ .def_readonly("shape", &py::buffer_info::shape)
+ .def_readonly("strides", &py::buffer_info::strides)
+ .def_readonly("readonly", &py::buffer_info::readonly)
+ .def("__repr__", [](py::handle self) {
+ return py::str("itemsize={0.itemsize!r}, size={0.size!r}, format={0.format!r}, ndim={0.ndim!r}, shape={0.shape!r}, strides={0.strides!r}, readonly={0.readonly!r}").format(self);
+ })
+ ;
+
+ m.def("get_buffer_info", [](py::buffer buffer) {
+ return buffer.request();
+ });
}
+# -*- coding: utf-8 -*-
+import io
import struct
+import ctypes
+
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import buffers as m
from pybind11_tests import ConstructorStats
-pytestmark = pytest.requires_numpy
-
-with pytest.suppress(ImportError):
- import numpy as np
+np = pytest.importorskip("numpy")
def test_from_python():
assert cstats.move_assignments == 0
-# PyPy: Memory leak in the "np.array(m, copy=False)" call
-# https://bitbucket.org/pypy/pypy/issues/2444
-@pytest.unsupported_on_pypy
+# https://foss.heptapod.net/pypy/pypy/-/issues/2444
def test_to_python():
mat = m.Matrix(5, 4)
assert memoryview(mat).shape == (5, 4)
mat[3, 2] = 7.0
assert mat[2, 3] == 4
assert mat[3, 2] == 7
- assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )
- assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )
+ assert struct.unpack_from("f", mat, (3 * 4 + 2) * 4) == (7,)
+ assert struct.unpack_from("f", mat, (2 * 4 + 3) * 4) == (4,)
mat2 = np.array(mat, copy=False)
assert mat2.shape == (5, 4)
assert cstats.move_assignments == 0
-@pytest.unsupported_on_pypy
def test_inherited_protocol():
"""SquareMatrix is derived from Matrix and inherits the buffer protocol"""
assert np.asarray(matrix).shape == (5, 5)
-@pytest.unsupported_on_pypy
def test_pointer_to_member_fn():
for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
buf = cls()
buf.value = 0x12345678
- value = struct.unpack('i', bytearray(buf))[0]
+ value = struct.unpack("i", bytearray(buf))[0]
assert value == 0x12345678
+
+
+def test_readonly_buffer():
+ buf = m.BufferReadOnly(0x64)
+ view = memoryview(buf)
+ assert view[0] == b"d" if env.PY2 else 0x64
+ assert view.readonly
+ with pytest.raises(TypeError):
+ view[0] = b"\0" if env.PY2 else 0
+
+
+def test_selective_readonly_buffer():
+ buf = m.BufferReadOnlySelect()
+
+ memoryview(buf)[0] = b"d" if env.PY2 else 0x64
+ assert buf.value == 0x64
+
+ io.BytesIO(b"A").readinto(buf)
+ assert buf.value == ord(b"A")
+
+ buf.readonly = True
+ with pytest.raises(TypeError):
+ memoryview(buf)[0] = b"\0" if env.PY2 else 0
+ with pytest.raises(TypeError):
+ io.BytesIO(b"1").readinto(buf)
+
+
+def test_ctypes_array_1d():
+ char1d = (ctypes.c_char * 10)()
+ int1d = (ctypes.c_int * 15)()
+ long1d = (ctypes.c_long * 7)()
+
+ for carray in (char1d, int1d, long1d):
+ info = m.get_buffer_info(carray)
+ assert info.itemsize == ctypes.sizeof(carray._type_)
+ assert info.size == len(carray)
+ assert info.ndim == 1
+ assert info.shape == [info.size]
+ assert info.strides == [info.itemsize]
+ assert not info.readonly
+
+
+def test_ctypes_array_2d():
+ char2d = ((ctypes.c_char * 10) * 4)()
+ int2d = ((ctypes.c_int * 15) * 3)()
+ long2d = ((ctypes.c_long * 7) * 2)()
+
+ for carray in (char2d, int2d, long2d):
+ info = m.get_buffer_info(carray)
+ assert info.itemsize == ctypes.sizeof(carray[0]._type_)
+ assert info.size == len(carray) * len(carray[0])
+ assert info.ndim == 2
+ assert info.shape == [len(carray), len(carray[0])]
+ assert info.strides == [info.itemsize * len(carray[0]), info.itemsize]
+ assert not info.readonly
+
+
+@pytest.mark.skipif(
+ "env.PYPY and env.PY2", reason="PyPy2 bytes buffer not reported as readonly"
+)
+def test_ctypes_from_buffer():
+ test_pystr = b"0123456789"
+ for pyarray in (test_pystr, bytearray(test_pystr)):
+ pyinfo = m.get_buffer_info(pyarray)
+
+ if pyinfo.readonly:
+ cbytes = (ctypes.c_char * len(pyarray)).from_buffer_copy(pyarray)
+ cinfo = m.get_buffer_info(cbytes)
+ else:
+ cbytes = (ctypes.c_char * len(pyarray)).from_buffer(pyarray)
+ cinfo = m.get_buffer_info(cbytes)
+
+ assert cinfo.size == pyinfo.size
+ assert cinfo.ndim == pyinfo.ndim
+ assert cinfo.shape == pyinfo.shape
+ assert cinfo.strides == pyinfo.strides
+ assert not cinfo.readonly
# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#endif
+struct ConstRefCasted {
+ int tag;
+};
+
+PYBIND11_NAMESPACE_BEGIN(pybind11)
+PYBIND11_NAMESPACE_BEGIN(detail)
+template <>
+class type_caster<ConstRefCasted> {
+ public:
+ static constexpr auto name = _<ConstRefCasted>();
+
+ // Input is unimportant, a new value will always be constructed based on the
+ // cast operator.
+ bool load(handle, bool) { return true; }
+
+ operator ConstRefCasted&&() { value = {1}; return std::move(value); }
+ operator ConstRefCasted&() { value = {2}; return value; }
+ operator ConstRefCasted*() { value = {3}; return &value; }
+
+ operator const ConstRefCasted&() { value = {4}; return value; }
+ operator const ConstRefCasted*() { value = {5}; return &value; }
+
+ // custom cast_op to explicitly propagate types to the conversion operators.
+ template <typename T_>
+ using cast_op_type =
+ /// const
+ conditional_t<
+ std::is_same<remove_reference_t<T_>, const ConstRefCasted*>::value, const ConstRefCasted*,
+ conditional_t<
+ std::is_same<T_, const ConstRefCasted&>::value, const ConstRefCasted&,
+ /// non-const
+ conditional_t<
+ std::is_same<remove_reference_t<T_>, ConstRefCasted*>::value, ConstRefCasted*,
+ conditional_t<
+ std::is_same<T_, ConstRefCasted&>::value, ConstRefCasted&,
+ /* else */ConstRefCasted&&>>>>;
+
+ private:
+ ConstRefCasted value = {0};
+};
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(pybind11)
+
TEST_SUBMODULE(builtin_casters, m) {
// test_simple_string
m.def("string_roundtrip", [](const char *s) { return s; });
else { wstr.push_back((wchar_t) mathbfA32); } // 𝐀, utf32
wstr.push_back(0x7a); // z
- m.def("good_utf8_string", []() { return std::string(u8"Say utf8\u203d \U0001f382 \U0001d400"); }); // Say utf8‽ 🎂 𝐀
+ m.def("good_utf8_string", []() { return std::string((const char*)u8"Say utf8\u203d \U0001f382 \U0001d400"); }); // Say utf8‽ 🎂 𝐀
m.def("good_utf16_string", [=]() { return std::u16string({ b16, ib16, cake16_1, cake16_2, mathbfA16_1, mathbfA16_2, z16 }); }); // b‽🎂𝐀z
m.def("good_utf32_string", [=]() { return std::u32string({ a32, mathbfA32, cake32, ib32, z32 }); }); // a𝐀🎂‽z
m.def("good_wchar_string", [=]() { return wstr; }); // a‽𝐀z
m.def("strlen", [](char *s) { return strlen(s); });
m.def("string_length", [](std::string s) { return s.length(); });
+#ifdef PYBIND11_HAS_U8STRING
+ m.attr("has_u8string") = true;
+ m.def("good_utf8_u8string", []() { return std::u8string(u8"Say utf8\u203d \U0001f382 \U0001d400"); }); // Say utf8‽ 🎂 𝐀
+ m.def("bad_utf8_u8string", []() { return std::u8string((const char8_t*)"abc\xd0" "def"); });
+
+ m.def("u8_char8_Z", []() -> char8_t { return u8'Z'; });
+
+ // test_single_char_arguments
+ m.def("ord_char8", [](char8_t c) -> int { return static_cast<unsigned char>(c); });
+ m.def("ord_char8_lv", [](char8_t &c) -> int { return static_cast<unsigned char>(c); });
+#endif
+
// test_string_view
#ifdef PYBIND11_HAS_STRING_VIEW
m.attr("has_string_view") = true;
m.def("string_view_chars", [](std::string_view s) { py::list l; for (auto c : s) l.append((std::uint8_t) c); return l; });
m.def("string_view16_chars", [](std::u16string_view s) { py::list l; for (auto c : s) l.append((int) c); return l; });
m.def("string_view32_chars", [](std::u32string_view s) { py::list l; for (auto c : s) l.append((int) c); return l; });
- m.def("string_view_return", []() { return std::string_view(u8"utf8 secret \U0001f382"); });
+ m.def("string_view_return", []() { return std::string_view((const char*)u8"utf8 secret \U0001f382"); });
m.def("string_view16_return", []() { return std::u16string_view(u"utf16 secret \U0001f382"); });
m.def("string_view32_return", []() { return std::u32string_view(U"utf32 secret \U0001f382"); });
+
+# ifdef PYBIND11_HAS_U8STRING
+ m.def("string_view8_print", [](std::u8string_view s) { py::print(s, s.size()); });
+ m.def("string_view8_chars", [](std::u8string_view s) { py::list l; for (auto c : s) l.append((std::uint8_t) c); return l; });
+ m.def("string_view8_return", []() { return std::u8string_view(u8"utf8 secret \U0001f382"); });
+# endif
#endif
// test_integer_casting
m.def("i64_str", [](std::int64_t v) { return std::to_string(v); });
m.def("u64_str", [](std::uint64_t v) { return std::to_string(v); });
+ // test_int_convert
+ m.def("int_passthrough", [](int arg) { return arg; });
+ m.def("int_passthrough_noconvert", [](int arg) { return arg; }, py::arg{}.noconvert());
+
// test_tuple
m.def("pair_passthrough", [](std::pair<bool, std::string> input) {
return std::make_pair(input.second, input.first);
return std::make_pair(RValueCaster{}, std::make_tuple(RValueCaster{}, std::make_pair(RValueCaster{}, RValueCaster{}))); });
m.def("lvalue_nested", []() -> const decltype(lvnested) & { return lvnested; });
+ static std::pair<int, std::string> int_string_pair{2, "items"};
+ m.def("int_string_pair", []() { return &int_string_pair; });
+
// test_builtins_cast_return_none
m.def("return_none_string", []() -> std::string * { return nullptr; });
m.def("return_none_char", []() -> const char * { return nullptr; });
m.def("return_none_bool", []() -> bool * { return nullptr; });
m.def("return_none_int", []() -> int * { return nullptr; });
m.def("return_none_float", []() -> float * { return nullptr; });
+ m.def("return_none_pair", []() -> std::pair<int,int> * { return nullptr; });
// test_none_deferred
m.def("defer_none_cstring", [](char *) { return false; });
m.def("load_nullptr_t", [](std::nullptr_t) {}); // not useful, but it should still compile
m.def("cast_nullptr_t", []() { return std::nullptr_t{}; });
+ // [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
+
// test_bool_caster
m.def("bool_passthrough", [](bool arg) { return arg; });
- m.def("bool_passthrough_noconvert", [](bool arg) { return arg; }, py::arg().noconvert());
+ m.def("bool_passthrough_noconvert", [](bool arg) { return arg; }, py::arg{}.noconvert());
+
+ // TODO: This should be disabled and fixed in future Intel compilers
+#if !defined(__INTEL_COMPILER)
+ // Test "bool_passthrough_noconvert" again, but using () instead of {} to construct py::arg
+ // When compiled with the Intel compiler, this results in segmentation faults when importing
+ // the module. Tested with icc (ICC) 2021.1 Beta 20200827, this should be tested again when
+ // a newer version of icc is available.
+ m.def("bool_passthrough_noconvert2", [](bool arg) { return arg; }, py::arg().noconvert());
+#endif
// test_reference_wrapper
m.def("refwrap_builtin", [](std::reference_wrapper<int> p) { return 10 * p.get(); });
m.def("refwrap_usertype", [](std::reference_wrapper<UserType> p) { return p.get().value(); });
+ m.def("refwrap_usertype_const", [](std::reference_wrapper<const UserType> p) { return p.get().value(); });
+
+ m.def("refwrap_lvalue", []() -> std::reference_wrapper<UserType> {
+ static UserType x(1);
+ return std::ref(x);
+ });
+ m.def("refwrap_lvalue_const", []() -> std::reference_wrapper<const UserType> {
+ static UserType x(1);
+ return std::cref(x);
+ });
+
// Not currently supported (std::pair caster has return-by-value cast operator);
// triggers static_assert failure.
//m.def("refwrap_pair", [](std::reference_wrapper<std::pair<int, int>>) { });
py::object o = py::cast(v);
return py::cast<void *>(o) == v;
});
+
+ // Tests const/non-const propagation in cast_op.
+ m.def("takes", [](ConstRefCasted x) { return x.tag; });
+ m.def("takes_move", [](ConstRefCasted&& x) { return x.tag; });
+ m.def("takes_ptr", [](ConstRefCasted* x) { return x->tag; });
+ m.def("takes_ref", [](ConstRefCasted& x) { return x.tag; });
+ m.def("takes_ref_wrap", [](std::reference_wrapper<ConstRefCasted> x) { return x.get().tag; });
+ m.def("takes_const_ptr", [](const ConstRefCasted* x) { return x->tag; });
+ m.def("takes_const_ref", [](const ConstRefCasted& x) { return x.tag; });
+ m.def("takes_const_ref_wrap", [](std::reference_wrapper<const ConstRefCasted> x) { return x.get().tag; });
}
-# Python < 3 needs this: coding=utf-8
+# -*- coding: utf-8 -*-
import pytest
+import env # noqa: F401
+
from pybind11_tests import builtin_casters as m
from pybind11_tests import UserType, IncType
assert m.good_utf16_string() == u"b‽🎂𝐀z"
assert m.good_utf32_string() == u"a𝐀🎂‽z"
assert m.good_wchar_string() == u"a⸘𝐀z"
+ if hasattr(m, "has_u8string"):
+ assert m.good_utf8_u8string() == u"Say utf8‽ 🎂 𝐀"
with pytest.raises(UnicodeDecodeError):
m.bad_utf8_string()
if hasattr(m, "bad_wchar_string"):
with pytest.raises(UnicodeDecodeError):
m.bad_wchar_string()
+ if hasattr(m, "has_u8string"):
+ with pytest.raises(UnicodeDecodeError):
+ m.bad_utf8_u8string()
- assert m.u8_Z() == 'Z'
- assert m.u8_eacute() == u'é'
- assert m.u16_ibang() == u'‽'
- assert m.u32_mathbfA() == u'𝐀'
- assert m.wchar_heart() == u'♥'
+ assert m.u8_Z() == "Z"
+ assert m.u8_eacute() == u"é"
+ assert m.u16_ibang() == u"‽"
+ assert m.u32_mathbfA() == u"𝐀"
+ assert m.wchar_heart() == u"♥"
+ if hasattr(m, "has_u8string"):
+ assert m.u8_char8_Z() == "Z"
def test_single_char_arguments():
"""Tests failures for passing invalid inputs to char-accepting functions"""
+
def toobig_message(r):
return "Character code point not in range({0:#x})".format(r)
+
toolong_message = "Expected a character, but multi-character string found"
- assert m.ord_char(u'a') == 0x61 # simple ASCII
- assert m.ord_char_lv(u'b') == 0x62
- assert m.ord_char(u'é') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
+ assert m.ord_char(u"a") == 0x61 # simple ASCII
+ assert m.ord_char_lv(u"b") == 0x62
+ assert (
+ m.ord_char(u"é") == 0xE9
+ ) # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
- assert m.ord_char(u'Ā') == 0x100 # requires 2 bytes, doesn't fit in a char
+ assert m.ord_char(u"Ā") == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
- assert m.ord_char(u'ab')
+ assert m.ord_char(u"ab")
assert str(excinfo.value) == toolong_message
- assert m.ord_char16(u'a') == 0x61
- assert m.ord_char16(u'é') == 0xE9
- assert m.ord_char16_lv(u'ê') == 0xEA
- assert m.ord_char16(u'Ā') == 0x100
- assert m.ord_char16(u'‽') == 0x203d
- assert m.ord_char16(u'♥') == 0x2665
- assert m.ord_char16_lv(u'♡') == 0x2661
+ assert m.ord_char16(u"a") == 0x61
+ assert m.ord_char16(u"é") == 0xE9
+ assert m.ord_char16_lv(u"ê") == 0xEA
+ assert m.ord_char16(u"Ā") == 0x100
+ assert m.ord_char16(u"‽") == 0x203D
+ assert m.ord_char16(u"♥") == 0x2665
+ assert m.ord_char16_lv(u"♡") == 0x2661
with pytest.raises(ValueError) as excinfo:
- assert m.ord_char16(u'🎂') == 0x1F382 # requires surrogate pair
+ assert m.ord_char16(u"🎂") == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
with pytest.raises(ValueError) as excinfo:
- assert m.ord_char16(u'aa')
+ assert m.ord_char16(u"aa")
assert str(excinfo.value) == toolong_message
- assert m.ord_char32(u'a') == 0x61
- assert m.ord_char32(u'é') == 0xE9
- assert m.ord_char32(u'Ā') == 0x100
- assert m.ord_char32(u'‽') == 0x203d
- assert m.ord_char32(u'♥') == 0x2665
- assert m.ord_char32(u'🎂') == 0x1F382
+ assert m.ord_char32(u"a") == 0x61
+ assert m.ord_char32(u"é") == 0xE9
+ assert m.ord_char32(u"Ā") == 0x100
+ assert m.ord_char32(u"‽") == 0x203D
+ assert m.ord_char32(u"♥") == 0x2665
+ assert m.ord_char32(u"🎂") == 0x1F382
with pytest.raises(ValueError) as excinfo:
- assert m.ord_char32(u'aa')
+ assert m.ord_char32(u"aa")
assert str(excinfo.value) == toolong_message
- assert m.ord_wchar(u'a') == 0x61
- assert m.ord_wchar(u'é') == 0xE9
- assert m.ord_wchar(u'Ā') == 0x100
- assert m.ord_wchar(u'‽') == 0x203d
- assert m.ord_wchar(u'♥') == 0x2665
+ assert m.ord_wchar(u"a") == 0x61
+ assert m.ord_wchar(u"é") == 0xE9
+ assert m.ord_wchar(u"Ā") == 0x100
+ assert m.ord_wchar(u"‽") == 0x203D
+ assert m.ord_wchar(u"♥") == 0x2665
if m.wchar_size == 2:
with pytest.raises(ValueError) as excinfo:
- assert m.ord_wchar(u'🎂') == 0x1F382 # requires surrogate pair
+ assert m.ord_wchar(u"🎂") == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
else:
- assert m.ord_wchar(u'🎂') == 0x1F382
+ assert m.ord_wchar(u"🎂") == 0x1F382
with pytest.raises(ValueError) as excinfo:
- assert m.ord_wchar(u'aa')
+ assert m.ord_wchar(u"aa")
assert str(excinfo.value) == toolong_message
+ if hasattr(m, "has_u8string"):
+ assert m.ord_char8(u"a") == 0x61 # simple ASCII
+ assert m.ord_char8_lv(u"b") == 0x62
+ assert (
+ m.ord_char8(u"é") == 0xE9
+ ) # requires 2 bytes in utf-8, but can be stuffed in a char
+ with pytest.raises(ValueError) as excinfo:
+ assert m.ord_char8(u"Ā") == 0x100 # requires 2 bytes, doesn't fit in a char
+ assert str(excinfo.value) == toobig_message(0x100)
+ with pytest.raises(ValueError) as excinfo:
+ assert m.ord_char8(u"ab")
+ assert str(excinfo.value) == toolong_message
+
def test_bytes_to_string():
"""Tests the ability to pass bytes to C++ string-accepting functions. Note that this is
one-way: the only way to return bytes to Python is via the pybind11::bytes class."""
# Issue #816
- import sys
- byte = bytes if sys.version_info[0] < 3 else str
- assert m.strlen(byte("hi")) == 2
- assert m.string_length(byte("world")) == 5
- assert m.string_length(byte("a\x00b")) == 3
- assert m.strlen(byte("a\x00b")) == 1 # C-string limitation
+ def to_bytes(s):
+ b = s if env.PY2 else s.encode("utf8")
+ assert isinstance(b, bytes)
+ return b
+
+ assert m.strlen(to_bytes("hi")) == 2
+ assert m.string_length(to_bytes("world")) == 5
+ assert m.string_length(to_bytes("a\x00b")) == 3
+ assert m.strlen(to_bytes("a\x00b")) == 1 # C-string limitation
# passing in a utf8 encoded string should work
- assert m.string_length(u'💩'.encode("utf8")) == 4
+ assert m.string_length(u"💩".encode("utf8")) == 4
@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no <string_view>")
def test_string_view(capture):
"""Tests support for C++17 string_view arguments and return values"""
assert m.string_view_chars("Hi") == [72, 105]
- assert m.string_view_chars("Hi 🎂") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
- assert m.string_view16_chars("Hi 🎂") == [72, 105, 32, 0xd83c, 0xdf82]
- assert m.string_view32_chars("Hi 🎂") == [72, 105, 32, 127874]
-
- assert m.string_view_return() == "utf8 secret 🎂"
- assert m.string_view16_return() == "utf16 secret 🎂"
- assert m.string_view32_return() == "utf32 secret 🎂"
+ assert m.string_view_chars("Hi 🎂") == [72, 105, 32, 0xF0, 0x9F, 0x8E, 0x82]
+ assert m.string_view16_chars(u"Hi 🎂") == [72, 105, 32, 0xD83C, 0xDF82]
+ assert m.string_view32_chars(u"Hi 🎂") == [72, 105, 32, 127874]
+ if hasattr(m, "has_u8string"):
+ assert m.string_view8_chars("Hi") == [72, 105]
+ assert m.string_view8_chars(u"Hi 🎂") == [72, 105, 32, 0xF0, 0x9F, 0x8E, 0x82]
+
+ assert m.string_view_return() == u"utf8 secret 🎂"
+ assert m.string_view16_return() == u"utf16 secret 🎂"
+ assert m.string_view32_return() == u"utf32 secret 🎂"
+ if hasattr(m, "has_u8string"):
+ assert m.string_view8_return() == u"utf8 secret 🎂"
with capture:
m.string_view_print("Hi")
m.string_view_print("utf8 🎂")
- m.string_view16_print("utf16 🎂")
- m.string_view32_print("utf32 🎂")
- assert capture == """
+ m.string_view16_print(u"utf16 🎂")
+ m.string_view32_print(u"utf32 🎂")
+ assert (
+ capture
+ == u"""
Hi 2
utf8 🎂 9
utf16 🎂 8
utf32 🎂 7
"""
+ )
+ if hasattr(m, "has_u8string"):
+ with capture:
+ m.string_view8_print("Hi")
+ m.string_view8_print(u"utf8 🎂")
+ assert (
+ capture
+ == u"""
+ Hi 2
+ utf8 🎂 9
+ """
+ )
with capture:
m.string_view_print("Hi, ascii")
m.string_view_print("Hi, utf8 🎂")
- m.string_view16_print("Hi, utf16 🎂")
- m.string_view32_print("Hi, utf32 🎂")
- assert capture == """
+ m.string_view16_print(u"Hi, utf16 🎂")
+ m.string_view32_print(u"Hi, utf32 🎂")
+ assert (
+ capture
+ == u"""
Hi, ascii 9
Hi, utf8 🎂 13
Hi, utf16 🎂 12
Hi, utf32 🎂 11
"""
+ )
+ if hasattr(m, "has_u8string"):
+ with capture:
+ m.string_view8_print("Hi, ascii")
+ m.string_view8_print(u"Hi, utf8 🎂")
+ assert (
+ capture
+ == u"""
+ Hi, ascii 9
+ Hi, utf8 🎂 13
+ """
+ )
def test_integer_casting():
"""Issue #929 - out-of-range integer values shouldn't be accepted"""
- import sys
assert m.i32_str(-1) == "-1"
assert m.i64_str(-1) == "-1"
assert m.i32_str(2000000000) == "2000000000"
assert m.u32_str(2000000000) == "2000000000"
- if sys.version_info < (3,):
+ if env.PY2:
assert m.i32_str(long(-1)) == "-1" # noqa: F821 undefined name 'long'
assert m.i64_str(long(-1)) == "-1" # noqa: F821 undefined name 'long'
- assert m.i64_str(long(-999999999999)) == "-999999999999" # noqa: F821 undefined name
- assert m.u64_str(long(999999999999)) == "999999999999" # noqa: F821 undefined name 'long'
+ assert (
+ m.i64_str(long(-999999999999)) # noqa: F821 undefined name 'long'
+ == "-999999999999"
+ )
+ assert (
+ m.u64_str(long(999999999999)) # noqa: F821 undefined name 'long'
+ == "999999999999"
+ )
else:
assert m.i64_str(-999999999999) == "-999999999999"
assert m.u64_str(999999999999) == "999999999999"
m.i32_str(3000000000)
assert "incompatible function arguments" in str(excinfo.value)
- if sys.version_info < (3,):
+ if env.PY2:
with pytest.raises(TypeError) as excinfo:
m.u32_str(long(-1)) # noqa: F821 undefined name 'long'
assert "incompatible function arguments" in str(excinfo.value)
assert "incompatible function arguments" in str(excinfo.value)
+def test_int_convert():
+ class Int(object):
+ def __int__(self):
+ return 42
+
+ class NotInt(object):
+ pass
+
+ class Float(object):
+ def __float__(self):
+ return 41.99999
+
+ class Index(object):
+ def __index__(self):
+ return 42
+
+ class IntAndIndex(object):
+ def __int__(self):
+ return 42
+
+ def __index__(self):
+ return 0
+
+ class RaisingTypeErrorOnIndex(object):
+ def __index__(self):
+ raise TypeError
+
+ def __int__(self):
+ return 42
+
+ class RaisingValueErrorOnIndex(object):
+ def __index__(self):
+ raise ValueError
+
+ def __int__(self):
+ return 42
+
+ convert, noconvert = m.int_passthrough, m.int_passthrough_noconvert
+
+ def requires_conversion(v):
+ pytest.raises(TypeError, noconvert, v)
+
+ def cant_convert(v):
+ pytest.raises(TypeError, convert, v)
+
+ assert convert(7) == 7
+ assert noconvert(7) == 7
+ cant_convert(3.14159)
+ assert convert(Int()) == 42
+ requires_conversion(Int())
+ cant_convert(NotInt())
+ cant_convert(Float())
+
+ # Before Python 3.8, `PyLong_AsLong` does not pick up on `obj.__index__`,
+ # but pybind11 "backports" this behavior.
+ assert convert(Index()) == 42
+ assert noconvert(Index()) == 42
+ assert convert(IntAndIndex()) == 0 # Fishy; `int(DoubleThought)` == 42
+ assert noconvert(IntAndIndex()) == 0
+ assert convert(RaisingTypeErrorOnIndex()) == 42
+ requires_conversion(RaisingTypeErrorOnIndex())
+ assert convert(RaisingValueErrorOnIndex()) == 42
+ requires_conversion(RaisingValueErrorOnIndex())
+
+
+def test_numpy_int_convert():
+ np = pytest.importorskip("numpy")
+
+ convert, noconvert = m.int_passthrough, m.int_passthrough_noconvert
+
+ def require_implicit(v):
+ pytest.raises(TypeError, noconvert, v)
+
+ # `np.intc` is an alias that corresponds to a C++ `int`
+ assert convert(np.intc(42)) == 42
+ assert noconvert(np.intc(42)) == 42
+
+ # The implicit conversion from np.float32 is undesirable but currently accepted.
+ assert convert(np.float32(3.14159)) == 3
+ require_implicit(np.float32(3.14159))
+
+
def test_tuple(doc):
"""std::pair <-> tuple & std::tuple <-> tuple"""
assert m.pair_passthrough((True, "test")) == ("test", True)
assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True)
assert m.empty_tuple() == ()
- assert doc(m.pair_passthrough) == """
+ assert (
+ doc(m.pair_passthrough)
+ == """
pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]
Return a pair in reversed order
"""
- assert doc(m.tuple_passthrough) == """
+ )
+ assert (
+ doc(m.tuple_passthrough)
+ == """
tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
Return a triple in reversed order
"""
+ )
assert m.rvalue_pair() == ("rvalue", "rvalue")
assert m.lvalue_pair() == ("lvalue", "lvalue")
assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue")))
assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue")))
+ assert m.int_string_pair() == (2, "items")
+
def test_builtins_cast_return_none():
"""Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
assert m.return_none_bool() is None
assert m.return_none_int() is None
assert m.return_none_float() is None
+ assert m.return_none_pair() is None
def test_none_deferred():
"""std::reference_wrapper for builtin and user types"""
assert m.refwrap_builtin(42) == 420
assert m.refwrap_usertype(UserType(42)) == 42
+ assert m.refwrap_usertype_const(UserType(42)) == 42
with pytest.raises(TypeError) as excinfo:
m.refwrap_builtin(None)
m.refwrap_usertype(None)
assert "incompatible function arguments" in str(excinfo.value)
+ assert m.refwrap_lvalue().value == 1
+ assert m.refwrap_lvalue_const().value == 1
+
a1 = m.refwrap_list(copy=True)
a2 = m.refwrap_list(copy=True)
assert [x.value for x in a1] == [2, 3]
assert convert(A(False)) is False
-@pytest.requires_numpy
def test_numpy_bool():
- import numpy as np
+ np = pytest.importorskip("numpy")
+
convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
+ def cant_convert(v):
+ pytest.raises(TypeError, convert, v)
+
# np.bool_ is not considered implicit
assert convert(np.bool_(True)) is True
assert convert(np.bool_(False)) is False
assert noconvert(np.bool_(True)) is True
assert noconvert(np.bool_(False)) is False
+ cant_convert(np.zeros(2, dtype="int"))
def test_int_long():
long."""
import sys
- must_be_long = type(getattr(sys, 'maxint', 1) + 1)
+
+ must_be_long = type(getattr(sys, "maxint", 1) + 1)
assert isinstance(m.int_cast(), int)
assert isinstance(m.long_cast(), int)
assert isinstance(m.longlong_cast(), must_be_long)
def test_void_caster_2():
assert m.test_void_caster()
+
+
+def test_const_ref_caster():
+ """Verifies that const-ref is propagated through type_caster cast_op.
+ The returned ConstRefCasted type is a mimimal type that is constructed to
+ reference the casting mode used.
+ """
+ x = False
+ assert m.takes(x) == 1
+ assert m.takes_move(x) == 1
+
+ assert m.takes_ptr(x) == 3
+ assert m.takes_ref(x) == 2
+ assert m.takes_ref_wrap(x) == 2
+
+ assert m.takes_const_ptr(x) == 5
+ assert m.takes_const_ref(x) == 4
+ assert m.takes_const_ref_wrap(x) == 4
class Parent {
public:
Parent() { py::print("Allocating parent."); }
+ Parent(const Parent& parent) = default;
~Parent() { py::print("Releasing parent."); }
void addChild(Child *) { }
Child *returnChild() { return new Child(); }
+# -*- coding: utf-8 -*-
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import call_policies as m
from pybind11_tests import ConstructorStats
+@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False)
def test_keep_alive_argument(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
with capture:
p.addChild(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 1
- assert capture == """
+ assert (
+ capture
+ == """
Allocating child.
Releasing child.
"""
+ )
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
"""
+ )
def test_keep_alive_return_value(capture):
with capture:
p.returnChild()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
- assert capture == """
+ assert (
+ capture
+ == """
Allocating child.
Releasing child.
"""
+ )
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
"""
+ )
-# https://bitbucket.org/pypy/pypy/issues/2447
-@pytest.unsupported_on_pypy
+# https://foss.heptapod.net/pypy/pypy/-/issues/2447
+@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented")
def test_alive_gc(capture):
n_inst = ConstructorStats.detail_reg_inst()
p = m.ParentGC()
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
- lst.append(lst) # creates a circular reference
+ lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
"""
+ )
def test_alive_gc_derived(capture):
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
- lst.append(lst) # creates a circular reference
+ lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
"""
+ )
def test_alive_gc_multi_derived(capture):
# +3 rather than +2 because Derived corresponds to two registered instances
assert ConstructorStats.detail_reg_inst() == n_inst + 3
lst = [p]
- lst.append(lst) # creates a circular reference
+ lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
Releasing child.
"""
+ )
def test_return_none(capture):
with capture:
p = m.Parent(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
- assert capture == """
+ assert (
+ capture
+ == """
Allocating child.
Allocating parent.
"""
+ )
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
- assert capture == """
+ assert (
+ capture
+ == """
Releasing parent.
Releasing child.
"""
+ )
def test_call_guard():
}
});
- class AbstractBase { public: virtual unsigned int func() = 0; };
+ class AbstractBase {
+ public:
+ // [workaround(intel)] = default does not work here
+ // Defaulting this destructor results in linking errors with the Intel compiler
+ // (in Debug builds only, tested with icpc (ICC) 2021.1 Beta 20200827)
+ virtual ~AbstractBase() {}; // NOLINT(modernize-use-equals-default)
+ virtual unsigned int func() = 0;
+ };
m.def("func_accepting_func_accepting_base", [](std::function<double(AbstractBase&)>) { });
struct MovableObject {
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import callbacks as m
from threading import Thread
def test_keyword_args_and_generalized_unpacking():
-
def f(*args, **kwargs):
return args, kwargs
assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
- assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2})
+ assert m.test_dict_unpacking(f) == (
+ ("positional", 1),
+ {"key": "value", "a": 1, "b": 2},
+ )
assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
assert m.test_unpacking_and_keywords2(f) == (
("positional", 1, 2, 3, 4, 5),
- {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
+ {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5},
)
with pytest.raises(TypeError) as excinfo:
def test_cpp_function_roundtrip():
"""Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
- assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
- assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==
- "matches dummy_function: eval(1) = 2")
+ assert (
+ m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
+ )
+ assert (
+ m.test_dummy_function(m.roundtrip(m.dummy_function))
+ == "matches dummy_function: eval(1) = 2"
+ )
assert m.roundtrip(None, expect_none=True) is None
- assert (m.test_dummy_function(lambda x: x + 2) ==
- "can't convert to function pointer: eval(1) = 3")
+ assert (
+ m.test_dummy_function(lambda x: x + 2)
+ == "can't convert to function pointer: eval(1) = 3"
+ )
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(m.dummy_function2)
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(lambda x, y: x + y)
- assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument",
- "takes exactly 2 arguments"))
+ assert any(
+ s in str(excinfo.value)
+ for s in ("missing 1 required positional argument", "takes exactly 2 arguments")
+ )
def test_function_signatures(doc):
m.test_async_callback(gen_f(), work)
# wait until work is done
from time import sleep
+
sleep(0.5)
assert sum(res) == sum([x + 3 for x in work])
#include "pybind11_tests.h"
#include <pybind11/chrono.h>
+#include <chrono>
+
+struct different_resolutions {
+ using time_point_h = std::chrono::time_point<
+ std::chrono::system_clock, std::chrono::hours>;
+ using time_point_m = std::chrono::time_point<
+ std::chrono::system_clock, std::chrono::minutes>;
+ using time_point_s = std::chrono::time_point<
+ std::chrono::system_clock, std::chrono::seconds>;
+ using time_point_ms = std::chrono::time_point<
+ std::chrono::system_clock, std::chrono::milliseconds>;
+ using time_point_us = std::chrono::time_point<
+ std::chrono::system_clock, std::chrono::microseconds>;
+ time_point_h timestamp_h;
+ time_point_m timestamp_m;
+ time_point_s timestamp_s;
+ time_point_ms timestamp_ms;
+ time_point_us timestamp_us;
+};
TEST_SUBMODULE(chrono, m) {
using system_time = std::chrono::system_clock::time_point;
m.def("test_nano_timepoint", [](timestamp start, timespan delta) -> timestamp {
return start + delta;
});
+
+ // Test different resolutions
+ py::class_<different_resolutions>(m, "different_resolutions")
+ .def(py::init<>())
+ .def_readwrite("timestamp_h", &different_resolutions::timestamp_h)
+ .def_readwrite("timestamp_m", &different_resolutions::timestamp_m)
+ .def_readwrite("timestamp_s", &different_resolutions::timestamp_s)
+ .def_readwrite("timestamp_ms", &different_resolutions::timestamp_ms)
+ .def_readwrite("timestamp_us", &different_resolutions::timestamp_us)
+ ;
}
+# -*- coding: utf-8 -*-
from pybind11_tests import chrono as m
import datetime
+import pytest
+
+import env # noqa: F401
def test_chrono_system_clock():
# Get the time from both c++ and datetime
+ date0 = datetime.datetime.today()
date1 = m.test_chrono1()
date2 = datetime.datetime.today()
assert isinstance(date1, datetime.datetime)
# The numbers should vary by a very small amount (time it took to execute)
+ diff_python = abs(date2 - date0)
diff = abs(date1 - date2)
- # There should never be a days/seconds difference
+ # There should never be a days difference
assert diff.days == 0
- assert diff.seconds == 0
- # We test that no more than about 0.5 seconds passes here
- # This makes sure that the dates created are very close to the same
- # but if the testing system is incredibly overloaded this should still pass
- assert diff.microseconds < 500000
+ # Since datetime.datetime.today() calls time.time(), and on some platforms
+ # that has 1 second accuracy, we compare this way
+ assert diff.seconds <= diff_python.seconds
def test_chrono_system_clock_roundtrip():
assert time2.microsecond == 0
-def test_chrono_system_clock_roundtrip_time():
- time1 = datetime.datetime.today().time()
+SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif(
+ "env.WIN", reason="TZ environment variable only supported on POSIX"
+)
+
+
+@pytest.mark.parametrize(
+ "time1",
+ [
+ datetime.datetime.today().time(),
+ datetime.time(0, 0, 0),
+ datetime.time(0, 0, 0, 1),
+ datetime.time(0, 28, 45, 109827),
+ datetime.time(0, 59, 59, 999999),
+ datetime.time(1, 0, 0),
+ datetime.time(5, 59, 59, 0),
+ datetime.time(5, 59, 59, 1),
+ ],
+)
+@pytest.mark.parametrize(
+ "tz",
+ [
+ None,
+ pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN),
+ pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN),
+ pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN),
+ ],
+)
+def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch):
+ if tz is not None:
+ monkeypatch.setenv("TZ", "/usr/share/zoneinfo/{}".format(tz))
# Roundtrip the time
datetime2 = m.test_chrono2(time1)
def test_nano_timepoint():
time = datetime.datetime.now()
time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60))
- assert(time1 == time + datetime.timedelta(seconds=60))
+ assert time1 == time + datetime.timedelta(seconds=60)
+
+
+def test_chrono_different_resolutions():
+ resolutions = m.different_resolutions()
+ time = datetime.datetime.now()
+ resolutions.timestamp_h = time
+ resolutions.timestamp_m = time
+ resolutions.timestamp_s = time
+ resolutions.timestamp_ms = time
+ resolutions.timestamp_us = time
BSD-style license that can be found in the LICENSE file.
*/
+#if defined(__INTEL_COMPILER) && __cplusplus >= 201703L
+// Intel compiler requires a separate header file to support aligned new operators
+// and does not set the __cpp_aligned_new feature macro.
+// This header needs to be included before pybind11.
+#include <aligned_new>
+#endif
+
#include "pybind11_tests.h"
#include "constructor_stats.h"
#include "local_bindings.h"
BaseClass() = default;
BaseClass(const BaseClass &) = default;
BaseClass(BaseClass &&) = default;
- virtual ~BaseClass() {}
+ virtual ~BaseClass() = default;
};
struct DerivedClass1 : BaseClass { };
struct DerivedClass2 : BaseClass { };
);
});
+ struct Invalid {};
+
+ // test_type
+ m.def("check_type", [](int category) {
+ // Currently not supported (via a fail at compile time)
+ // See https://github.com/pybind/pybind11/issues/2486
+ // if (category == 2)
+ // return py::type::of<int>();
+ if (category == 1)
+ return py::type::of<DerivedClass1>();
+ else
+ return py::type::of<Invalid>();
+ });
+
+ m.def("get_type_of", [](py::object ob) {
+ return py::type::of(ob);
+ });
+
+ m.def("get_type_classic", [](py::handle h) {
+ return h.get_type();
+ });
+
+ m.def("as_type", [](py::object ob) {
+ return py::type(ob);
+ });
+
// test_mismatched_holder
struct MismatchBase1 { };
struct MismatchDerived1 : MismatchBase1 { };
struct MismatchDerived2 : MismatchBase2 { };
m.def("mismatched_holder_1", []() {
- auto mod = py::module::import("__main__");
+ auto mod = py::module_::import("__main__");
py::class_<MismatchBase1, std::shared_ptr<MismatchBase1>>(mod, "MismatchBase1");
py::class_<MismatchDerived1, MismatchBase1>(mod, "MismatchDerived1");
});
m.def("mismatched_holder_2", []() {
- auto mod = py::module::import("__main__");
+ auto mod = py::module_::import("__main__");
py::class_<MismatchBase2>(mod, "MismatchBase2");
py::class_<MismatchDerived2, std::shared_ptr<MismatchDerived2>,
MismatchBase2>(mod, "MismatchDerived2");
};
auto def = new PyMethodDef{"f", f, METH_VARARGS, nullptr};
- return py::reinterpret_steal<py::object>(PyCFunction_NewEx(def, nullptr, m.ptr()));
+ py::capsule def_capsule(def, [](void *ptr) { delete reinterpret_cast<PyMethodDef *>(ptr); });
+ return py::reinterpret_steal<py::object>(PyCFunction_NewEx(def, def_capsule.ptr(), m.ptr()));
}());
// test_operator_new_delete
static void *operator new(size_t s, void *ptr) { py::print("C placement-new", s); return ptr; }
static void operator delete(void *p, size_t s) { py::print("C delete", s); return ::operator delete(p); }
virtual ~AliasedHasOpNewDelSize() = default;
+ AliasedHasOpNewDelSize() = default;
+ AliasedHasOpNewDelSize(const AliasedHasOpNewDelSize&) = delete;
};
struct PyAliasedHasOpNewDelSize : AliasedHasOpNewDelSize {
PyAliasedHasOpNewDelSize() = default;
class ProtectedB {
public:
virtual ~ProtectedB() = default;
+ ProtectedB() = default;
+ ProtectedB(const ProtectedB &) = delete;
protected:
virtual int foo() const { return value; }
class TrampolineB : public ProtectedB {
public:
- int foo() const override { PYBIND11_OVERLOAD(int, ProtectedB, foo, ); }
+ int foo() const override { PYBIND11_OVERRIDE(int, ProtectedB, foo, ); }
};
class PublicistB : public ProtectedB {
public:
+ // [workaround(intel)] = default does not work here
+ // Removing or defaulting this destructor results in linking errors with the Intel compiler
+ // (in Debug builds only, tested with icpc (ICC) 2021.1 Beta 20200827)
+ ~PublicistB() override {}; // NOLINT(modernize-use-equals-default)
using ProtectedB::foo;
};
// test_reentrant_implicit_conversion_failure
// #1035: issue with runaway reentrant implicit conversion
struct BogusImplicitConversion {
- BogusImplicitConversion(const BogusImplicitConversion &) { }
+ BogusImplicitConversion(const BogusImplicitConversion &) = default;
};
py::class_<BogusImplicitConversion>(m, "BogusImplicitConversion")
.def(py::init<>())
.def("ptr", &Aligned::ptr);
#endif
+
+ // test_final
+ struct IsFinal final {};
+ py::class_<IsFinal>(m, "IsFinal", py::is_final());
+
+ // test_non_final_final
+ struct IsNonFinalFinal {};
+ py::class_<IsNonFinalFinal>(m, "IsNonFinalFinal", py::is_final());
+
+ // test_exception_rvalue_abort
+ struct PyPrintDestructor {
+ PyPrintDestructor() = default;
+ ~PyPrintDestructor() {
+ py::print("Print from destructor");
+ }
+ void throw_something() { throw std::runtime_error("error"); }
+ };
+ py::class_<PyPrintDestructor>(m, "PyPrintDestructor")
+ .def(py::init<>())
+ .def("throw_something", &PyPrintDestructor::throw_something);
+
+ // test_multiple_instances_with_same_pointer
+ struct SamePointer {};
+ static SamePointer samePointer;
+ py::class_<SamePointer, std::unique_ptr<SamePointer, py::nodelete>>(m, "SamePointer")
+ .def(py::init([]() { return &samePointer; }))
+ .def("__del__", [](SamePointer&) { py::print("__del__ called"); });
+
+ struct Empty {};
+ py::class_<Empty>(m, "Empty")
+ .def(py::init<>());
+
+ // test_base_and_derived_nested_scope
+ struct BaseWithNested {
+ struct Nested {};
+ };
+
+ struct DerivedWithNested : BaseWithNested {
+ struct Nested {};
+ };
+
+ py::class_<BaseWithNested> baseWithNested_class(m, "BaseWithNested");
+ py::class_<DerivedWithNested, BaseWithNested> derivedWithNested_class(m, "DerivedWithNested");
+ py::class_<BaseWithNested::Nested>(baseWithNested_class, "Nested")
+ .def_static("get_name", []() { return "BaseWithNested::Nested"; });
+ py::class_<DerivedWithNested::Nested>(derivedWithNested_class, "Nested")
+ .def_static("get_name", []() { return "DerivedWithNested::Nested"; });
+
+ // test_register_duplicate_class
+ struct Duplicate {};
+ struct OtherDuplicate {};
+ struct DuplicateNested {};
+ struct OtherDuplicateNested {};
+ m.def("register_duplicate_class_name", [](py::module_ m) {
+ py::class_<Duplicate>(m, "Duplicate");
+ py::class_<OtherDuplicate>(m, "Duplicate");
+ });
+ m.def("register_duplicate_class_type", [](py::module_ m) {
+ py::class_<OtherDuplicate>(m, "OtherDuplicate");
+ py::class_<OtherDuplicate>(m, "YetAnotherDuplicate");
+ });
+ m.def("register_duplicate_nested_class_name", [](py::object gt) {
+ py::class_<DuplicateNested>(gt, "DuplicateNested");
+ py::class_<OtherDuplicateNested>(gt, "DuplicateNested");
+ });
+ m.def("register_duplicate_nested_class_type", [](py::object gt) {
+ py::class_<OtherDuplicateNested>(gt, "OtherDuplicateNested");
+ py::class_<OtherDuplicateNested>(gt, "YetAnotherDuplicateNested");
+ });
}
-template <int N> class BreaksBase { public: virtual ~BreaksBase() = default; };
+template <int N> class BreaksBase { public:
+ virtual ~BreaksBase() = default;
+ BreaksBase() = default;
+ BreaksBase(const BreaksBase&) = delete;
+};
template <int N> class BreaksTramp : public BreaksBase<N> {};
// These should all compile just fine:
-typedef py::class_<BreaksBase<1>, std::unique_ptr<BreaksBase<1>>, BreaksTramp<1>> DoesntBreak1;
-typedef py::class_<BreaksBase<2>, BreaksTramp<2>, std::unique_ptr<BreaksBase<2>>> DoesntBreak2;
-typedef py::class_<BreaksBase<3>, std::unique_ptr<BreaksBase<3>>> DoesntBreak3;
-typedef py::class_<BreaksBase<4>, BreaksTramp<4>> DoesntBreak4;
-typedef py::class_<BreaksBase<5>> DoesntBreak5;
-typedef py::class_<BreaksBase<6>, std::shared_ptr<BreaksBase<6>>, BreaksTramp<6>> DoesntBreak6;
-typedef py::class_<BreaksBase<7>, BreaksTramp<7>, std::shared_ptr<BreaksBase<7>>> DoesntBreak7;
-typedef py::class_<BreaksBase<8>, std::shared_ptr<BreaksBase<8>>> DoesntBreak8;
+using DoesntBreak1 = py::class_<BreaksBase<1>, std::unique_ptr<BreaksBase<1>>, BreaksTramp<1>>;
+using DoesntBreak2 = py::class_<BreaksBase<2>, BreaksTramp<2>, std::unique_ptr<BreaksBase<2>>>;
+using DoesntBreak3 = py::class_<BreaksBase<3>, std::unique_ptr<BreaksBase<3>>>;
+using DoesntBreak4 = py::class_<BreaksBase<4>, BreaksTramp<4>>;
+using DoesntBreak5 = py::class_<BreaksBase<5>>;
+using DoesntBreak6 = py::class_<BreaksBase<6>, std::shared_ptr<BreaksBase<6>>, BreaksTramp<6>>;
+using DoesntBreak7 = py::class_<BreaksBase<7>, BreaksTramp<7>, std::shared_ptr<BreaksBase<7>>>;
+using DoesntBreak8 = py::class_<BreaksBase<8>, std::shared_ptr<BreaksBase<8>>>;
#define CHECK_BASE(N) static_assert(std::is_same<typename DoesntBreak##N::type, BreaksBase<N>>::value, \
"DoesntBreak" #N " has wrong type!")
CHECK_BASE(1); CHECK_BASE(2); CHECK_BASE(3); CHECK_BASE(4); CHECK_BASE(5); CHECK_BASE(6); CHECK_BASE(7); CHECK_BASE(8);
+# -*- coding: utf-8 -*-
import pytest
+import env # noqa: F401
+
from pybind11_tests import class_ as m
from pybind11_tests import UserType, ConstructorStats
assert cstats.alive() == 0
+def test_type():
+ assert m.check_type(1) == m.DerivedClass1
+ with pytest.raises(RuntimeError) as execinfo:
+ m.check_type(0)
+
+ assert "pybind11::detail::get_type_info: unable to find type info" in str(
+ execinfo.value
+ )
+ assert "Invalid" in str(execinfo.value)
+
+ # Currently not supported
+ # See https://github.com/pybind/pybind11/issues/2486
+ # assert m.check_type(2) == int
+
+
+def test_type_of_py():
+ assert m.get_type_of(1) == int
+ assert m.get_type_of(m.DerivedClass1()) == m.DerivedClass1
+ assert m.get_type_of(int) == type
+
+
+def test_type_of_classic():
+ assert m.get_type_classic(1) == int
+ assert m.get_type_classic(m.DerivedClass1()) == m.DerivedClass1
+ assert m.get_type_classic(int) == type
+
+
+def test_type_of_py_nodelete():
+ # If the above test deleted the class, this will segfault
+ assert m.get_type_of(m.DerivedClass1()) == m.DerivedClass1
+
+
+def test_as_type_py():
+ assert m.as_type(int) == int
+
+ with pytest.raises(TypeError):
+ assert m.as_type(1) == int
+
+ with pytest.raises(TypeError):
+ assert m.as_type(m.DerivedClass1()) == m.DerivedClass1
+
+
def test_docstrings(doc):
assert doc(UserType) == "A `py::class_` type for testing"
assert UserType.__name__ == "UserType"
assert UserType.get_value.__name__ == "get_value"
assert UserType.get_value.__module__ == "pybind11_tests"
- assert doc(UserType.get_value) == """
+ assert (
+ doc(UserType.get_value)
+ == """
get_value(self: m.UserType) -> int
Get value using a method
"""
+ )
assert doc(UserType.value) == "Get/set value using a property"
- assert doc(m.NoConstructor.new_instance) == """
+ assert (
+ doc(m.NoConstructor.new_instance)
+ == """
new_instance() -> m.class_.NoConstructor
Return an instance
"""
+ )
def test_qualname(doc):
assert m.NestBase.__qualname__ == "NestBase"
assert m.NestBase.Nested.__qualname__ == "NestBase.Nested"
- assert doc(m.NestBase.__init__) == """
+ assert (
+ doc(m.NestBase.__init__)
+ == """
__init__(self: m.class_.NestBase) -> None
"""
- assert doc(m.NestBase.g) == """
+ )
+ assert (
+ doc(m.NestBase.g)
+ == """
g(self: m.class_.NestBase, arg0: m.class_.NestBase.Nested) -> None
"""
- assert doc(m.NestBase.Nested.__init__) == """
+ )
+ assert (
+ doc(m.NestBase.Nested.__init__)
+ == """
__init__(self: m.class_.NestBase.Nested) -> None
"""
- assert doc(m.NestBase.Nested.fn) == """
+ )
+ assert (
+ doc(m.NestBase.Nested.fn)
+ == """
fn(self: m.class_.NestBase.Nested, arg0: int, arg1: m.class_.NestBase, arg2: m.class_.NestBase.Nested) -> None
""" # noqa: E501 line too long
- assert doc(m.NestBase.Nested.fa) == """
+ )
+ assert (
+ doc(m.NestBase.Nested.fa)
+ == """
fa(self: m.class_.NestBase.Nested, a: int, b: m.class_.NestBase, c: m.class_.NestBase.Nested) -> None
""" # noqa: E501 line too long
+ )
assert m.NestBase.__module__ == "pybind11_tests.class_"
assert m.NestBase.Nested.__module__ == "pybind11_tests.class_"
def test_inheritance(msg):
- roger = m.Rabbit('Rabbit')
+ roger = m.Rabbit("Rabbit")
assert roger.name() + " is a " + roger.species() == "Rabbit is a parrot"
assert m.pet_name_species(roger) == "Rabbit is a parrot"
- polly = m.Pet('Polly', 'parrot')
+ polly = m.Pet("Polly", "parrot")
assert polly.name() + " is a " + polly.species() == "Polly is a parrot"
assert m.pet_name_species(polly) == "Polly is a parrot"
- molly = m.Dog('Molly')
+ molly = m.Dog("Molly")
assert molly.name() + " is a " + molly.species() == "Molly is a dog"
assert m.pet_name_species(molly) == "Molly is a dog"
- fred = m.Hamster('Fred')
+ fred = m.Hamster("Fred")
assert fred.name() + " is a " + fred.species() == "Fred is a rodent"
assert m.dog_bark(molly) == "Woof!"
with pytest.raises(TypeError) as excinfo:
m.dog_bark(polly)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
dog_bark(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.class_.Dog) -> str
Invoked with: <m.class_.Pet object at 0>
"""
+ )
with pytest.raises(TypeError) as excinfo:
m.Chimera("lion", "goat")
assert "No constructor defined!" in str(excinfo.value)
+def test_inheritance_init(msg):
+
+ # Single base
+ class Python(m.Pet):
+ def __init__(self):
+ pass
+
+ with pytest.raises(TypeError) as exc_info:
+ Python()
+ expected = "m.class_.Pet.__init__() must be called when overriding __init__"
+ assert msg(exc_info.value) == expected
+
+ # Multiple bases
+ class RabbitHamster(m.Rabbit, m.Hamster):
+ def __init__(self):
+ m.Rabbit.__init__(self, "RabbitHamster")
+
+ with pytest.raises(TypeError) as exc_info:
+ RabbitHamster()
+ expected = "m.class_.Hamster.__init__() must be called when overriding __init__"
+ assert msg(exc_info.value) == expected
+
+
def test_automatic_upcasting():
assert type(m.return_class_1()).__name__ == "DerivedClass1"
assert type(m.return_class_2()).__name__ == "DerivedClass2"
with pytest.raises(RuntimeError) as excinfo:
m.mismatched_holder_1()
- assert re.match('generic_type: type ".*MismatchDerived1" does not have a non-default '
- 'holder type while its base ".*MismatchBase1" does', str(excinfo.value))
+ assert re.match(
+ 'generic_type: type ".*MismatchDerived1" does not have a non-default '
+ 'holder type while its base ".*MismatchBase1" does',
+ str(excinfo.value),
+ )
with pytest.raises(RuntimeError) as excinfo:
m.mismatched_holder_2()
- assert re.match('generic_type: type ".*MismatchDerived2" has a non-default holder type '
- 'while its base ".*MismatchBase2" does not', str(excinfo.value))
+ assert re.match(
+ 'generic_type: type ".*MismatchDerived2" has a non-default holder type '
+ 'while its base ".*MismatchBase2" does not',
+ str(excinfo.value),
+ )
def test_override_static():
a = m.HasOpNewDel()
b = m.HasOpNewDelSize()
d = m.HasOpNewDelBoth()
- assert capture == """
+ assert (
+ capture
+ == """
A new 8
B new 4
D new 32
"""
+ )
sz_alias = str(m.AliasedHasOpNewDelSize.size_alias)
sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias)
with capture:
c = m.AliasedHasOpNewDelSize()
c2 = SubAliased()
- assert capture == (
- "C new " + sz_noalias + "\n" +
- "C new " + sz_alias + "\n"
- )
+ assert capture == ("C new " + sz_noalias + "\n" + "C new " + sz_alias + "\n")
with capture:
del a
pytest.gc_collect()
del d
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
A delete
B delete 4
D delete
"""
+ )
with capture:
del c
pytest.gc_collect()
del c2
pytest.gc_collect()
- assert capture == (
- "C delete " + sz_noalias + "\n" +
- "C delete " + sz_alias + "\n"
- )
+ assert capture == ("C delete " + sz_noalias + "\n" + "C delete " + sz_alias + "\n")
def test_bind_protected_functions():
assert b.vec == [123, 456]
-@pytest.unsupported_on_pypy
+@pytest.mark.xfail("env.PYPY")
def test_class_refcount():
"""Instances must correctly increase/decrease the reference count of their types (#1029)"""
from sys import getrefcount
# ensure that there is no runaway reentrant implicit conversion (#1035)
with pytest.raises(TypeError) as excinfo:
m.BogusImplicitConversion(0)
- assert msg(excinfo.value) == '''
+ assert (
+ msg(excinfo.value)
+ == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.class_.BogusImplicitConversion(arg0: m.class_.BogusImplicitConversion)
Invoked with: 0
- '''
+ """
+ )
def test_error_after_conversions():
with pytest.raises(TypeError) as exc_info:
m.test_error_after_conversions("hello")
assert str(exc_info.value).startswith(
- "Unable to convert function return value to a Python type!")
+ "Unable to convert function return value to a Python type!"
+ )
def test_aligned():
if hasattr(m, "Aligned"):
p = m.Aligned().ptr()
assert p % 1024 == 0
+
+
+# https://foss.heptapod.net/pypy/pypy/-/issues/2742
+@pytest.mark.xfail("env.PYPY")
+def test_final():
+ with pytest.raises(TypeError) as exc_info:
+
+ class PyFinalChild(m.IsFinal):
+ pass
+
+ assert str(exc_info.value).endswith("is not an acceptable base type")
+
+
+# https://foss.heptapod.net/pypy/pypy/-/issues/2742
+@pytest.mark.xfail("env.PYPY")
+def test_non_final_final():
+ with pytest.raises(TypeError) as exc_info:
+
+ class PyNonFinalFinalChild(m.IsNonFinalFinal):
+ pass
+
+ assert str(exc_info.value).endswith("is not an acceptable base type")
+
+
+# https://github.com/pybind/pybind11/issues/1878
+def test_exception_rvalue_abort():
+ with pytest.raises(RuntimeError):
+ m.PyPrintDestructor().throw_something()
+
+
+# https://github.com/pybind/pybind11/issues/1568
+def test_multiple_instances_with_same_pointer(capture):
+ n = 100
+ instances = [m.SamePointer() for _ in range(n)]
+ for i in range(n):
+        # We need to reuse the same allocated memory with a different type,
+ # to ensure the bug in `deregister_instance_impl` is detected. Otherwise
+ # `Py_TYPE(self) == Py_TYPE(it->second)` will still succeed, even though
+ # the `instance` is already deleted.
+ instances[i] = m.Empty()
+ # No assert: if this does not trigger the error
+ # pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
+ # and just completes without crashing, we're good.
+
+
+# https://github.com/pybind/pybind11/issues/1624
+def test_base_and_derived_nested_scope():
+ assert issubclass(m.DerivedWithNested, m.BaseWithNested)
+ assert m.BaseWithNested.Nested != m.DerivedWithNested.Nested
+ assert m.BaseWithNested.Nested.get_name() == "BaseWithNested::Nested"
+ assert m.DerivedWithNested.Nested.get_name() == "DerivedWithNested::Nested"
+
+
+def test_register_duplicate_class():
+ import types
+
+ module_scope = types.ModuleType("module_scope")
+ with pytest.raises(RuntimeError) as exc_info:
+ m.register_duplicate_class_name(module_scope)
+ expected = (
+ 'generic_type: cannot initialize type "Duplicate": '
+ "an object with that name is already defined"
+ )
+ assert str(exc_info.value) == expected
+ with pytest.raises(RuntimeError) as exc_info:
+ m.register_duplicate_class_type(module_scope)
+ expected = 'generic_type: type "YetAnotherDuplicate" is already registered!'
+ assert str(exc_info.value) == expected
+
+ class ClassScope:
+ pass
+
+ with pytest.raises(RuntimeError) as exc_info:
+ m.register_duplicate_nested_class_name(ClassScope)
+ expected = (
+ 'generic_type: cannot initialize type "DuplicateNested": '
+ "an object with that name is already defined"
+ )
+ assert str(exc_info.value) == expected
+ with pytest.raises(RuntimeError) as exc_info:
+ m.register_duplicate_nested_class_type(ClassScope)
+ expected = 'generic_type: type "YetAnotherDuplicateNested" is already registered!'
+ assert str(exc_info.value) == expected
-add_custom_target(test_cmake_build)
+# Built-in in CMake 3.5+
+include(CMakeParseArguments)
-if(CMAKE_VERSION VERSION_LESS 3.1)
- # 3.0 needed for interface library for subdirectory_target/installed_target
- # 3.1 needed for cmake -E env for testing
- return()
-endif()
+add_custom_target(test_cmake_build)
-include(CMakeParseArguments)
function(pybind11_add_build_test name)
cmake_parse_arguments(ARG "INSTALL" "" "" ${ARGN})
- set(build_options "-DCMAKE_PREFIX_PATH=${PROJECT_BINARY_DIR}/mock_install"
- "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DPYTHON_EXECUTABLE:FILEPATH=${PYTHON_EXECUTABLE}"
- "-DPYBIND11_CPP_STANDARD=${PYBIND11_CPP_STANDARD}")
+ set(build_options "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
+
+ if(PYBIND11_FINDPYTHON)
+ list(APPEND build_options "-DPYBIND11_FINDPYTHON=${PYBIND11_FINDPYTHON}")
+
+ if(DEFINED Python_ROOT_DIR)
+ list(APPEND build_options "-DPython_ROOT_DIR=${Python_ROOT_DIR}")
+ endif()
+
+ list(APPEND build_options "-DPython_EXECUTABLE=${Python_EXECUTABLE}")
+ else()
+ list(APPEND build_options "-DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}")
+ endif()
+
+ if(DEFINED CMAKE_CXX_STANDARD)
+ list(APPEND build_options "-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}")
+ endif()
+
if(NOT ARG_INSTALL)
- list(APPEND build_options "-DPYBIND11_PROJECT_DIR=${PROJECT_SOURCE_DIR}")
+ list(APPEND build_options "-Dpybind11_SOURCE_DIR=${pybind11_SOURCE_DIR}")
+ else()
+ list(APPEND build_options "-DCMAKE_PREFIX_PATH=${pybind11_BINARY_DIR}/mock_install")
endif()
- add_custom_target(test_${name} ${CMAKE_CTEST_COMMAND}
- --quiet --output-log ${name}.log
- --build-and-test "${CMAKE_CURRENT_SOURCE_DIR}/${name}"
- "${CMAKE_CURRENT_BINARY_DIR}/${name}"
- --build-config Release
+ add_custom_target(
+ test_build_${name}
+ ${CMAKE_CTEST_COMMAND}
+ --build-and-test
+ "${CMAKE_CURRENT_SOURCE_DIR}/${name}"
+ "${CMAKE_CURRENT_BINARY_DIR}/${name}"
+ --build-config
+ Release
--build-noclean
- --build-generator ${CMAKE_GENERATOR}
- $<$<BOOL:${CMAKE_GENERATOR_PLATFORM}>:--build-generator-platform> ${CMAKE_GENERATOR_PLATFORM}
- --build-makeprogram ${CMAKE_MAKE_PROGRAM}
- --build-target check
- --build-options ${build_options}
- )
+ --build-generator
+ ${CMAKE_GENERATOR}
+ $<$<BOOL:${CMAKE_GENERATOR_PLATFORM}>:--build-generator-platform>
+ ${CMAKE_GENERATOR_PLATFORM}
+ --build-makeprogram
+ ${CMAKE_MAKE_PROGRAM}
+ --build-target
+ check_${name}
+ --build-options
+ ${build_options})
if(ARG_INSTALL)
- add_dependencies(test_${name} mock_install)
+ add_dependencies(test_build_${name} mock_install)
endif()
- add_dependencies(test_cmake_build test_${name})
+ add_dependencies(test_cmake_build test_build_${name})
endfunction()
+possibly_uninitialized(PYTHON_MODULE_EXTENSION Python_INTERPRETER_ID)
+
pybind11_add_build_test(subdirectory_function)
pybind11_add_build_test(subdirectory_target)
-if(NOT ${PYTHON_MODULE_EXTENSION} MATCHES "pypy")
+if("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy")
+ message(STATUS "Skipping embed test on PyPy")
+else()
pybind11_add_build_test(subdirectory_embed)
endif()
if(PYBIND11_INSTALL)
- add_custom_target(mock_install ${CMAKE_COMMAND}
- "-DCMAKE_INSTALL_PREFIX=${PROJECT_BINARY_DIR}/mock_install"
- -P "${PROJECT_BINARY_DIR}/cmake_install.cmake"
- )
+ add_custom_target(
+ mock_install ${CMAKE_COMMAND} "-DCMAKE_INSTALL_PREFIX=${pybind11_BINARY_DIR}/mock_install" -P
+ "${pybind11_BINARY_DIR}/cmake_install.cmake")
pybind11_add_build_test(installed_function INSTALL)
pybind11_add_build_test(installed_target INSTALL)
- if(NOT ${PYTHON_MODULE_EXTENSION} MATCHES "pypy")
+ if(NOT ("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy"
+ ))
pybind11_add_build_test(installed_embed INSTALL)
endif()
endif()
add_dependencies(check test_cmake_build)
+
+add_subdirectory(subdirectory_target EXCLUDE_FROM_ALL)
+add_subdirectory(subdirectory_embed EXCLUDE_FROM_ALL)
py::scoped_interpreter guard{};
- auto m = py::module::import("test_cmake_build");
+ auto m = py::module_::import("test_cmake_build");
if (m.attr("add")(1, 2).cast<int>() != 3)
throw std::runtime_error("embed.cpp failed");
- py::module::import("sys").attr("argv") = py::make_tuple("test.py", "embed.cpp");
+ py::module_::import("sys").attr("argv") = py::make_tuple("test.py", "embed.cpp");
py::eval_file(test_py_file, py::globals());
}
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.4)
+
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
+
project(test_installed_embed CXX)
-set(CMAKE_MODULE_PATH "")
find_package(pybind11 CONFIG REQUIRED)
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}")
-add_executable(test_cmake_build ../embed.cpp)
-target_link_libraries(test_cmake_build PRIVATE pybind11::embed)
+add_executable(test_installed_embed ../embed.cpp)
+target_link_libraries(test_installed_embed PRIVATE pybind11::embed)
+set_target_properties(test_installed_embed PROPERTIES OUTPUT_NAME test_cmake_build)
# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::embed).
# This may be needed to resolve header conflicts, e.g. between Python release and debug headers.
-set_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)
+set_target_properties(test_installed_embed PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)
-add_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py)
+add_custom_target(check_installed_embed $<TARGET_FILE:test_installed_embed>
+ ${PROJECT_SOURCE_DIR}/../test.py)
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.4)
project(test_installed_module CXX)
-set(CMAKE_MODULE_PATH "")
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
+
+project(test_installed_function CXX)
find_package(pybind11 CONFIG REQUIRED)
-message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}")
+message(
+ STATUS "Found pybind11 v${pybind11_VERSION} ${pybind11_VERSION_TYPE}: ${pybind11_INCLUDE_DIRS}")
+
+pybind11_add_module(test_installed_function SHARED NO_EXTRAS ../main.cpp)
+set_target_properties(test_installed_function PROPERTIES OUTPUT_NAME test_cmake_build)
-pybind11_add_module(test_cmake_build SHARED NO_EXTRAS ../main.cpp)
+if(DEFINED Python_EXECUTABLE)
+ set(_Python_EXECUTABLE "${Python_EXECUTABLE}")
+elseif(DEFINED PYTHON_EXECUTABLE)
+ set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
+else()
+ message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)")
+endif()
-add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>
- ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})
+add_custom_target(
+ check_installed_function
+ ${CMAKE_COMMAND}
+ -E
+ env
+ PYTHONPATH=$<TARGET_FILE_DIR:test_installed_function>
+ ${_Python_EXECUTABLE}
+ ${PROJECT_SOURCE_DIR}/../test.py
+ ${PROJECT_NAME})
-cmake_minimum_required(VERSION 3.0)
-project(test_installed_target CXX)
+cmake_minimum_required(VERSION 3.4)
+
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
-set(CMAKE_MODULE_PATH "")
+project(test_installed_target CXX)
find_package(pybind11 CONFIG REQUIRED)
message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}")
-add_library(test_cmake_build MODULE ../main.cpp)
+add_library(test_installed_target MODULE ../main.cpp)
-target_link_libraries(test_cmake_build PRIVATE pybind11::module)
+target_link_libraries(test_installed_target PRIVATE pybind11::module)
+set_target_properties(test_installed_target PROPERTIES OUTPUT_NAME test_cmake_build)
-# make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib
-set_target_properties(test_cmake_build PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
+# Make sure result is, for example, test_cmake_build.so, not libtest_cmake_build.dylib
+pybind11_extension(test_installed_target)
# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::module).
# This may be needed to resolve header conflicts, e.g. between Python release and debug headers.
-set_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)
+set_target_properties(test_installed_target PROPERTIES NO_SYSTEM_FROM_IMPORTED ON)
+
+if(DEFINED Python_EXECUTABLE)
+ set(_Python_EXECUTABLE "${Python_EXECUTABLE}")
+elseif(DEFINED PYTHON_EXECUTABLE)
+ set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
+else()
+ message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)")
+endif()
-add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>
- ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})
+add_custom_target(
+ check_installed_target
+ ${CMAKE_COMMAND}
+ -E
+ env
+ PYTHONPATH=$<TARGET_FILE_DIR:test_installed_target>
+ ${_Python_EXECUTABLE}
+ ${PROJECT_SOURCE_DIR}/../test.py
+ ${PROJECT_NAME})
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.4)
+
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
+
project(test_subdirectory_embed CXX)
-set(PYBIND11_INSTALL ON CACHE BOOL "")
+set(PYBIND11_INSTALL
+ ON
+ CACHE BOOL "")
set(PYBIND11_EXPORT_NAME test_export)
-add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)
+add_subdirectory("${pybind11_SOURCE_DIR}" pybind11)
# Test basic target functionality
-add_executable(test_cmake_build ../embed.cpp)
-target_link_libraries(test_cmake_build PRIVATE pybind11::embed)
+add_executable(test_subdirectory_embed ../embed.cpp)
+target_link_libraries(test_subdirectory_embed PRIVATE pybind11::embed)
+set_target_properties(test_subdirectory_embed PROPERTIES OUTPUT_NAME test_cmake_build)
-add_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py)
+add_custom_target(check_subdirectory_embed $<TARGET_FILE:test_subdirectory_embed>
+ "${PROJECT_SOURCE_DIR}/../test.py")
# Test custom export group -- PYBIND11_EXPORT_NAME
add_library(test_embed_lib ../embed.cpp)
target_link_libraries(test_embed_lib PRIVATE pybind11::embed)
-install(TARGETS test_embed_lib
- EXPORT test_export
- ARCHIVE DESTINATION bin
- LIBRARY DESTINATION lib
- RUNTIME DESTINATION lib)
-install(EXPORT test_export
- DESTINATION lib/cmake/test_export/test_export-Targets.cmake)
+install(
+ TARGETS test_embed_lib
+ EXPORT test_export
+ ARCHIVE DESTINATION bin
+ LIBRARY DESTINATION lib
+ RUNTIME DESTINATION lib)
+install(EXPORT test_export DESTINATION lib/cmake/test_export/test_export-Targets.cmake)
-cmake_minimum_required(VERSION 2.8.12)
-project(test_subdirectory_module CXX)
+cmake_minimum_required(VERSION 3.4)
-add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)
-pybind11_add_module(test_cmake_build THIN_LTO ../main.cpp)
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
-add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>
- ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})
+project(test_subdirectory_function CXX)
+
+add_subdirectory("${pybind11_SOURCE_DIR}" pybind11)
+pybind11_add_module(test_subdirectory_function ../main.cpp)
+set_target_properties(test_subdirectory_function PROPERTIES OUTPUT_NAME test_cmake_build)
+
+if(DEFINED Python_EXECUTABLE)
+ set(_Python_EXECUTABLE "${Python_EXECUTABLE}")
+elseif(DEFINED PYTHON_EXECUTABLE)
+ set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
+else()
+ message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)")
+endif()
+
+add_custom_target(
+ check_subdirectory_function
+ ${CMAKE_COMMAND}
+ -E
+ env
+ PYTHONPATH=$<TARGET_FILE_DIR:test_subdirectory_function>
+ ${_Python_EXECUTABLE}
+ ${PROJECT_SOURCE_DIR}/../test.py
+ ${PROJECT_NAME})
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.4)
+
+# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
+# some versions of VS that have a patched CMake 3.11. This forces us to emulate
+# the behavior using the following workaround:
+if(${CMAKE_VERSION} VERSION_LESS 3.18)
+ cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+ cmake_policy(VERSION 3.18)
+endif()
+
project(test_subdirectory_target CXX)
-add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11)
+add_subdirectory("${pybind11_SOURCE_DIR}" pybind11)
+
+add_library(test_subdirectory_target MODULE ../main.cpp)
+set_target_properties(test_subdirectory_target PROPERTIES OUTPUT_NAME test_cmake_build)
-add_library(test_cmake_build MODULE ../main.cpp)
+target_link_libraries(test_subdirectory_target PRIVATE pybind11::module)
-target_link_libraries(test_cmake_build PRIVATE pybind11::module)
+# Make sure result is, for example, test_cmake_build.so, not libtest_cmake_build.dylib
+pybind11_extension(test_subdirectory_target)
-# make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib
-set_target_properties(test_cmake_build PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
- SUFFIX "${PYTHON_MODULE_EXTENSION}")
+if(DEFINED Python_EXECUTABLE)
+ set(_Python_EXECUTABLE "${Python_EXECUTABLE}")
+elseif(DEFINED PYTHON_EXECUTABLE)
+ set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
+else()
+ message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)")
+endif()
-add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build>
- ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME})
+add_custom_target(
+ check_subdirectory_target
+ ${CMAKE_COMMAND}
+ -E
+ env
+ PYTHONPATH=$<TARGET_FILE_DIR:test_subdirectory_target>
+ ${_Python_EXECUTABLE}
+ ${PROJECT_SOURCE_DIR}/../test.py
+ ${PROJECT_NAME})
+# -*- coding: utf-8 -*-
import sys
import test_cmake_build
// Test that we properly handle C++17 exception specifiers (which are part of the function signature
// in C++17). These should all still work before C++17, but don't affect the function signature.
namespace test_exc_sp {
+// [workaround(intel)] Unable to use noexcept instead of noexcept(true)
+// Make the f1 test basically the same as the f2 test in C++17 mode for the Intel compiler as
+// it fails to compile with a plain noexcept (tested with icc (ICC) 2021.1 Beta 20200827).
+#if defined(__INTEL_COMPILER) && defined(PYBIND11_CPP17)
+int f1(int x) noexcept(true) { return x+1; }
+#else
int f1(int x) noexcept { return x+1; }
+#endif
int f2(int x) noexcept(true) { return x+2; }
int f3(int x) noexcept(false) { return x+3; }
#if defined(__GNUG__)
# pragma GCC diagnostic pop
#endif
};
-}
+} // namespace test_exc_sp
TEST_SUBMODULE(constants_and_functions, m) {
m.def("f2", f2);
m.def("f3", f3);
m.def("f4", f4);
+
+ // test_function_record_leaks
+ struct LargeCapture {
+ // This should always be enough to trigger the alternative branch
+ // where `sizeof(capture) > sizeof(rec->data)`
+ uint64_t zeros[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ };
+ m.def("register_large_capture_with_invalid_arguments", [](py::module_ m) {
+ LargeCapture capture; // VS 2015's MSVC is acting up if we create the array here
+ m.def("should_raise", [capture](int) { return capture.zeros[9] + 33; }, py::kw_only(), py::arg());
+ });
+ m.def("register_with_raising_repr", [](py::module_ m, py::object default_value) {
+ m.def("should_raise", [](int, int, py::object) { return 42; }, "some docstring",
+ py::arg_v("x", 42), py::arg_v("y", 42, "<the answer>"), py::arg_v("z", default_value));
+ });
}
-from pybind11_tests import constants_and_functions as m
+# -*- coding: utf-8 -*-
+import pytest
+
+m = pytest.importorskip("pybind11_tests.constants_and_functions")
def test_constants():
assert m.f2(53) == 55
assert m.f3(86) == 89
assert m.f4(140) == 144
+
+
+def test_function_record_leaks():
+ class RaisingRepr:
+ def __repr__(self):
+ raise RuntimeError("Surprise!")
+
+ with pytest.raises(RuntimeError):
+ m.register_large_capture_with_invalid_arguments(m)
+ with pytest.raises(RuntimeError):
+ m.register_with_raising_repr(m, RaisingRepr())
};
struct lacking_copy_ctor : public empty<lacking_copy_ctor> {
- lacking_copy_ctor() {}
+ lacking_copy_ctor() = default;
lacking_copy_ctor(const lacking_copy_ctor& other) = delete;
};
template <> lacking_copy_ctor empty<lacking_copy_ctor>::instance_ = {};
struct lacking_move_ctor : public empty<lacking_move_ctor> {
- lacking_move_ctor() {}
+ lacking_move_ctor() = default;
lacking_move_ctor(const lacking_move_ctor& other) = delete;
lacking_move_ctor(lacking_move_ctor&& other) = delete;
};
int value;
};
-NAMESPACE_BEGIN(pybind11)
-NAMESPACE_BEGIN(detail)
+PYBIND11_NAMESPACE_BEGIN(pybind11)
+PYBIND11_NAMESPACE_BEGIN(detail)
template <> struct type_caster<MoveOnlyInt> {
PYBIND11_TYPE_CASTER(MoveOnlyInt, _("MoveOnlyInt"));
bool load(handle src, bool) { value = MoveOnlyInt(src.cast<int>()); return true; }
operator CopyOnlyInt&() { return value; }
template <typename T> using cast_op_type = pybind11::detail::cast_op_type<T>;
};
-NAMESPACE_END(detail)
-NAMESPACE_END(pybind11)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(pybind11)
TEST_SUBMODULE(copy_move_policies, m) {
// test_lacking_copy_ctor
r += py::cast<MoveOrCopyInt>(o).value; /* moves */
r += py::cast<MoveOnlyInt>(o).value; /* moves */
r += py::cast<CopyOnlyInt>(o).value; /* copies */
- MoveOrCopyInt m1(py::cast<MoveOrCopyInt>(o)); /* moves */
- MoveOnlyInt m2(py::cast<MoveOnlyInt>(o)); /* moves */
- CopyOnlyInt m3(py::cast<CopyOnlyInt>(o)); /* copies */
+ auto m1(py::cast<MoveOrCopyInt>(o)); /* moves */
+ auto m2(py::cast<MoveOnlyInt>(o)); /* moves */
+ auto m3(py::cast<CopyOnlyInt>(o)); /* copies */
r += m1.value + m2.value + m3.value;
return r;
m.attr("has_optional") = false;
#endif
- // #70 compilation issue if operator new is not public
+ // #70 compilation issue if operator new is not public - simple body added
+ // but not needed on most compilers; MSVC and nvcc don't like a local
+    // struct not having a method defined when declared, since it cannot be
+ // added later.
struct PrivateOpNew {
int value = 1;
private:
-#if defined(_MSC_VER)
-# pragma warning(disable: 4822) // warning C4822: local class member function does not have a body
-#endif
- void *operator new(size_t bytes);
+ void *operator new(size_t bytes) {
+ void *ptr = std::malloc(bytes);
+ if (ptr)
+ return ptr;
+ else
+ throw std::bad_alloc{};
+ }
};
py::class_<PrivateOpNew>(m, "PrivateOpNew").def_readonly("value", &PrivateOpNew::value);
m.def("private_op_new_value", []() { return PrivateOpNew(); });
};
py::class_<MoveIssue2>(m, "MoveIssue2").def(py::init<int>()).def_readwrite("value", &MoveIssue2::v);
- m.def("get_moveissue1", [](int i) { return new MoveIssue1(i); }, py::return_value_policy::move);
+ // #2742: Don't expect ownership of raw pointer to `new`ed object to be transferred with `py::return_value_policy::move`
+ m.def("get_moveissue1", [](int i) { return std::unique_ptr<MoveIssue1>(new MoveIssue1(i)); }, py::return_value_policy::move);
m.def("get_moveissue2", [](int i) { return MoveIssue2(i); }, py::return_value_policy::move);
}
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import copy_move_policies as m
def test_lacking_copy_ctor():
with pytest.raises(RuntimeError) as excinfo:
m.lacking_copy_ctor.get_one()
- assert "the object is non-copyable!" in str(excinfo.value)
+ assert "is non-copyable!" in str(excinfo.value)
def test_lacking_move_ctor():
with pytest.raises(RuntimeError) as excinfo:
m.lacking_move_ctor.get_one()
- assert "the object is neither movable nor copyable!" in str(excinfo.value)
+ assert "is neither movable nor copyable!" in str(excinfo.value)
def test_move_and_copy_casts():
"""Cast some values in C++ via custom type casters and count the number of moves/copies."""
cstats = m.move_and_copy_cstats()
- c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
+ c_m, c_mc, c_c = (
+ cstats["MoveOnlyInt"],
+ cstats["MoveOrCopyInt"],
+ cstats["CopyOnlyInt"],
+ )
# The type move constructions/assignments below each get incremented: the move assignment comes
# from the type_caster load; the move construction happens when extracting that via a cast or
moves/copies."""
cstats = m.move_and_copy_cstats()
- c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
+ c_m, c_mc, c_c = (
+ cstats["MoveOnlyInt"],
+ cstats["MoveOrCopyInt"],
+ cstats["CopyOnlyInt"],
+ )
assert m.move_only(10) == 10 # 1 move, c_m
assert m.move_or_copy(11) == 11 # 1 move, c_mc
assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
-@pytest.mark.skipif(not m.has_optional, reason='no <optional>')
+@pytest.mark.skipif(not m.has_optional, reason="no <optional>")
def test_move_and_copy_load_optional():
"""Tests move/copy loads of std::optional arguments"""
cstats = m.move_and_copy_cstats()
- c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
+ c_m, c_mc, c_c = (
+ cstats["MoveOnlyInt"],
+ cstats["MoveOrCopyInt"],
+ cstats["CopyOnlyInt"],
+ )
# The extra move/copy constructions below come from the std::optional move (which has to move
# its arguments):
with pytest.raises(RuntimeError) as excinfo:
m.private_op_new_value()
- assert "the object is neither movable nor copyable" in str(excinfo.value)
+ assert "is neither movable nor copyable" in str(excinfo.value)
assert m.private_op_new_reference().value == 1
def test_move_fallback():
"""#389: rvp::move should fall-through to copy on non-movable objects"""
- m2 = m.get_moveissue2(2)
- assert m2.value == 2
m1 = m.get_moveissue1(1)
assert m1.value == 1
+ m2 = m.get_moveissue2(2)
+ assert m2.value == 2
--- /dev/null
+/*
+ tests/test_custom_type_casters.cpp -- tests type_caster<T>
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#include "pybind11_tests.h"
+#include "constructor_stats.h"
+
+
+// py::arg/py::arg_v testing: these arguments just record their argument when invoked
+class ArgInspector1 { public: std::string arg = "(default arg inspector 1)"; };
+class ArgInspector2 { public: std::string arg = "(default arg inspector 2)"; };
+class ArgAlwaysConverts { };
+namespace pybind11 { namespace detail {
+// Custom caster for ArgInspector1: load() always succeeds and records, in the
+// value's `arg` string, whether conversion was allowed for this dispatch pass
+// and what the incoming Python argument was.  test_noconvert_args reads this
+// string back to verify py::arg().noconvert() behavior.
+template <> struct type_caster<ArgInspector1> {
+public:
+    PYBIND11_TYPE_CASTER(ArgInspector1, _("ArgInspector1"));
+
+    // Always succeeds; stashes a description of the load attempt into value.arg.
+    bool load(handle src, bool convert) {
+        value.arg = "loading ArgInspector1 argument " +
+            std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
+            "Argument value = " + (std::string) str(src);
+        return true;
+    }
+
+    // Casting back to Python returns the recorded description string.
+    static handle cast(const ArgInspector1 &src, return_value_policy, handle) {
+        return str(src.arg).release();
+    }
+};
+// Same recording caster as ArgInspector1, for a second distinct argument type
+// so tests can mix noconvert/convert arguments in a single signature.
+template <> struct type_caster<ArgInspector2> {
+public:
+    PYBIND11_TYPE_CASTER(ArgInspector2, _("ArgInspector2"));
+
+    // Always succeeds; records whether conversion was permitted plus the value.
+    bool load(handle src, bool convert) {
+        value.arg = "loading ArgInspector2 argument " +
+            std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
+            "Argument value = " + (std::string) str(src);
+        return true;
+    }
+
+    // Casting back to Python returns the recorded description string.
+    static handle cast(const ArgInspector2 &src, return_value_policy, handle) {
+        return str(src.arg).release();
+    }
+};
+// Caster that deliberately FAILS the first (no-conversion) overload pass and
+// succeeds on the second (conversion-allowed) pass: load() just returns
+// `convert`.  Adding an ArgAlwaysConverts parameter thus forces calls through
+// the conversion dispatch, which is what the inspector tests want to observe.
+template <> struct type_caster<ArgAlwaysConverts> {
+public:
+    PYBIND11_TYPE_CASTER(ArgAlwaysConverts, _("ArgAlwaysConverts"));
+
+    // Succeeds only when conversion is allowed.
+    bool load(handle, bool convert) {
+        return convert;
+    }
+
+    // Nothing meaningful to return; yields None.
+    static handle cast(const ArgAlwaysConverts &, return_value_policy, handle) {
+        return py::none().release();
+    }
+};
+} // namespace detail
+} // namespace pybind11
+
+// test_custom_caster_destruction
+// Type whose every special member function logs to ConstructorStats, so the
+// Python side can count constructions/destructions and verify that
+// return_value_policy::take_ownership destroys custom-caster-converted objects.
+class DestructionTester {
+public:
+    DestructionTester() { print_default_created(this); }
+    ~DestructionTester() { print_destroyed(this); }
+    DestructionTester(const DestructionTester &) { print_copy_created(this); }
+    DestructionTester(DestructionTester &&) { print_move_created(this); }
+    DestructionTester &operator=(const DestructionTester &) { print_copy_assigned(this); return *this; }
+    DestructionTester &operator=(DestructionTester &&) { print_move_assigned(this); return *this; }
+};
+namespace pybind11 { namespace detail {
+template <> struct type_caster<DestructionTester> {
+ PYBIND11_TYPE_CASTER(DestructionTester, _("DestructionTester"));
+ bool load(handle, bool) { return true; }
+
+ static handle cast(const DestructionTester &, return_value_policy, handle) {
+ return py::bool_(true).release();
+ }
+};
+} // namespace detail
+} // namespace pybind11
+
+// Bindings exercised by tests/test_custom_type_casters.py.
+TEST_SUBMODULE(custom_type_casters, m) {
+    // test_custom_type_casters
+
+    // test_noconvert_args
+    //
+    // Test converting. The ArgAlwaysConverts is just there to make the first no-conversion pass
+    // fail so that our call always ends up happening via the second dispatch (the one that allows
+    // some conversion).
+    class ArgInspector {
+    public:
+        ArgInspector1 f(ArgInspector1 a, ArgAlwaysConverts) { return a; }
+        std::string g(ArgInspector1 a, const ArgInspector1 &b, int c, ArgInspector2 *d, ArgAlwaysConverts) {
+            return a.arg + "\n" + b.arg + "\n" + std::to_string(c) + "\n" + d->arg;
+        }
+        static ArgInspector2 h(ArgInspector2 a, ArgAlwaysConverts) { return a; }
+    };
+    // [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
+    py::class_<ArgInspector>(m, "ArgInspector")
+        .def(py::init<>())
+        .def("f", &ArgInspector::f, py::arg(), py::arg() = ArgAlwaysConverts())
+        .def("g", &ArgInspector::g, "a"_a.noconvert(), "b"_a, "c"_a.noconvert()=13, "d"_a=ArgInspector2(), py::arg() = ArgAlwaysConverts())
+        .def_static("h", &ArgInspector::h, py::arg{}.noconvert(), py::arg() = ArgAlwaysConverts())
+        ;
+    m.def("arg_inspect_func", [](ArgInspector2 a, ArgInspector1 b, ArgAlwaysConverts) { return a.arg + "\n" + b.arg; },
+          py::arg{}.noconvert(false), py::arg_v(nullptr, ArgInspector1()).noconvert(true), py::arg() = ArgAlwaysConverts());
+
+    // Plain numeric functions used to check noconvert on builtin casters.
+    m.def("floats_preferred", [](double f) { return 0.5 * f; }, "f"_a);
+    m.def("floats_only", [](double f) { return 0.5 * f; }, "f"_a.noconvert());
+    m.def("ints_preferred", [](int i) { return i / 2; }, "i"_a);
+    m.def("ints_only", [](int i) { return i / 2; }, "i"_a.noconvert());
+
+    // test_custom_caster_destruction
+    // Test that `take_ownership` works on types with a custom type caster when given a pointer
+
+    // default policy: don't take ownership:
+    m.def("custom_caster_no_destroy", []() { static auto *dt = new DestructionTester(); return dt; });
+
+    m.def("custom_caster_destroy", []() { return new DestructionTester(); },
+          py::return_value_policy::take_ownership); // Takes ownership: destroy when finished
+    m.def("custom_caster_destroy_const", []() -> const DestructionTester * { return new DestructionTester(); },
+          py::return_value_policy::take_ownership); // Likewise (const doesn't inhibit destruction)
+    m.def("destruction_tester_cstats", &ConstructorStats::get<DestructionTester>, py::return_value_policy::reference);
+}
--- /dev/null
+# -*- coding: utf-8 -*-
+import pytest
+from pybind11_tests import custom_type_casters as m
+
+
+def test_noconvert_args(msg):
+    """Verify py::arg().noconvert(): the ArgInspector casters record whether a
+    load was attempted WITH or WITHOUT conversion, and the numeric functions
+    check that noconvert rejects otherwise-convertible arguments."""
+    a = m.ArgInspector()
+    assert (
+        msg(a.f("hi"))
+        == """
+        loading ArgInspector1 argument WITH conversion allowed. Argument value = hi
+    """
+    )
+    assert (
+        msg(a.g("this is a", "this is b"))
+        == """
+        loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
+        loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
+        13
+        loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
+    """  # noqa: E501 line too long
+    )
+    assert (
+        msg(a.g("this is a", "this is b", 42))
+        == """
+        loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
+        loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
+        42
+        loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
+    """  # noqa: E501 line too long
+    )
+    assert (
+        msg(a.g("this is a", "this is b", 42, "this is d"))
+        == """
+        loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
+        loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
+        42
+        loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d
+    """
+    )
+    assert (
+        a.h("arg 1")
+        == "loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1"
+    )
+    assert (
+        msg(m.arg_inspect_func("A1", "A2"))
+        == """
+        loading ArgInspector2 argument WITH conversion allowed. Argument value = A1
+        loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2
+    """
+    )
+
+    # Builtin-caster noconvert: int is convertible to float only when allowed.
+    assert m.floats_preferred(4) == 2.0
+    assert m.floats_only(4.0) == 2.0
+    with pytest.raises(TypeError) as excinfo:
+        m.floats_only(4)
+    assert (
+        msg(excinfo.value)
+        == """
+        floats_only(): incompatible function arguments. The following argument types are supported:
+            1. (f: float) -> float

+        Invoked with: 4
+    """
+    )
+
+    assert m.ints_preferred(4) == 2
+    assert m.ints_preferred(True) == 0
+    with pytest.raises(TypeError) as excinfo:
+        m.ints_preferred(4.0)
+    assert (
+        msg(excinfo.value)
+        == """
+        ints_preferred(): incompatible function arguments. The following argument types are supported:
+            1. (i: int) -> int

+        Invoked with: 4.0
+    """  # noqa: E501 line too long
+    )
+
+    assert m.ints_only(4) == 2
+    with pytest.raises(TypeError) as excinfo:
+        m.ints_only(4.0)
+    assert (
+        msg(excinfo.value)
+        == """
+        ints_only(): incompatible function arguments. The following argument types are supported:
+            1. (i: int) -> int

+        Invoked with: 4.0
+    """
+    )
+
+
+def test_custom_caster_destruction():
+    """Tests that returning a pointer to a type that gets converted with a custom type caster gets
+    destroyed when the function has py::return_value_policy::take_ownership policy applied."""
+
+    # cstats is returned by reference, so it tracks live DestructionTester objects.
+    cstats = m.destruction_tester_cstats()
+    # This one *doesn't* have take_ownership: the pointer should be used but not destroyed:
+    z = m.custom_caster_no_destroy()
+    assert cstats.alive() == 1 and cstats.default_constructions == 1
+    assert z
+
+    # take_ownership applied: this constructs a new object, casts it, then destroys it:
+    z = m.custom_caster_destroy()
+    assert z
+    assert cstats.default_constructions == 2
+
+    # Same, but with a const pointer return (which should *not* inhibit destruction):
+    z = m.custom_caster_destroy_const()
+    assert z
+    assert cstats.default_constructions == 3
+
+    # Make sure we still only have the original object (from ..._no_destroy()) alive:
+    assert cstats.alive() == 1
m.def("test_function7", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring");
+ {
+ py::options options;
+ options.disable_user_defined_docstrings();
+ options.disable_function_signatures();
+
+ m.def("test_function8", []() {});
+ }
+
{
py::options options;
options.disable_user_defined_docstrings();
+# -*- coding: utf-8 -*-
from pybind11_tests import docstring_options as m
assert m.test_overloaded3.__doc__ == "Overload docstr"
# options.enable_function_signatures()
- assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None")
+ assert m.test_function3.__doc__.startswith("test_function3(a: int, b: int) -> None")
- assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None")
- assert m.test_function4.__doc__ .endswith("A custom docstring\n")
+ assert m.test_function4.__doc__.startswith("test_function4(a: int, b: int) -> None")
+ assert m.test_function4.__doc__.endswith("A custom docstring\n")
# options.disable_function_signatures()
# options.disable_user_defined_docstrings()
assert m.test_function6.__doc__ == "A custom docstring"
# RAII destructor
- assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None")
- assert m.test_function7.__doc__ .endswith("A custom docstring\n")
+ assert m.test_function7.__doc__.startswith("test_function7(a: int, b: int) -> None")
+ assert m.test_function7.__doc__.endswith("A custom docstring\n")
+
+ # when all options are disabled, no docstring (instead of an empty one) should be generated
+ assert m.test_function8.__doc__ is None
# Suppression of user-defined docstrings for non-function objects
assert not m.DocstringTestFoo.__doc__
// reference is referencing rows/columns correctly).
template <typename MatrixArgType> Eigen::MatrixXd adjust_matrix(MatrixArgType m) {
Eigen::MatrixXd ret(m);
- for (int c = 0; c < m.cols(); c++) for (int r = 0; r < m.rows(); r++)
- ret(r, c) += 10*r + 100*c;
+ for (int c = 0; c < m.cols(); c++)
+ for (int r = 0; r < m.rows(); r++)
+ ret(r, c) += 10*r + 100*c; // NOLINT(clang-analyzer-core.uninitialized.Assign)
return ret;
}
using SparseMatrixR = Eigen::SparseMatrix<float, Eigen::RowMajor>;
using SparseMatrixC = Eigen::SparseMatrix<float>;
- m.attr("have_eigen") = true;
-
// various tests
m.def("double_col", [](const Eigen::VectorXf &x) -> Eigen::VectorXf { return 2.0f * x; });
m.def("double_row", [](const Eigen::RowVectorXf &x) -> Eigen::RowVectorXf { return 2.0f * x; });
m.def("dense_copy_r", [](const DenseMatrixR &m) -> DenseMatrixR { return m; });
m.def("dense_copy_c", [](const DenseMatrixC &m) -> DenseMatrixC { return m; });
// test_sparse, test_sparse_signature
- m.def("sparse_r", [mat]() -> SparseMatrixR { return Eigen::SparseView<Eigen::MatrixXf>(mat); });
+ m.def("sparse_r", [mat]() -> SparseMatrixR { return Eigen::SparseView<Eigen::MatrixXf>(mat); }); //NOLINT(clang-analyzer-core.uninitialized.UndefReturn)
m.def("sparse_c", [mat]() -> SparseMatrixC { return Eigen::SparseView<Eigen::MatrixXf>(mat); });
m.def("sparse_copy_r", [](const SparseMatrixR &m) -> SparseMatrixR { return m; });
m.def("sparse_copy_c", [](const SparseMatrixC &m) -> SparseMatrixC { return m; });
m.def("cpp_ref_r", [](py::handle m) { return m.cast<Eigen::Ref<MatrixXdR>>()(1, 0); });
m.def("cpp_ref_any", [](py::handle m) { return m.cast<py::EigenDRef<Eigen::MatrixXd>>()(1, 0); });
+ // [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
// test_nocopy_wrapper
// Test that we can prevent copying into an argument that would normally copy: First a version
m.def("get_elem", &get_elem);
// Now this alternative that calls the tells pybind to fail rather than copy:
m.def("get_elem_nocopy", [](Eigen::Ref<const Eigen::MatrixXd> m) -> double { return get_elem(m); },
- py::arg().noconvert());
+ py::arg{}.noconvert());
// Also test a row-major-only no-copy const ref:
m.def("get_elem_rm_nocopy", [](Eigen::Ref<const Eigen::Matrix<long, -1, -1, Eigen::RowMajor>> &m) -> long { return m(2, 1); },
- py::arg().noconvert());
+ py::arg{}.noconvert());
// test_issue738
// Issue #738: 1xN or Nx1 2D matrices were neither accepted nor properly copied with an
// incompatible stride value on the length-1 dimension--but that should be allowed (without
// requiring a copy!) because the stride value can be safely ignored on a size-1 dimension.
- m.def("iss738_f1", &adjust_matrix<const Eigen::Ref<const Eigen::MatrixXd> &>, py::arg().noconvert());
- m.def("iss738_f2", &adjust_matrix<const Eigen::Ref<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>> &>, py::arg().noconvert());
+ m.def("iss738_f1", &adjust_matrix<const Eigen::Ref<const Eigen::MatrixXd> &>, py::arg{}.noconvert());
+ m.def("iss738_f2", &adjust_matrix<const Eigen::Ref<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>> &>, py::arg{}.noconvert());
// test_issue1105
// Issue #1105: when converting from a numpy two-dimensional (Nx1) or (1xN) value into a dense
// a new array (np.ones(10)) increases the chances that the temp array will be garbage
// collected and/or that its memory will be overridden with different values.
m.def("get_elem_direct", [](Eigen::Ref<const Eigen::VectorXd> v) {
- py::module::import("numpy").attr("ones")(10);
+ py::module_::import("numpy").attr("ones")(10);
return v(5);
});
m.def("get_elem_indirect", [](std::vector<Eigen::Ref<const Eigen::VectorXd>> v) {
- py::module::import("numpy").attr("ones")(10);
+ py::module_::import("numpy").attr("ones")(10);
return v[0](5);
});
}
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import ConstructorStats
-pytestmark = pytest.requires_eigen_and_numpy
+np = pytest.importorskip("numpy")
+m = pytest.importorskip("pybind11_tests.eigen")
-with pytest.suppress(ImportError):
- from pybind11_tests import eigen as m
- import numpy as np
- ref = np.array([[ 0., 3, 0, 0, 0, 11],
- [22, 0, 0, 0, 17, 11],
- [ 7, 5, 0, 1, 0, 11],
- [ 0, 0, 0, 0, 0, 11],
- [ 0, 0, 14, 0, 8, 11]])
+ref = np.array(
+ [
+ [0.0, 3, 0, 0, 0, 11],
+ [22, 0, 0, 0, 17, 11],
+ [7, 5, 0, 1, 0, 11],
+ [0, 0, 0, 0, 0, 11],
+ [0, 0, 14, 0, 8, 11],
+ ]
+)
def assert_equal_ref(mat):
def test_partially_fixed():
- ref2 = np.array([[0., 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
+ ref2 = np.array([[0.0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, 1]), ref2[:, [1]])
np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2[0, :]), ref2[[0], :])
- np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
np.testing.assert_array_equal(
- m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
+ m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)]
+ )
+ np.testing.assert_array_equal(
+ m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :]
+ )
np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2), ref2)
np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, 1]), ref2[:, [1]])
np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2[0, :]), ref2[[0], :])
- np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)])
np.testing.assert_array_equal(
- m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :])
+ m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)]
+ )
+ np.testing.assert_array_equal(
+ m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :]
+ )
# TypeError should be raise for a shape mismatch
- functions = [m.partial_copy_four_rm_r, m.partial_copy_four_rm_c,
- m.partial_copy_four_cm_r, m.partial_copy_four_cm_c]
- matrix_with_wrong_shape = [[1, 2],
- [3, 4]]
+ functions = [
+ m.partial_copy_four_rm_r,
+ m.partial_copy_four_rm_c,
+ m.partial_copy_four_cm_r,
+ m.partial_copy_four_cm_c,
+ ]
+ matrix_with_wrong_shape = [[1, 2], [3, 4]]
for f in functions:
with pytest.raises(TypeError) as excinfo:
f(matrix_with_wrong_shape)
def test_mutator_descriptors():
- zr = np.arange(30, dtype='float32').reshape(5, 6) # row-major
+ zr = np.arange(30, dtype="float32").reshape(5, 6) # row-major
zc = zr.reshape(6, 5).transpose() # column-major
m.fixed_mutator_r(zr)
m.fixed_mutator_a(zc)
with pytest.raises(TypeError) as excinfo:
m.fixed_mutator_r(zc)
- assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.c_contiguous]) -> None'
- in str(excinfo.value))
+ assert (
+ "(arg0: numpy.ndarray[numpy.float32[5, 6],"
+ " flags.writeable, flags.c_contiguous]) -> None" in str(excinfo.value)
+ )
with pytest.raises(TypeError) as excinfo:
m.fixed_mutator_c(zr)
- assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable, flags.f_contiguous]) -> None'
- in str(excinfo.value))
+ assert (
+ "(arg0: numpy.ndarray[numpy.float32[5, 6],"
+ " flags.writeable, flags.f_contiguous]) -> None" in str(excinfo.value)
+ )
with pytest.raises(TypeError) as excinfo:
- m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype='float32'))
- assert ('(arg0: numpy.ndarray[float32[5, 6], flags.writeable]) -> None'
- in str(excinfo.value))
+ m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype="float32"))
+ assert "(arg0: numpy.ndarray[numpy.float32[5, 6], flags.writeable]) -> None" in str(
+ excinfo.value
+ )
zr.flags.writeable = False
with pytest.raises(TypeError):
m.fixed_mutator_r(zr)
def test_cpp_casting():
- assert m.cpp_copy(m.fixed_r()) == 22.
- assert m.cpp_copy(m.fixed_c()) == 22.
- z = np.array([[5., 6], [7, 8]])
- assert m.cpp_copy(z) == 7.
- assert m.cpp_copy(m.get_cm_ref()) == 21.
- assert m.cpp_copy(m.get_rm_ref()) == 21.
- assert m.cpp_ref_c(m.get_cm_ref()) == 21.
- assert m.cpp_ref_r(m.get_rm_ref()) == 21.
+ assert m.cpp_copy(m.fixed_r()) == 22.0
+ assert m.cpp_copy(m.fixed_c()) == 22.0
+ z = np.array([[5.0, 6], [7, 8]])
+ assert m.cpp_copy(z) == 7.0
+ assert m.cpp_copy(m.get_cm_ref()) == 21.0
+ assert m.cpp_copy(m.get_rm_ref()) == 21.0
+ assert m.cpp_ref_c(m.get_cm_ref()) == 21.0
+ assert m.cpp_ref_r(m.get_rm_ref()) == 21.0
with pytest.raises(RuntimeError) as excinfo:
# Can't reference m.fixed_c: it contains floats, m.cpp_ref_any wants doubles
m.cpp_ref_any(m.fixed_c())
- assert 'Unable to cast Python instance' in str(excinfo.value)
+ assert "Unable to cast Python instance" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
# Can't reference m.fixed_r: it contains floats, m.cpp_ref_any wants doubles
m.cpp_ref_any(m.fixed_r())
- assert 'Unable to cast Python instance' in str(excinfo.value)
- assert m.cpp_ref_any(m.ReturnTester.create()) == 1.
+ assert "Unable to cast Python instance" in str(excinfo.value)
+ assert m.cpp_ref_any(m.ReturnTester.create()) == 1.0
- assert m.cpp_ref_any(m.get_cm_ref()) == 21.
- assert m.cpp_ref_any(m.get_cm_ref()) == 21.
+ assert m.cpp_ref_any(m.get_cm_ref()) == 21.0
+ assert m.cpp_ref_any(m.get_cm_ref()) == 21.0
def test_pass_readonly_array():
counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
- for slice_idx, ref_mat in enumerate(slices):
+ for ref_mat in slices:
np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
# Mutator:
m.double_threer(second_row)
m.double_threec(second_col)
- np.testing.assert_array_equal(counting_mat, [[0., 2, 2], [6, 16, 10], [6, 14, 8]])
+ np.testing.assert_array_equal(counting_mat, [[0.0, 2, 2], [6, 16, 10], [6, 14, 8]])
def test_negative_stride_from_python(msg):
counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
counting_3d = counting_3d[::-1, ::-1, ::-1]
slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
- for slice_idx, ref_mat in enumerate(slices):
+ for ref_mat in slices:
np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat)
np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat)
# Mutator:
with pytest.raises(TypeError) as excinfo:
m.double_threer(second_row)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
double_threer(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: numpy.ndarray[float32[1, 3], flags.writeable]) -> None
+ 1. (arg0: numpy.ndarray[numpy.float32[1, 3], flags.writeable]) -> None
- Invoked with: """ + repr(np.array([ 5., 4., 3.], dtype='float32')) # noqa: E501 line too long
+ Invoked with: """ # noqa: E501 line too long
+ + repr(np.array([5.0, 4.0, 3.0], dtype="float32"))
+ )
with pytest.raises(TypeError) as excinfo:
m.double_threec(second_col)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
double_threec(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: numpy.ndarray[float32[3, 1], flags.writeable]) -> None
+ 1. (arg0: numpy.ndarray[numpy.float32[3, 1], flags.writeable]) -> None
- Invoked with: """ + repr(np.array([ 7., 4., 1.], dtype='float32')) # noqa: E501 line too long
+ Invoked with: """ # noqa: E501 line too long
+ + repr(np.array([7.0, 4.0, 1.0], dtype="float32"))
+ )
def test_nonunit_stride_to_python():
assert np.all(m.diagonal(ref) == ref.diagonal())
assert np.all(m.diagonal_1(ref) == ref.diagonal(1))
for i in range(-5, 7):
- assert np.all(m.diagonal_n(ref, i) == ref.diagonal(i)), "m.diagonal_n({})".format(i)
+ assert np.all(
+ m.diagonal_n(ref, i) == ref.diagonal(i)
+ ), "m.diagonal_n({})".format(i)
assert np.all(m.block(ref, 2, 1, 3, 3) == ref[2:5, 1:4])
assert np.all(m.block(ref, 1, 4, 4, 2) == ref[1:, 4:])
def test_eigen_ref_to_python():
chols = [m.cholesky1, m.cholesky2, m.cholesky3, m.cholesky4]
for i, chol in enumerate(chols, start=1):
- mymat = chol(np.array([[1., 2, 4], [2, 13, 23], [4, 23, 77]]))
- assert np.all(mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])), "cholesky{}".format(i)
+ mymat = chol(np.array([[1.0, 2, 4], [2, 13, 23], [4, 23, 77]]))
+ assert np.all(
+ mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])
+ ), "cholesky{}".format(i)
def assign_both(a1, a2, r, c, v):
np.testing.assert_array_equal(a_block1, master[3:5, 3:5])
np.testing.assert_array_equal(a_block2, master[2:5, 2:4])
np.testing.assert_array_equal(a_block3, master[6:10, 7:10])
- np.testing.assert_array_equal(a_corn1, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
- np.testing.assert_array_equal(a_corn2, master[0::master.shape[0] - 1, 0::master.shape[1] - 1])
+ np.testing.assert_array_equal(
+ a_corn1, master[0 :: master.shape[0] - 1, 0 :: master.shape[1] - 1]
+ )
+ np.testing.assert_array_equal(
+ a_corn2, master[0 :: master.shape[0] - 1, 0 :: master.shape[1] - 1]
+ )
np.testing.assert_array_equal(a_copy1, c1want)
np.testing.assert_array_equal(a_copy2, c2want)
cstats = ConstructorStats.get(m.ReturnTester)
assert cstats.alive() == 1
unsafe = [a.ref(), a.ref_const(), a.block(1, 2, 3, 4)]
- copies = [a.copy_get(), a.copy_view(), a.copy_ref(), a.copy_ref_const(),
- a.copy_block(4, 3, 2, 1)]
+ copies = [
+ a.copy_get(),
+ a.copy_view(),
+ a.copy_ref(),
+ a.copy_ref_const(),
+ a.copy_block(4, 3, 2, 1),
+ ]
del a
assert cstats.alive() == 0
del unsafe
del copies
- for meth in [m.ReturnTester.get, m.ReturnTester.get_ptr, m.ReturnTester.view,
- m.ReturnTester.view_ptr, m.ReturnTester.ref_safe, m.ReturnTester.ref_const_safe,
- m.ReturnTester.corners, m.ReturnTester.corners_const]:
+ for meth in [
+ m.ReturnTester.get,
+ m.ReturnTester.get_ptr,
+ m.ReturnTester.view,
+ m.ReturnTester.view_ptr,
+ m.ReturnTester.ref_safe,
+ m.ReturnTester.ref_const_safe,
+ m.ReturnTester.corners,
+ m.ReturnTester.corners_const,
+ ]:
assert_keeps_alive(m.ReturnTester, meth)
for meth in [m.ReturnTester.block_safe, m.ReturnTester.block_const]:
def test_eigen_ref_mutators():
"""Tests Eigen's ability to mutate numpy values"""
- orig = np.array([[1., 2, 3], [4, 5, 6], [7, 8, 9]])
+ orig = np.array([[1.0, 2, 3], [4, 5, 6], [7, 8, 9]])
zr = np.array(orig)
- zc = np.array(orig, order='F')
+ zc = np.array(orig, order="F")
m.add_rm(zr, 1, 0, 100)
- assert np.all(zr == np.array([[1., 2, 3], [104, 5, 6], [7, 8, 9]]))
+ assert np.all(zr == np.array([[1.0, 2, 3], [104, 5, 6], [7, 8, 9]]))
m.add_cm(zc, 1, 0, 200)
- assert np.all(zc == np.array([[1., 2, 3], [204, 5, 6], [7, 8, 9]]))
+ assert np.all(zc == np.array([[1.0, 2, 3], [204, 5, 6], [7, 8, 9]]))
m.add_any(zr, 1, 0, 20)
- assert np.all(zr == np.array([[1., 2, 3], [124, 5, 6], [7, 8, 9]]))
+ assert np.all(zr == np.array([[1.0, 2, 3], [124, 5, 6], [7, 8, 9]]))
m.add_any(zc, 1, 0, 10)
- assert np.all(zc == np.array([[1., 2, 3], [214, 5, 6], [7, 8, 9]]))
+ assert np.all(zc == np.array([[1.0, 2, 3], [214, 5, 6], [7, 8, 9]]))
# Can't reference a col-major array with a row-major Ref, and vice versa:
with pytest.raises(TypeError):
cornersr = zr[0::2, 0::2]
cornersc = zc[0::2, 0::2]
- assert np.all(cornersr == np.array([[1., 3], [7, 9]]))
- assert np.all(cornersc == np.array([[1., 3], [7, 9]]))
+ assert np.all(cornersr == np.array([[1.0, 3], [7, 9]]))
+ assert np.all(cornersc == np.array([[1.0, 3], [7, 9]]))
with pytest.raises(TypeError):
m.add_rm(cornersr, 0, 1, 25)
m.add_cm(cornersc, 0, 1, 25)
m.add_any(cornersr, 0, 1, 25)
m.add_any(cornersc, 0, 1, 44)
- assert np.all(zr == np.array([[1., 2, 28], [4, 5, 6], [7, 8, 9]]))
- assert np.all(zc == np.array([[1., 2, 47], [4, 5, 6], [7, 8, 9]]))
+ assert np.all(zr == np.array([[1.0, 2, 28], [4, 5, 6], [7, 8, 9]]))
+ assert np.all(zc == np.array([[1.0, 2, 47], [4, 5, 6], [7, 8, 9]]))
# You shouldn't be allowed to pass a non-writeable array to a mutating Eigen method:
zro = zr[0:4, 0:4]
assert not zrro.flags.owndata and not zrro.flags.writeable
zc[1, 2] = 99
- expect = np.array([[11., 12, 13], [21, 22, 99], [31, 32, 33]])
+ expect = np.array([[11.0, 12, 13], [21, 22, 99], [31, 32, 33]])
# We should have just changed zc, of course, but also zcro and the original eigen matrix
assert np.all(zc == expect)
assert np.all(zcro == expect)
assert np.all(z == z3)
assert np.all(z == z4)
assert np.all(z == z5)
- expect = np.array([[0., 22, 20], [31, 37, 33], [41, 42, 38]])
+ expect = np.array([[0.0, 22, 20], [31, 37, 33], [41, 42, 38]])
assert np.all(z == expect)
- y = np.array(range(100), dtype='float64').reshape(10, 10)
+ y = np.array(range(100), dtype="float64").reshape(10, 10)
y2 = m.incr_matrix_any(y, 10) # np -> eigen -> np
- y3 = m.incr_matrix_any(y2[0::2, 0::2], -33) # np -> eigen -> np slice -> np -> eigen -> np
+ y3 = m.incr_matrix_any(
+ y2[0::2, 0::2], -33
+ ) # np -> eigen -> np slice -> np -> eigen -> np
y4 = m.even_rows(y3) # numpy -> eigen slice -> (... y3)
y5 = m.even_cols(y4) # numpy -> eigen slice -> (... y4)
y6 = m.incr_matrix_any(y5, 1000) # numpy -> eigen -> (... y5)
# Apply same mutations using just numpy:
- yexpect = np.array(range(100), dtype='float64').reshape(10, 10)
+ yexpect = np.array(range(100), dtype="float64").reshape(10, 10)
yexpect += 10
yexpect[0::2, 0::2] -= 33
yexpect[0::4, 0::4] += 1000
def test_nocopy_wrapper():
# get_elem requires a column-contiguous matrix reference, but should be
# callable with other types of matrix (via copying):
- int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order='F')
- dbl_matrix_colmajor = np.array(int_matrix_colmajor, dtype='double', order='F', copy=True)
- int_matrix_rowmajor = np.array(int_matrix_colmajor, order='C', copy=True)
- dbl_matrix_rowmajor = np.array(int_matrix_rowmajor, dtype='double', order='C', copy=True)
+ int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order="F")
+ dbl_matrix_colmajor = np.array(
+ int_matrix_colmajor, dtype="double", order="F", copy=True
+ )
+ int_matrix_rowmajor = np.array(int_matrix_colmajor, order="C", copy=True)
+ dbl_matrix_rowmajor = np.array(
+ int_matrix_rowmajor, dtype="double", order="C", copy=True
+ )
# All should be callable via get_elem:
assert m.get_elem(int_matrix_colmajor) == 8
# All but the second should fail with m.get_elem_nocopy:
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(int_matrix_colmajor)
- assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.f_contiguous' in str(excinfo.value))
+ assert "get_elem_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.f_contiguous" in str(excinfo.value)
assert m.get_elem_nocopy(dbl_matrix_colmajor) == 8
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(int_matrix_rowmajor)
- assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.f_contiguous' in str(excinfo.value))
+ assert "get_elem_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.f_contiguous" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.get_elem_nocopy(dbl_matrix_rowmajor)
- assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.f_contiguous' in str(excinfo.value))
+ assert "get_elem_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.f_contiguous" in str(excinfo.value)
# For the row-major test, we take a long matrix in row-major, so only the third is allowed:
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(int_matrix_colmajor)
- assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.c_contiguous' in str(excinfo.value))
+ assert "get_elem_rm_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.c_contiguous" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(dbl_matrix_colmajor)
- assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.c_contiguous' in str(excinfo.value))
+ assert "get_elem_rm_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.c_contiguous" in str(excinfo.value)
assert m.get_elem_rm_nocopy(int_matrix_rowmajor) == 8
with pytest.raises(TypeError) as excinfo:
m.get_elem_rm_nocopy(dbl_matrix_rowmajor)
- assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and
- ', flags.c_contiguous' in str(excinfo.value))
+ assert "get_elem_rm_nocopy(): incompatible function arguments." in str(
+ excinfo.value
+ ) and ", flags.c_contiguous" in str(excinfo.value)
def test_eigen_ref_life_support():
def test_special_matrix_objects():
- assert np.all(m.incr_diag(7) == np.diag([1., 2, 3, 4, 5, 6, 7]))
+ assert np.all(m.incr_diag(7) == np.diag([1.0, 2, 3, 4, 5, 6, 7]))
- asymm = np.array([[ 1., 2, 3, 4],
- [ 5, 6, 7, 8],
- [ 9, 10, 11, 12],
- [13, 14, 15, 16]])
+ asymm = np.array([[1.0, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
symm_lower = np.array(asymm)
symm_upper = np.array(asymm)
for i in range(4):
def test_dense_signature(doc):
- assert doc(m.double_col) == """
- double_col(arg0: numpy.ndarray[float32[m, 1]]) -> numpy.ndarray[float32[m, 1]]
+ assert (
+ doc(m.double_col)
+ == """
+ double_col(arg0: numpy.ndarray[numpy.float32[m, 1]]) -> numpy.ndarray[numpy.float32[m, 1]]
"""
- assert doc(m.double_row) == """
- double_row(arg0: numpy.ndarray[float32[1, n]]) -> numpy.ndarray[float32[1, n]]
+ )
+ assert (
+ doc(m.double_row)
+ == """
+ double_row(arg0: numpy.ndarray[numpy.float32[1, n]]) -> numpy.ndarray[numpy.float32[1, n]]
"""
- assert doc(m.double_complex) == """
- double_complex(arg0: numpy.ndarray[complex64[m, 1]]) -> numpy.ndarray[complex64[m, 1]]
+ )
+ assert doc(m.double_complex) == (
+ """
+ double_complex(arg0: numpy.ndarray[numpy.complex64[m, 1]])"""
+ """ -> numpy.ndarray[numpy.complex64[m, 1]]
"""
- assert doc(m.double_mat_rm) == """
- double_mat_rm(arg0: numpy.ndarray[float32[m, n]]) -> numpy.ndarray[float32[m, n]]
+ )
+ assert doc(m.double_mat_rm) == (
+ """
+ double_mat_rm(arg0: numpy.ndarray[numpy.float32[m, n]])"""
+ """ -> numpy.ndarray[numpy.float32[m, n]]
"""
+ )
def test_named_arguments():
a = np.array([[1.0, 2], [3, 4], [5, 6]])
b = np.ones((2, 1))
- assert np.all(m.matrix_multiply(a, b) == np.array([[3.], [7], [11]]))
- assert np.all(m.matrix_multiply(A=a, B=b) == np.array([[3.], [7], [11]]))
- assert np.all(m.matrix_multiply(B=b, A=a) == np.array([[3.], [7], [11]]))
+ assert np.all(m.matrix_multiply(a, b) == np.array([[3.0], [7], [11]]))
+ assert np.all(m.matrix_multiply(A=a, B=b) == np.array([[3.0], [7], [11]]))
+ assert np.all(m.matrix_multiply(B=b, A=a) == np.array([[3.0], [7], [11]]))
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(b, a)
- assert str(excinfo.value) == 'Nonconformable matrices!'
+ assert str(excinfo.value) == "Nonconformable matrices!"
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(A=b, B=a)
- assert str(excinfo.value) == 'Nonconformable matrices!'
+ assert str(excinfo.value) == "Nonconformable matrices!"
with pytest.raises(ValueError) as excinfo:
m.matrix_multiply(B=a, A=b)
- assert str(excinfo.value) == 'Nonconformable matrices!'
+ assert str(excinfo.value) == "Nonconformable matrices!"
-@pytest.requires_eigen_and_scipy
def test_sparse():
+ pytest.importorskip("scipy")
assert_sparse_equal_ref(m.sparse_r())
assert_sparse_equal_ref(m.sparse_c())
assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_r()))
assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_r()))
-@pytest.requires_eigen_and_scipy
def test_sparse_signature(doc):
- assert doc(m.sparse_copy_r) == """
- sparse_copy_r(arg0: scipy.sparse.csr_matrix[float32]) -> scipy.sparse.csr_matrix[float32]
+ pytest.importorskip("scipy")
+ assert (
+ doc(m.sparse_copy_r)
+ == """
+ sparse_copy_r(arg0: scipy.sparse.csr_matrix[numpy.float32]) -> scipy.sparse.csr_matrix[numpy.float32]
""" # noqa: E501 line too long
- assert doc(m.sparse_copy_c) == """
- sparse_copy_c(arg0: scipy.sparse.csc_matrix[float32]) -> scipy.sparse.csc_matrix[float32]
+ )
+ assert (
+ doc(m.sparse_copy_c)
+ == """
+ sparse_copy_c(arg0: scipy.sparse.csc_matrix[numpy.float32]) -> scipy.sparse.csc_matrix[numpy.float32]
""" # noqa: E501 line too long
+ )
def test_issue738():
"""Ignore strides on a length-1 dimension (even if they would be incompatible length > 1)"""
- assert np.all(m.iss738_f1(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))
- assert np.all(m.iss738_f1(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))
-
- assert np.all(m.iss738_f2(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]]))
- assert np.all(m.iss738_f2(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]]))
+ assert np.all(m.iss738_f1(np.array([[1.0, 2, 3]])) == np.array([[1.0, 102, 203]]))
+ assert np.all(
+ m.iss738_f1(np.array([[1.0], [2], [3]])) == np.array([[1.0], [12], [23]])
+ )
+
+ assert np.all(m.iss738_f2(np.array([[1.0, 2, 3]])) == np.array([[1.0, 102, 203]]))
+ assert np.all(
+ m.iss738_f2(np.array([[1.0], [2], [3]])) == np.array([[1.0], [12], [23]])
+ )
def test_issue1105():
-if(${PYTHON_MODULE_EXTENSION} MATCHES "pypy")
- add_custom_target(cpptest) # Dummy target on PyPy. Embedding is not supported.
+possibly_uninitialized(PYTHON_MODULE_EXTENSION Python_INTERPRETER_ID)
+
+if("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy")
+ message(STATUS "Skipping embed test on PyPy")
+ add_custom_target(cpptest) # Dummy target on PyPy. Embedding is not supported.
set(_suppress_unused_variable_warning "${DOWNLOAD_CATCH}")
return()
endif()
-find_package(Catch 1.9.3)
+find_package(Catch 2.13.2)
+
if(CATCH_FOUND)
message(STATUS "Building interpreter tests using Catch v${CATCH_VERSION}")
else()
message(STATUS "Catch not detected. Interpreter tests will be skipped. Install Catch headers"
- " manually or use `cmake -DDOWNLOAD_CATCH=1` to fetch them automatically.")
+ " manually or use `cmake -DDOWNLOAD_CATCH=ON` to fetch them automatically.")
return()
endif()
-add_executable(test_embed
- catch.cpp
- test_interpreter.cpp
-)
-target_include_directories(test_embed PRIVATE ${CATCH_INCLUDE_DIR})
+find_package(Threads REQUIRED)
+
+add_executable(test_embed catch.cpp test_interpreter.cpp)
pybind11_enable_warnings(test_embed)
-if(NOT CMAKE_VERSION VERSION_LESS 3.0)
- target_link_libraries(test_embed PRIVATE pybind11::embed)
-else()
- target_include_directories(test_embed PRIVATE ${PYBIND11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS})
- target_compile_options(test_embed PRIVATE ${PYBIND11_CPP_STANDARD})
- target_link_libraries(test_embed PRIVATE ${PYTHON_LIBRARIES})
-endif()
+target_link_libraries(test_embed PRIVATE pybind11::embed Catch2::Catch2 Threads::Threads)
-find_package(Threads REQUIRED)
-target_link_libraries(test_embed PUBLIC ${CMAKE_THREAD_LIBS_INIT})
+if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+ file(COPY test_interpreter.py DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+endif()
-add_custom_target(cpptest COMMAND $<TARGET_FILE:test_embed>
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+add_custom_target(
+ cpptest
+ COMMAND "$<TARGET_FILE:test_embed>"
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
pybind11_add_module(external_module THIN_LTO external_module.cpp)
-set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY
+ "${CMAKE_CURRENT_BINARY_DIR}")
+foreach(config ${CMAKE_CONFIGURATION_TYPES})
+ string(TOUPPER ${config} config)
+ set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config}
+ "${CMAKE_CURRENT_BINARY_DIR}")
+endforeach()
add_dependencies(cpptest external_module)
add_dependencies(check cpptest)
class PyWidget final : public Widget {
using Widget::Widget;
- int the_answer() const override { PYBIND11_OVERLOAD_PURE(int, Widget, the_answer); }
+ int the_answer() const override { PYBIND11_OVERRIDE_PURE(int, Widget, the_answer); }
};
PYBIND11_EMBEDDED_MODULE(widget_module, m) {
}
TEST_CASE("Pass classes and data between modules defined in C++ and Python") {
- auto module = py::module::import("test_interpreter");
- REQUIRE(py::hasattr(module, "DerivedWidget"));
+ auto module_ = py::module_::import("test_interpreter");
+ REQUIRE(py::hasattr(module_, "DerivedWidget"));
- auto locals = py::dict("hello"_a="Hello, World!", "x"_a=5, **module.attr("__dict__"));
+ auto locals = py::dict("hello"_a="Hello, World!", "x"_a=5, **module_.attr("__dict__"));
py::exec(R"(
widget = DerivedWidget("{} - {}".format(hello, x))
message = widget.the_message
)", py::globals(), locals);
REQUIRE(locals["message"].cast<std::string>() == "Hello, World! - 5");
- auto py_widget = module.attr("DerivedWidget")("The question");
+ auto py_widget = module_.attr("DerivedWidget")("The question");
auto message = py_widget.attr("the_message");
REQUIRE(message.cast<std::string>() == "The question");
}
TEST_CASE("Import error handling") {
- REQUIRE_NOTHROW(py::module::import("widget_module"));
- REQUIRE_THROWS_WITH(py::module::import("throw_exception"),
+ REQUIRE_NOTHROW(py::module_::import("widget_module"));
+ REQUIRE_THROWS_WITH(py::module_::import("throw_exception"),
"ImportError: C++ Error");
- REQUIRE_THROWS_WITH(py::module::import("throw_error_already_set"),
+ REQUIRE_THROWS_WITH(py::module_::import("throw_error_already_set"),
Catch::Contains("ImportError: KeyError"));
}
TEST_CASE("Restart the interpreter") {
// Verify pre-restart state.
- REQUIRE(py::module::import("widget_module").attr("add")(1, 2).cast<int>() == 3);
+ REQUIRE(py::module_::import("widget_module").attr("add")(1, 2).cast<int>() == 3);
REQUIRE(has_pybind11_internals_builtin());
REQUIRE(has_pybind11_internals_static());
- REQUIRE(py::module::import("external_module").attr("A")(123).attr("value").cast<int>() == 123);
+ REQUIRE(py::module_::import("external_module").attr("A")(123).attr("value").cast<int>() == 123);
// local and foreign module internals should point to the same internals:
REQUIRE(reinterpret_cast<uintptr_t>(*py::detail::get_internals_pp()) ==
- py::module::import("external_module").attr("internals_at")().cast<uintptr_t>());
+ py::module_::import("external_module").attr("internals_at")().cast<uintptr_t>());
// Restart the interpreter.
py::finalize_interpreter();
REQUIRE(has_pybind11_internals_builtin());
REQUIRE(has_pybind11_internals_static());
REQUIRE(reinterpret_cast<uintptr_t>(*py::detail::get_internals_pp()) ==
- py::module::import("external_module").attr("internals_at")().cast<uintptr_t>());
+ py::module_::import("external_module").attr("internals_at")().cast<uintptr_t>());
// Make sure that an interpreter with no get_internals() created until finalize still gets the
// internals destroyed
py::finalize_interpreter();
py::initialize_interpreter();
bool ran = false;
- py::module::import("__main__").attr("internals_destroy_test") =
+ py::module_::import("__main__").attr("internals_destroy_test") =
py::capsule(&ran, [](void *ran) { py::detail::get_internals(); *static_cast<bool *>(ran) = true; });
REQUIRE_FALSE(has_pybind11_internals_builtin());
REQUIRE_FALSE(has_pybind11_internals_static());
REQUIRE_FALSE(has_pybind11_internals_static());
// C++ modules can be reloaded.
- auto cpp_module = py::module::import("widget_module");
+ auto cpp_module = py::module_::import("widget_module");
REQUIRE(cpp_module.attr("add")(1, 2).cast<int>() == 3);
// C++ type information is reloaded and can be used in python modules.
- auto py_module = py::module::import("test_interpreter");
+ auto py_module = py::module_::import("test_interpreter");
auto py_widget = py_module.attr("DerivedWidget")("Hello after restart");
REQUIRE(py_widget.attr("the_message").cast<std::string>() == "Hello after restart");
}
TEST_CASE("Subinterpreter") {
// Add tags to the modules in the main interpreter and test the basics.
- py::module::import("__main__").attr("main_tag") = "main interpreter";
+ py::module_::import("__main__").attr("main_tag") = "main interpreter";
{
- auto m = py::module::import("widget_module");
+ auto m = py::module_::import("widget_module");
m.attr("extension_module_tag") = "added to module in main interpreter";
REQUIRE(m.attr("add")(1, 2).cast<int>() == 3);
REQUIRE(has_pybind11_internals_static());
// Modules tags should be gone.
- REQUIRE_FALSE(py::hasattr(py::module::import("__main__"), "tag"));
+ REQUIRE_FALSE(py::hasattr(py::module_::import("__main__"), "tag"));
{
- auto m = py::module::import("widget_module");
+ auto m = py::module_::import("widget_module");
REQUIRE_FALSE(py::hasattr(m, "extension_module_tag"));
// Function bindings should still work.
Py_EndInterpreter(sub_tstate);
PyThreadState_Swap(main_tstate);
- REQUIRE(py::hasattr(py::module::import("__main__"), "main_tag"));
- REQUIRE(py::hasattr(py::module::import("widget_module"), "extension_module_tag"));
+ REQUIRE(py::hasattr(py::module_::import("__main__"), "main_tag"));
+ REQUIRE(py::hasattr(py::module_::import("widget_module"), "extension_module_tag"));
}
TEST_CASE("Execution frame") {
// Disable generation of cached bytecode (.pyc files) for this test, otherwise
// Python might pick up an old version from the cache instead of the new versions
// of the .py files generated below
- auto sys = py::module::import("sys");
+ auto sys = py::module_::import("sys");
bool dont_write_bytecode = sys.attr("dont_write_bytecode").cast<bool>();
sys.attr("dont_write_bytecode") = true;
// Reset the value at scope exit
});
// Import the module from file
- auto module = py::module::import(module_name.c_str());
- int result = module.attr("test")().cast<int>();
+ auto module_ = py::module_::import(module_name.c_str());
+ int result = module_.attr("test")().cast<int>();
REQUIRE(result == 1);
// Update the module .py file with a small change
test_module.close();
// Reload the module
- module.reload();
- result = module.attr("test")().cast<int>();
+ module_.reload();
+ result = module_.attr("test")().cast<int>();
REQUIRE(result == 2);
}
+# -*- coding: utf-8 -*-
from widget_module import Widget
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import enums as m
assert str(m.UnscopedEnum.EOne) == "UnscopedEnum.EOne"
assert str(m.UnscopedEnum.ETwo) == "UnscopedEnum.ETwo"
assert str(m.EOne) == "UnscopedEnum.EOne"
+ assert repr(m.UnscopedEnum.EOne) == "<UnscopedEnum.EOne: 1>"
+ assert repr(m.UnscopedEnum.ETwo) == "<UnscopedEnum.ETwo: 2>"
+ assert repr(m.EOne) == "<UnscopedEnum.EOne: 1>"
# name property
assert m.UnscopedEnum.EOne.name == "EOne"
+ assert m.UnscopedEnum.EOne.value == 1
assert m.UnscopedEnum.ETwo.name == "ETwo"
- assert m.EOne.name == "EOne"
- # name readonly
+ assert m.UnscopedEnum.ETwo.value == 2
+ assert m.EOne is m.UnscopedEnum.EOne
+ # name, value readonly
with pytest.raises(AttributeError):
m.UnscopedEnum.EOne.name = ""
- # name returns a copy
- foo = m.UnscopedEnum.EOne.name
- foo = "bar"
+ with pytest.raises(AttributeError):
+ m.UnscopedEnum.EOne.value = 10
+ # name, value returns a copy
+ # TODO: Neither the name nor value tests actually check against aliasing.
+ # Use a mutable type that has reference semantics.
+ nonaliased_name = m.UnscopedEnum.EOne.name
+ nonaliased_name = "bar" # noqa: F841
assert m.UnscopedEnum.EOne.name == "EOne"
+ nonaliased_value = m.UnscopedEnum.EOne.value
+ nonaliased_value = 10 # noqa: F841
+ assert m.UnscopedEnum.EOne.value == 1
# __members__ property
- assert m.UnscopedEnum.__members__ == \
- {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
+ assert m.UnscopedEnum.__members__ == {
+ "EOne": m.UnscopedEnum.EOne,
+ "ETwo": m.UnscopedEnum.ETwo,
+ "EThree": m.UnscopedEnum.EThree,
+ }
# __members__ readonly
with pytest.raises(AttributeError):
m.UnscopedEnum.__members__ = {}
# __members__ returns a copy
- foo = m.UnscopedEnum.__members__
- foo["bar"] = "baz"
- assert m.UnscopedEnum.__members__ == \
- {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree}
+ nonaliased_members = m.UnscopedEnum.__members__
+ nonaliased_members["bar"] = "baz"
+ assert m.UnscopedEnum.__members__ == {
+ "EOne": m.UnscopedEnum.EOne,
+ "ETwo": m.UnscopedEnum.ETwo,
+ "EThree": m.UnscopedEnum.EThree,
+ }
- for docstring_line in '''An unscoped enumeration
+ for docstring_line in """An unscoped enumeration
Members:
ETwo : Docstring for ETwo
- EThree : Docstring for EThree'''.split('\n'):
+ EThree : Docstring for EThree""".split(
+ "\n"
+ ):
assert docstring_line in m.UnscopedEnum.__doc__
# Unscoped enums will accept ==/!= int comparisons
assert y != 3
assert 3 != y
# Compare with None
- assert (y != None) # noqa: E711
+ assert y != None # noqa: E711
assert not (y == None) # noqa: E711
# Compare with an object
- assert (y != object())
+ assert y != object()
assert not (y == object())
# Compare with string
assert y != "2"
assert not (y == "2")
with pytest.raises(TypeError):
- y < object()
+ y < object() # noqa: B015
with pytest.raises(TypeError):
- y <= object()
+ y <= object() # noqa: B015
with pytest.raises(TypeError):
- y > object()
+ y > object() # noqa: B015
with pytest.raises(TypeError):
- y >= object()
+ y >= object() # noqa: B015
with pytest.raises(TypeError):
- y | object()
+ y | object() # noqa: B015
with pytest.raises(TypeError):
- y & object()
+ y & object() # noqa: B015
with pytest.raises(TypeError):
- y ^ object()
+ y ^ object() # noqa: B015
assert int(m.UnscopedEnum.ETwo) == 2
assert str(m.UnscopedEnum(2)) == "UnscopedEnum.ETwo"
assert z != 3
assert 3 != z
# Compare with None
- assert (z != None) # noqa: E711
+ assert z != None # noqa: E711
assert not (z == None) # noqa: E711
# Compare with an object
- assert (z != object())
+ assert z != object()
assert not (z == object())
# Scoped enums will *NOT* accept >, <, >= and <= int comparisons (Will throw exceptions)
with pytest.raises(TypeError):
- z > 3
+ z > 3 # noqa: B015
with pytest.raises(TypeError):
- z < 3
+ z < 3 # noqa: B015
with pytest.raises(TypeError):
- z >= 3
+ z >= 3 # noqa: B015
with pytest.raises(TypeError):
- z <= 3
+ z <= 3 # noqa: B015
# order
assert m.ScopedEnum.Two < m.ScopedEnum.Three
def test_implicit_conversion():
assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "EMode.EFirstMode"
assert str(m.ClassWithUnscopedEnum.EFirstMode) == "EMode.EFirstMode"
+ assert repr(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "<EMode.EFirstMode: 1>"
+ assert repr(m.ClassWithUnscopedEnum.EFirstMode) == "<EMode.EFirstMode: 1>"
f = m.ClassWithUnscopedEnum.test_function
first = m.ClassWithUnscopedEnum.EFirstMode
x[f(first)] = 3
x[f(second)] = 4
# Hashing test
- assert str(x) == "{EMode.EFirstMode: 3, EMode.ESecondMode: 4}"
+ assert repr(x) == "{<EMode.EFirstMode: 1>: 3, <EMode.ESecondMode: 2>: 4}"
def test_binary_operators():
with pytest.raises(ValueError) as excinfo:
m.register_bad_enum()
assert str(excinfo.value) == 'SimpleEnum: element "ONE" already exists!'
+
+
+def test_docstring_signatures():
+ for enum_type in [m.ScopedEnum, m.UnscopedEnum]:
+ for attr in enum_type.__dict__.values():
+ # Issue #2623/PR #2637: Add argument names to enum_ methods
+ assert "arg0" not in (attr.__doc__ or "")
TEST_SUBMODULE(eval_, m) {
// test_evals
- auto global = py::dict(py::module::import("__main__").attr("__dict__"));
+ auto global = py::dict(py::module_::import("__main__").attr("__dict__"));
m.def("test_eval_statements", [global]() {
auto local = py::dict();
}
return false;
});
+
+ // test_eval_empty_globals
+ m.def("eval_empty_globals", [](py::object global) {
+ if (global.is_none())
+ global = py::dict();
+ auto int_class = py::eval("isinstance(42, int)", global);
+ return global;
+ });
}
+# -*- coding: utf-8 -*-
import os
+
+import pytest
+
+import env # noqa: F401
+
from pybind11_tests import eval_ as m
assert m.test_eval()
assert m.test_eval_single_statement()
+ assert m.test_eval_failure()
+
+
+@pytest.mark.xfail("env.PYPY and not env.PY2", raises=RuntimeError)
+def test_eval_file():
filename = os.path.join(os.path.dirname(__file__), "test_eval_call.py")
assert m.test_eval_file(filename)
- assert m.test_eval_failure()
assert m.test_eval_file_failure()
+
+
+def test_eval_empty_globals():
+ assert "__builtins__" in m.eval_empty_globals(None)
+
+ g = {}
+ assert "__builtins__" in m.eval_empty_globals(g)
+ assert "__builtins__" in g
+# -*- coding: utf-8 -*-
# This file is called from 'test_eval.py'
-if 'call_test2' in locals():
+if "call_test2" in locals():
call_test2(y) # noqa: F821 undefined name
class MyException : public std::exception {
public:
explicit MyException(const char * m) : message{m} {}
- virtual const char * what() const noexcept override {return message.c_str();}
+ const char * what() const noexcept override {return message.c_str();}
private:
std::string message = "";
};
class MyException2 : public std::exception {
public:
explicit MyException2(const char * m) : message{m} {}
- virtual const char * what() const noexcept override {return message.c_str();}
+ const char * what() const noexcept override {return message.c_str();}
private:
std::string message = "";
};
public:
explicit MyException3(const char * m) : message{m} {}
virtual const char * what() const noexcept {return message.c_str();}
+ // Rule of 5 BEGIN: to preempt compiler warnings.
+ MyException3(const MyException3&) = default;
+ MyException3(MyException3&&) = default;
+ MyException3& operator=(const MyException3&) = default;
+ MyException3& operator=(MyException3&&) = default;
+ virtual ~MyException3() = default;
+ // Rule of 5 END.
private:
std::string message = "";
};
class MyException4 : public std::exception {
public:
explicit MyException4(const char * m) : message{m} {}
- virtual const char * what() const noexcept override {return message.c_str();}
+ const char * what() const noexcept override {return message.c_str();}
private:
std::string message = "";
};
py::dict d;
};
+
+
+struct PythonAlreadySetInDestructor {
+ PythonAlreadySetInDestructor(const py::str &s) : s(s) {}
+ ~PythonAlreadySetInDestructor() {
+ py::dict foo;
+ try {
+ // Assign to a py::object to force read access of nonexistent dict entry
+ py::object o = foo["bar"];
+ }
+ catch (py::error_already_set& ex) {
+ ex.discard_as_unraisable(s);
+ }
+ }
+
+ py::str s;
+};
+
+
TEST_SUBMODULE(exceptions, m) {
m.def("throw_std_exception", []() {
throw std::runtime_error("This exception was intentionally thrown.");
m.def("throws5", []() { throw MyException5("this is a helper-defined translated exception"); });
m.def("throws5_1", []() { throw MyException5_1("MyException5 subclass"); });
m.def("throws_logic_error", []() { throw std::logic_error("this error should fall through to the standard handler"); });
+ m.def("throws_overflow_error", []() {throw std::overflow_error(""); });
m.def("exception_matches", []() {
py::dict foo;
try {
m.def("modulenotfound_exception_matches_base", []() {
try {
// On Python >= 3.6, this raises a ModuleNotFoundError, a subclass of ImportError
- py::module::import("nonexistent");
+ py::module_::import("nonexistent");
}
catch (py::error_already_set &ex) {
if (!ex.matches(PyExc_ImportError)) throw;
return false;
});
+ m.def("python_alreadyset_in_destructor", [](py::str s) {
+ PythonAlreadySetInDestructor alreadyset_in_destructor(s);
+ return true;
+ });
+
// test_nested_throws
m.def("try_catch", [m](py::object exc_type, py::function f, py::args args) {
try { f(*args); }
}
});
+ // Test repr that cannot be displayed
+ m.def("simple_bool_passthrough", [](bool x) {return x;});
+
}
+# -*- coding: utf-8 -*-
+import sys
+
import pytest
from pybind11_tests import exceptions as m
assert d["good"] is True
+def ignore_pytest_unraisable_warning(f):
+ unraisable = "PytestUnraisableExceptionWarning"
+ if hasattr(pytest, unraisable): # Python >= 3.8 and pytest >= 6
+ dec = pytest.mark.filterwarnings("ignore::pytest.{}".format(unraisable))
+ return dec(f)
+ else:
+ return f
+
+
+@ignore_pytest_unraisable_warning
+def test_python_alreadyset_in_destructor(monkeypatch, capsys):
+ hooked = False
+ triggered = [False] # mutable, so Python 2.7 closure can modify it
+
+ if hasattr(sys, "unraisablehook"): # Python 3.8+
+ hooked = True
+ # Don't take `sys.unraisablehook`, as that's overwritten by pytest
+ default_hook = sys.__unraisablehook__
+
+ def hook(unraisable_hook_args):
+ exc_type, exc_value, exc_tb, err_msg, obj = unraisable_hook_args
+ if obj == "already_set demo":
+ triggered[0] = True
+ default_hook(unraisable_hook_args)
+ return
+
+ # Use monkeypatch so pytest can apply and remove the patch as appropriate
+ monkeypatch.setattr(sys, "unraisablehook", hook)
+
+ assert m.python_alreadyset_in_destructor("already_set demo") is True
+ if hooked:
+ assert triggered[0] is True
+
+ _, captured_stderr = capsys.readouterr()
+ # Error message is different in Python 2 and 3, check for words that appear in both
+ assert "ignored" in captured_stderr and "already_set demo" in captured_stderr
+
+
def test_exception_matches():
assert m.exception_matches()
assert m.exception_matches_base()
# Can we fall-through to the default handler?
with pytest.raises(RuntimeError) as excinfo:
m.throws_logic_error()
- assert msg(excinfo.value) == "this error should fall through to the standard handler"
+ assert (
+ msg(excinfo.value) == "this error should fall through to the standard handler"
+ )
+
+ # OverFlow error translation.
+ with pytest.raises(OverflowError) as excinfo:
+ m.throws_overflow_error()
# Can we handle a helper-declared exception?
with pytest.raises(m.MyException5) as excinfo:
# C++ -> Python -> C++ -> Python
with capture:
m.try_catch(
- m.MyException5, pycatch, m.MyException, m.try_catch, m.MyException, throw_myex5)
+ m.MyException5,
+ pycatch,
+ m.MyException,
+ m.try_catch,
+ m.MyException,
+ throw_myex5,
+ )
assert str(capture).startswith("MyException5: nested error 5")
# C++ -> Python -> C++
with pytest.raises(m.MyException5) as excinfo:
m.try_catch(m.MyException, pycatch, m.MyException, m.throws5)
assert str(excinfo.value) == "this is a helper-defined translated exception"
+
+
+# This can often happen if you wrap a pybind11 class in a Python wrapper
+def test_invalid_repr():
+ class MyRepr(object):
+ def __repr__(self):
+ raise AttributeError("Example error")
+
+ with pytest.raises(TypeError):
+ m.simple_bool_passthrough(MyRepr())
#include "pybind11_tests.h"
#include "constructor_stats.h"
#include <cmath>
+#include <new>
// Classes for testing python construction via C++ factory function:
// Not publicly constructible, copyable, or movable:
public:
TestFactory4() : TestFactory3() { print_default_created(this); }
TestFactory4(int v) : TestFactory3(v) { print_created(this, v); }
- virtual ~TestFactory4() { print_destroyed(this); }
+ ~TestFactory4() override { print_destroyed(this); }
};
// Another class for an invalid downcast test
class TestFactory5 : public TestFactory3 {
public:
TestFactory5(int i) : TestFactory3(i) { print_created(this, i); }
- virtual ~TestFactory5() { print_destroyed(this); }
+ ~TestFactory5() override { print_destroyed(this); }
};
class TestFactory6 {
PyTF6(PyTF6 &&f) : TestFactory6(std::move(f)) { print_move_created(this); }
PyTF6(const PyTF6 &f) : TestFactory6(f) { print_copy_created(this); }
PyTF6(std::string s) : TestFactory6((int) s.size()) { alias = true; print_created(this, s); }
- virtual ~PyTF6() { print_destroyed(this); }
- int get() override { PYBIND11_OVERLOAD(int, TestFactory6, get, /*no args*/); }
+ ~PyTF6() override { print_destroyed(this); }
+ int get() override { PYBIND11_OVERRIDE(int, TestFactory6, get, /*no args*/); }
};
class TestFactory7 {
PyTF7(int i) : TestFactory7(i) { alias = true; print_created(this, i); }
PyTF7(PyTF7 &&f) : TestFactory7(std::move(f)) { print_move_created(this); }
PyTF7(const PyTF7 &f) : TestFactory7(f) { print_copy_created(this); }
- virtual ~PyTF7() { print_destroyed(this); }
- int get() override { PYBIND11_OVERLOAD(int, TestFactory7, get, /*no args*/); }
+ ~PyTF7() override { print_destroyed(this); }
+ int get() override { PYBIND11_OVERRIDE(int, TestFactory7, get, /*no args*/); }
};
TEST_SUBMODULE(factory_constructors, m) {
// Define various trivial types to allow simpler overload resolution:
- py::module m_tag = m.def_submodule("tag");
+ py::module_ m_tag = m.def_submodule("tag");
#define MAKE_TAG_TYPE(Name) \
struct Name##_tag {}; \
py::class_<Name##_tag>(m_tag, #Name "_tag").def(py::init<>()); \
MAKE_TAG_TYPE(TF4);
MAKE_TAG_TYPE(TF5);
MAKE_TAG_TYPE(null_ptr);
+ MAKE_TAG_TYPE(null_unique_ptr);
+ MAKE_TAG_TYPE(null_shared_ptr);
MAKE_TAG_TYPE(base);
MAKE_TAG_TYPE(invalid_base);
MAKE_TAG_TYPE(alias);
auto c4a = [c](pointer_tag, TF4_tag, int a) { (void) c; return new TestFactory4(a);};
// test_init_factory_basic, test_init_factory_casting
- py::class_<TestFactory3, std::shared_ptr<TestFactory3>>(m, "TestFactory3")
+ py::class_<TestFactory3, std::shared_ptr<TestFactory3>> pyTestFactory3(m, "TestFactory3");
+ pyTestFactory3
.def(py::init([](pointer_tag, int v) { return TestFactoryHelper::construct3(v); }))
- .def(py::init([](shared_ptr_tag) { return TestFactoryHelper::construct3(); }))
- .def("__init__", [](TestFactory3 &self, std::string v) { new (&self) TestFactory3(v); }) // placement-new ctor
-
+ .def(py::init([](shared_ptr_tag) { return TestFactoryHelper::construct3(); }));
+ ignoreOldStyleInitWarnings([&pyTestFactory3]() {
+ pyTestFactory3.def("__init__", [](TestFactory3 &self, std::string v) { new (&self) TestFactory3(v); }); // placement-new ctor
+ });
+ pyTestFactory3
// factories returning a derived type:
.def(py::init(c4a)) // derived ptr
.def(py::init([](pointer_tag, TF5_tag, int a) { return new TestFactory5(a); }))
// Returns nullptr:
.def(py::init([](null_ptr_tag) { return (TestFactory3 *) nullptr; }))
+ .def(py::init([](null_unique_ptr_tag) { return std::unique_ptr<TestFactory3>(); }))
+ .def(py::init([](null_shared_ptr_tag) { return std::shared_ptr<TestFactory3>(); }))
.def_readwrite("value", &TestFactory3::value)
;
static void operator delete(void *p) { py::print("noisy delete"); ::operator delete(p); }
#endif
};
- py::class_<NoisyAlloc>(m, "NoisyAlloc")
+
+
+ py::class_<NoisyAlloc> pyNoisyAlloc(m, "NoisyAlloc");
// Since these overloads have the same number of arguments, the dispatcher will try each of
// them until the arguments convert. Thus we can get a pre-allocation here when passing a
// single non-integer:
- .def("__init__", [](NoisyAlloc *a, int i) { new (a) NoisyAlloc(i); }) // Regular constructor, runs first, requires preallocation
- .def(py::init([](double d) { return new NoisyAlloc(d); }))
-
- // The two-argument version: first the factory pointer overload.
- .def(py::init([](int i, int) { return new NoisyAlloc(i); }))
- // Return-by-value:
- .def(py::init([](double d, int) { return NoisyAlloc(d); }))
- // Old-style placement new init; requires preallocation
- .def("__init__", [](NoisyAlloc &a, double d, double) { new (&a) NoisyAlloc(d); })
- // Requires deallocation of previous overload preallocated value:
- .def(py::init([](int i, double) { return new NoisyAlloc(i); }))
- // Regular again: requires yet another preallocation
- .def("__init__", [](NoisyAlloc &a, int i, std::string) { new (&a) NoisyAlloc(i); })
- ;
+ ignoreOldStyleInitWarnings([&pyNoisyAlloc]() {
+ pyNoisyAlloc.def("__init__", [](NoisyAlloc *a, int i) { new (a) NoisyAlloc(i); }); // Regular constructor, runs first, requires preallocation
+ });
+
+ pyNoisyAlloc.def(py::init([](double d) { return new NoisyAlloc(d); }));
+
+ // The two-argument version: first the factory pointer overload.
+ pyNoisyAlloc.def(py::init([](int i, int) { return new NoisyAlloc(i); }));
+ // Return-by-value:
+ pyNoisyAlloc.def(py::init([](double d, int) { return NoisyAlloc(d); }));
+ // Old-style placement new init; requires preallocation
+ ignoreOldStyleInitWarnings([&pyNoisyAlloc]() {
+ pyNoisyAlloc.def("__init__", [](NoisyAlloc &a, double d, double) { new (&a) NoisyAlloc(d); });
+ });
+ // Requires deallocation of previous overload preallocated value:
+ pyNoisyAlloc.def(py::init([](int i, double) { return new NoisyAlloc(i); }));
+ // Regular again: requires yet another preallocation
+ ignoreOldStyleInitWarnings([&pyNoisyAlloc]() {
+ pyNoisyAlloc.def("__init__", [](NoisyAlloc &a, int i, std::string) { new (&a) NoisyAlloc(i); });
+ });
+# -*- coding: utf-8 -*-
import pytest
import re
+import env # noqa: F401
+
from pybind11_tests import factory_constructors as m
from pybind11_tests.factory_constructors import tag
from pybind11_tests import ConstructorStats
def test_init_factory_basic():
"""Tests py::init_factory() wrapper around various ways of returning the object"""
- cstats = [ConstructorStats.get(c) for c in [m.TestFactory1, m.TestFactory2, m.TestFactory3]]
+ cstats = [
+ ConstructorStats.get(c)
+ for c in [m.TestFactory1, m.TestFactory2, m.TestFactory3]
+ ]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
z3 = m.TestFactory3("bye")
assert z3.value == "bye"
- with pytest.raises(TypeError) as excinfo:
- m.TestFactory3(tag.null_ptr)
- assert str(excinfo.value) == "pybind11::init(): factory function returned nullptr"
+ for null_ptr_kind in [tag.null_ptr, tag.null_unique_ptr, tag.null_shared_ptr]:
+ with pytest.raises(TypeError) as excinfo:
+ m.TestFactory3(null_ptr_kind)
+ assert (
+ str(excinfo.value) == "pybind11::init(): factory function returned nullptr"
+ )
assert [i.alive() for i in cstats] == [3, 3, 3]
assert ConstructorStats.detail_reg_inst() == n_inst + 9
assert [i.values() for i in cstats] == [
["3", "hi!"],
["7", "hi again"],
- ["42", "bye"]
+ ["42", "bye"],
]
assert [i.default_constructions for i in cstats] == [1, 1, 1]
def test_init_factory_signature(msg):
with pytest.raises(TypeError) as excinfo:
m.TestFactory1("invalid", "constructor", "arguments")
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int)
2. m.factory_constructors.TestFactory1(arg0: str)
Invoked with: 'invalid', 'constructor', 'arguments'
""" # noqa: E501 line too long
+ )
- assert msg(m.TestFactory1.__init__.__doc__) == """
+ assert (
+ msg(m.TestFactory1.__init__.__doc__)
+ == """
__init__(*args, **kwargs)
Overloaded function.
4. __init__(self: m.factory_constructors.TestFactory1, arg0: handle, arg1: int, arg2: handle) -> None
""" # noqa: E501 line too long
+ )
def test_init_factory_casting():
"""Tests py::init_factory() wrapper with various upcasting and downcasting returns"""
- cstats = [ConstructorStats.get(c) for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]]
+ cstats = [
+ ConstructorStats.get(c)
+ for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]
+ ]
cstats[0].alive() # force gc
n_inst = ConstructorStats.detail_reg_inst()
assert [i.values() for i in cstats] == [
["4", "5", "6", "7", "8"],
["4", "5", "8"],
- ["6", "7"]
+ ["6", "7"],
]
assert [i.values() for i in cstats] == [
["1", "8", "3", "4", "5", "6", "123", "10", "47"],
- ["hi there", "3", "4", "6", "move", "123", "why hello!", "move", "47"]
+ ["hi there", "3", "4", "6", "move", "123", "why hello!", "move", "47"],
]
assert not g1.has_alias()
with pytest.raises(TypeError) as excinfo:
PythFactory7(tag.shared_ptr, tag.invalid_base, 14)
- assert (str(excinfo.value) ==
- "pybind11::init(): construction failed: returned holder-wrapped instance is not an "
- "alias instance")
+ assert (
+ str(excinfo.value)
+ == "pybind11::init(): construction failed: returned holder-wrapped instance is not an "
+ "alias instance"
+ )
assert [i.alive() for i in cstats] == [13, 7]
assert ConstructorStats.detail_reg_inst() == n_inst + 13
assert [i.values() for i in cstats] == [
["1", "2", "3", "4", "5", "6", "7", "8", "9", "100", "11", "12", "13", "14"],
- ["2", "4", "6", "8", "9", "100", "12"]
+ ["2", "4", "6", "8", "9", "100", "12"],
]
with capture:
a = m.NoPlacementNew(123)
- found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
+ found = re.search(r"^operator new called, returning (\d+)\n$", str(capture))
assert found
assert a.i == 123
with capture:
with capture:
b = m.NoPlacementNew()
- found = re.search(r'^operator new called, returning (\d+)\n$', str(capture))
+ found = re.search(r"^operator new called, returning (\d+)\n$", str(capture))
assert found
assert b.i == 100
with capture:
def strip_comments(s):
- return re.sub(r'\s+#.*', '', s)
+ return re.sub(r"\s+#.*", "", s)
-def test_reallocations(capture, msg):
+def test_reallocation_a(capture, msg):
"""When the constructor is overloaded, previous overloads can require a preallocated value.
This test makes sure that such preallocated values only happen when they might be necessary,
- and that they are deallocated properly"""
+ and that they are deallocated properly."""
pytest.gc_collect()
with capture:
create_and_destroy(1)
- assert msg(capture) == """
+ assert (
+ msg(capture)
+ == """
noisy new
noisy placement new
NoisyAlloc(int 1)
~NoisyAlloc()
noisy delete
"""
+ )
+
+
+def test_reallocation_b(capture, msg):
with capture:
create_and_destroy(1.5)
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
noisy new # allocation required to attempt first overload
noisy delete # have to dealloc before considering factory init overload
noisy new # pointer factory calling "new", part 1: allocation
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
+
+def test_reallocation_c(capture, msg):
with capture:
create_and_destroy(2, 3)
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
noisy new # pointer factory calling "new", allocation
NoisyAlloc(int 2) # constructor
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
+
+def test_reallocation_d(capture, msg):
with capture:
create_and_destroy(2.5, 3)
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
NoisyAlloc(double 2.5) # construction (local func variable: operator_new not called)
noisy new # return-by-value "new" part 1: allocation
~NoisyAlloc() # moved-away local func variable destruction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
+
+def test_reallocation_e(capture, msg):
with capture:
create_and_destroy(3.5, 4.5)
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
noisy new # preallocation needed before invoking placement-new overload
noisy placement new # Placement new
NoisyAlloc(double 3.5) # construction
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
+
+def test_reallocation_f(capture, msg):
with capture:
create_and_destroy(4, 0.5)
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
noisy new # preallocation needed before invoking placement-new overload
noisy delete # deallocation of preallocated storage
noisy new # Factory pointer allocation
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
+
+def test_reallocation_g(capture, msg):
with capture:
create_and_destroy(5, "hi")
- assert msg(capture) == strip_comments("""
+ assert msg(capture) == strip_comments(
+ """
noisy new # preallocation needed before invoking first placement new
noisy delete # delete before considering new-style constructor
noisy new # preallocation for second placement new
---
~NoisyAlloc() # Destructor
noisy delete # operator delete
- """)
+ """
+ )
-@pytest.unsupported_on_py2
+@pytest.mark.skipif("env.PY2")
def test_invalid_self():
"""Tests invocation of the pybind-registered base class with an invalid `self` argument. You
can only actually do this on Python 3: Python 2 raises an exception itself if you try."""
+
class NotPybindDerived(object):
pass
a = m.TestFactory2(tag.pointer, 1)
m.TestFactory6.__init__(a, tag.alias, 1)
elif bad == 3:
- m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.base, 1)
+ m.TestFactory6.__init__(
+ NotPybindDerived.__new__(NotPybindDerived), tag.base, 1
+ )
elif bad == 4:
- m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1)
+ m.TestFactory6.__init__(
+ NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1
+ )
for arg in (1, 2):
with pytest.raises(TypeError) as excinfo:
BrokenTF1(arg)
- assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
+ assert (
+ str(excinfo.value)
+ == "__init__(self, ...) called with invalid `self` argument"
+ )
for arg in (1, 2, 3, 4):
with pytest.raises(TypeError) as excinfo:
BrokenTF6(arg)
- assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument"
+ assert (
+ str(excinfo.value)
+ == "__init__(self, ...) called with invalid `self` argument"
+ )
class VirtClass {
public:
- virtual ~VirtClass() {}
+ virtual ~VirtClass() = default;
+ VirtClass() = default;
+ VirtClass(const VirtClass&) = delete;
virtual void virtual_func() {}
virtual void pure_virtual_func() = 0;
};
class PyVirtClass : public VirtClass {
void virtual_func() override {
- PYBIND11_OVERLOAD(void, VirtClass, virtual_func,);
+ PYBIND11_OVERRIDE(void, VirtClass, virtual_func,);
}
void pure_virtual_func() override {
- PYBIND11_OVERLOAD_PURE(void, VirtClass, pure_virtual_func,);
+ PYBIND11_OVERRIDE_PURE(void, VirtClass, pure_virtual_func,);
}
};
[](VirtClass &virt) { virt.pure_virtual_func(); });
m.def("test_cross_module_gil",
[]() {
- auto cm = py::module::import("cross_module_gil_utils");
+ auto cm = py::module_::import("cross_module_gil_utils");
auto gil_acquire = reinterpret_cast<void (*)()>(
PyLong_AsVoidPtr(cm.attr("gil_acquire_funcaddr").ptr()));
py::gil_scoped_release gil_release;
+# -*- coding: utf-8 -*-
import multiprocessing
import threading
+
from pybind11_tests import gil_scoped as m
def _python_to_cpp_to_python():
"""Calls different C++ functions that come back to Python."""
+
class ExtendedVirtClass(m.VirtClass):
def virtual_func(self):
pass
thread.join()
+# TODO: FIXME, sometimes returns -11 (segfault) instead of 0 on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread():
"""Makes sure there is no GIL deadlock when running in a thread.
assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0
+# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_parallel():
"""Makes sure there is no GIL deadlock when running in a thread multiple times in parallel.
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0
+# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_sequential():
"""Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.
It runs in a separate process to be able to stop and assert if it deadlocks.
"""
- assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
+ assert (
+ _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
+ )
+# TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_process():
"""Makes sure there is no GIL deadlock when using processes.
BSD-style license that can be found in the LICENSE file.
*/
+#if defined(_MSC_VER) && _MSC_VER < 1910 // VS 2015's MSVC
+# pragma warning(disable: 4702) // unreachable code in system header (xatomic.h(382))
+#endif
#include <pybind11/iostream.h>
#include "pybind11_tests.h"
+#include <atomic>
#include <iostream>
+#include <thread>
void noisy_function(std::string msg, bool flush) {
std::cerr << emsg;
}
+// object to manage C++ thread
+// simply repeatedly write to std::cerr until stopped
+// redirect is called at some point to test the safety of scoped_estream_redirect
+struct TestThread {
+ TestThread() : t_{nullptr}, stop_{false} {
+ auto thread_f = [this] {
+ while (!stop_) {
+ std::cout << "x" << std::flush;
+ std::this_thread::sleep_for(std::chrono::microseconds(50));
+ } };
+ t_ = new std::thread(std::move(thread_f));
+ }
+
+ ~TestThread() {
+ delete t_;
+ }
+
+ void stop() { stop_ = true; }
+
+ void join() {
+ py::gil_scoped_release gil_lock;
+ t_->join();
+ }
+
+ void sleep() {
+ py::gil_scoped_release gil_lock;
+ std::this_thread::sleep_for(std::chrono::milliseconds(50));
+ }
+
+ std::thread * t_;
+ std::atomic<bool> stop_;
+};
+
+
TEST_SUBMODULE(iostream, m) {
add_ostream_redirect(m);
});
m.def("captured_output", [](std::string msg) {
- py::scoped_ostream_redirect redir(std::cout, py::module::import("sys").attr("stdout"));
+ py::scoped_ostream_redirect redir(std::cout, py::module_::import("sys").attr("stdout"));
std::cout << msg << std::flush;
});
py::arg("msg"), py::arg("flush")=true);
m.def("captured_err", [](std::string msg) {
- py::scoped_ostream_redirect redir(std::cerr, py::module::import("sys").attr("stderr"));
+ py::scoped_ostream_redirect redir(std::cerr, py::module_::import("sys").attr("stderr"));
std::cerr << msg << std::flush;
});
});
m.def("captured_dual", [](std::string msg, std::string emsg) {
- py::scoped_ostream_redirect redirout(std::cout, py::module::import("sys").attr("stdout"));
- py::scoped_ostream_redirect redirerr(std::cerr, py::module::import("sys").attr("stderr"));
+ py::scoped_ostream_redirect redirout(std::cout, py::module_::import("sys").attr("stdout"));
+ py::scoped_ostream_redirect redirerr(std::cerr, py::module_::import("sys").attr("stderr"));
std::cout << msg << std::flush;
std::cerr << emsg << std::flush;
});
+
+ py::class_<TestThread>(m, "TestThread")
+ .def(py::init<>())
+ .def("stop", &TestThread::stop)
+ .def("join", &TestThread::join)
+ .def("sleep", &TestThread::sleep);
}
+# -*- coding: utf-8 -*-
from pybind11_tests import iostream as m
import sys
# Python 3.4
from contextlib import redirect_stdout
except ImportError:
+
@contextmanager
def redirect_stdout(target):
original = sys.stdout
yield
sys.stdout = original
+
try:
# Python 3.5
from contextlib import redirect_stderr
except ImportError:
+
@contextmanager
def redirect_stderr(target):
original = sys.stderr
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
- assert stderr == ''
+ assert stderr == ""
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
- assert stderr == ''
+ assert stderr == ""
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
- assert stdout == ''
+ assert stdout == ""
assert stderr == msg
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
- assert stderr == ''
+ assert stderr == ""
def test_guard_capture(capsys):
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
- assert stderr == ''
+ assert stderr == ""
def test_series_captured(capture):
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
+ assert stdout == ""
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
- assert stderr == ''
- assert stream.getvalue() == ''
+ assert stderr == ""
+ assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
- assert stderr == ''
+ assert stdout == ""
+ assert stderr == ""
assert stream.getvalue() == msg
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
+ assert stdout == ""
assert stderr == msg
- assert stream.getvalue() == ''
+ assert stream.getvalue() == ""
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
- assert stderr == ''
+ assert stdout == ""
+ assert stderr == ""
assert stream.getvalue() == msg
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
- assert stdout == 'bd'
- assert stream.getvalue() == 'ac'
+ assert stdout == "bd"
+ assert stream.getvalue() == "ac"
def test_dual(capsys):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
- assert stream.getvalue() == ''
+ assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
+ assert stdout == ""
assert stream.getvalue() == msg
stream = StringIO()
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
- assert stream.getvalue() == ''
+ assert stream.getvalue() == ""
def test_redirect_err(capfd):
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
- assert stderr == ''
+ assert stderr == ""
assert stream.getvalue() == msg2
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
- assert stdout == ''
- assert stderr == ''
+ assert stdout == ""
+ assert stderr == ""
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
+
+
+def test_threading():
+ with m.ostream_redirect(stdout=True, stderr=False):
+ # start some threads
+ threads = []
+
+ # start some threads
+ for _j in range(20):
+ threads.append(m.TestThread())
+
+ # give the threads some time to fail
+ threads[0].sleep()
+
+ # stop all the threads
+ for t in threads:
+ t.stop()
+
+ for t in threads:
+ t.join()
+
+ # if a thread segfaults, we don't get here
+ assert True
py::tuple t(a.size());
for (size_t i = 0; i < a.size(); i++)
// Use raw Python API here to avoid an extra, intermediate incref on the tuple item:
- t[i] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));
+ t[i] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<py::ssize_t>(i)));
return t;
});
m.def("mixed_args_refcount", [](py::object o, py::args a) {
t[0] = o.ref_count();
for (size_t i = 0; i < a.size(); i++)
// Use raw Python API here to avoid an extra, intermediate incref on the tuple item:
- t[i + 1] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));
+ t[i + 1] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<py::ssize_t>(i)));
return t;
});
// m.def("bad_args6", [](py::args, py::args) {});
// m.def("bad_args7", [](py::kwargs, py::kwargs) {});
+ // test_keyword_only_args
+ m.def("kw_only_all", [](int i, int j) { return py::make_tuple(i, j); },
+ py::kw_only(), py::arg("i"), py::arg("j"));
+ m.def("kw_only_some", [](int i, int j, int k) { return py::make_tuple(i, j, k); },
+ py::arg(), py::kw_only(), py::arg("j"), py::arg("k"));
+ m.def("kw_only_with_defaults", [](int i, int j, int k, int z) { return py::make_tuple(i, j, k, z); },
+ py::arg() = 3, "j"_a = 4, py::kw_only(), "k"_a = 5, "z"_a);
+ m.def("kw_only_mixed", [](int i, int j) { return py::make_tuple(i, j); },
+ "i"_a, py::kw_only(), "j"_a);
+ m.def("kw_only_plus_more", [](int i, int j, int k, py::kwargs kwargs) {
+ return py::make_tuple(i, j, k, kwargs); },
+ py::arg() /* positional */, py::arg("j") = -1 /* both */, py::kw_only(), py::arg("k") /* kw-only */);
+
+ m.def("register_invalid_kw_only", [](py::module_ m) {
+ m.def("bad_kw_only", [](int i, int j) { return py::make_tuple(i, j); },
+ py::kw_only(), py::arg() /* invalid unnamed argument */, "j"_a);
+ });
+
+ // test_positional_only_args
+ m.def("pos_only_all", [](int i, int j) { return py::make_tuple(i, j); },
+ py::arg("i"), py::arg("j"), py::pos_only());
+ m.def("pos_only_mix", [](int i, int j) { return py::make_tuple(i, j); },
+ py::arg("i"), py::pos_only(), py::arg("j"));
+ m.def("pos_kw_only_mix", [](int i, int j, int k) { return py::make_tuple(i, j, k); },
+ py::arg("i"), py::pos_only(), py::arg("j"), py::kw_only(), py::arg("k"));
+ m.def("pos_only_def_mix", [](int i, int j, int k) { return py::make_tuple(i, j, k); },
+ py::arg("i"), py::arg("j") = 2, py::pos_only(), py::arg("k") = 3);
+
+
+ // These should fail to compile:
+ // argument annotations are required when using kw_only
+// m.def("bad_kw_only1", [](int) {}, py::kw_only());
+ // can't specify both `py::kw_only` and a `py::args` argument
+// m.def("bad_kw_only2", [](int i, py::args) {}, py::kw_only(), "i"_a);
+
// test_function_signatures (along with most of the above)
struct KWClass { void foo(int, float) {} };
py::class_<KWClass>(m, "KWClass")
.def("foo0", &KWClass::foo)
.def("foo1", &KWClass::foo, "x"_a, "y"_a);
+
+ // Make sure a class (not an instance) can be used as a default argument.
+ // The return value doesn't matter, only that the module is importable.
+ m.def("class_default_argument", [](py::object a) { return py::repr(a); },
+ "a"_a = py::module_::import("decimal").attr("Decimal"));
}
+# -*- coding: utf-8 -*-
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import kwargs_and_defaults as m
assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
assert doc(m.args_function) == "args_function(*args) -> tuple"
- assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
- assert doc(m.KWClass.foo0) == \
- "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
- assert doc(m.KWClass.foo1) == \
- "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
+ assert (
+ doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
+ )
+ assert (
+ doc(m.KWClass.foo0)
+ == "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
+ )
+ assert (
+ doc(m.KWClass.foo1)
+ == "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
+ )
def test_named_arguments(msg):
# noinspection PyArgumentList
m.kw_func2(x=5, y=10, z=12)
assert excinfo.match(
- r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')
+ r"(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))"
+ + "{3}$"
+ )
assert m.kw_func4() == "{13 17}"
assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"
def test_arg_and_kwargs():
- args = 'arg1_value', 'arg2_value', 3
+ args = "arg1_value", "arg2_value", 3
assert m.args_function(*args) == args
- args = 'a1', 'a2'
- kwargs = dict(arg3='a3', arg4=4)
+ args = "a1", "a2"
+ kwargs = dict(arg3="a3", arg4=4)
assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)
assert mpa(1, 2.5) == (1, 2.5, ())
with pytest.raises(TypeError) as excinfo:
assert mpa(1)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with: 1
""" # noqa: E501 line too long
+ )
with pytest.raises(TypeError) as excinfo:
assert mpa()
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
mixed_plus_args(): incompatible function arguments. The following argument types are supported:
1. (arg0: int, arg1: float, *args) -> tuple
Invoked with:
""" # noqa: E501 line too long
+ )
- assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})
+ assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (
+ -2,
+ 3.5,
+ {"e": 2.71828, "pi": 3.14159},
+ )
assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
- 7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})
+ 7,
+ 7.7,
+ (7.77, 7.777, 7.7777),
+ {"minusseven": -7},
+ )
assert mpakd() == (1, 3.14159, (), {})
assert mpakd(3) == (3, 3.14159, (), {})
assert mpakd(j=2.71828) == (1, 2.71828, (), {})
- assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})
+ assert mpakd(k=42) == (1, 3.14159, (), {"k": 42})
assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
- 1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})
+ 1,
+ 1,
+ (2, 3, 5, 8),
+ {"then": 13, "followedby": 21},
+ )
# Arguments specified both positionally and via kwargs should fail:
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, i=1)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1; kwargs: i=1
""" # noqa: E501 line too long
+ )
with pytest.raises(TypeError) as excinfo:
assert mpakd(1, 2, j=1)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
Invoked with: 1, 2; kwargs: j=1
""" # noqa: E501 line too long
+ )
+
+
+def test_keyword_only_args(msg):
+ assert m.kw_only_all(i=1, j=2) == (1, 2)
+ assert m.kw_only_all(j=1, i=2) == (2, 1)
+
+ with pytest.raises(TypeError) as excinfo:
+ assert m.kw_only_all(i=1) == (1,)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ with pytest.raises(TypeError) as excinfo:
+ assert m.kw_only_all(1, 2) == (1, 2)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ assert m.kw_only_some(1, k=3, j=2) == (1, 2, 3)
+ assert m.kw_only_with_defaults(z=8) == (3, 4, 5, 8)
+ assert m.kw_only_with_defaults(2, z=8) == (2, 4, 5, 8)
+ assert m.kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
+ assert m.kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)
+
+ assert m.kw_only_mixed(1, j=2) == (1, 2)
+ assert m.kw_only_mixed(j=2, i=3) == (3, 2)
+ assert m.kw_only_mixed(i=2, j=3) == (2, 3)
+
+ assert m.kw_only_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {"extra": 7})
+ assert m.kw_only_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {"extra": 6})
+ assert m.kw_only_plus_more(2, k=3, extra=4) == (2, -1, 3, {"extra": 4})
+
+ with pytest.raises(TypeError) as excinfo:
+ assert m.kw_only_mixed(i=1) == (1,)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ with pytest.raises(RuntimeError) as excinfo:
+ m.register_invalid_kw_only(m)
+ assert (
+ msg(excinfo.value)
+ == """
+ arg(): cannot specify an unnamed argument after an kw_only() annotation
+ """
+ )
+
+
+def test_positional_only_args(msg):
+ assert m.pos_only_all(1, 2) == (1, 2)
+ assert m.pos_only_all(2, 1) == (2, 1)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_only_all(i=1, j=2)
+ assert "incompatible function arguments" in str(excinfo.value)
+ assert m.pos_only_mix(1, 2) == (1, 2)
+ assert m.pos_only_mix(2, j=1) == (2, 1)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_only_mix(i=1, j=2)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ assert m.pos_kw_only_mix(1, 2, k=3) == (1, 2, 3)
+ assert m.pos_kw_only_mix(1, j=2, k=3) == (1, 2, 3)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_kw_only_mix(i=1, j=2, k=3)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_kw_only_mix(1, 2, 3)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_only_def_mix()
+ assert "incompatible function arguments" in str(excinfo.value)
+
+ assert m.pos_only_def_mix(1) == (1, 2, 3)
+ assert m.pos_only_def_mix(1, 4) == (1, 4, 3)
+ assert m.pos_only_def_mix(1, 4, 7) == (1, 4, 7)
+ assert m.pos_only_def_mix(1, 4, k=7) == (1, 4, 7)
+
+ with pytest.raises(TypeError) as excinfo:
+ m.pos_only_def_mix(1, j=4)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+
+def test_signatures():
+ assert "kw_only_all(*, i: int, j: int) -> tuple\n" == m.kw_only_all.__doc__
+ assert "kw_only_mixed(i: int, *, j: int) -> tuple\n" == m.kw_only_mixed.__doc__
+ assert "pos_only_all(i: int, j: int, /) -> tuple\n" == m.pos_only_all.__doc__
+ assert "pos_only_mix(i: int, /, j: int) -> tuple\n" == m.pos_only_mix.__doc__
+ assert (
+ "pos_kw_only_mix(i: int, /, j: int, *, k: int) -> tuple\n"
+ == m.pos_kw_only_mix.__doc__
+ )
+
+
+@pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count")
def test_args_refcount():
"""Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular
arguments"""
assert m.args_function(-1, myval) == (-1, myval)
assert refcount(myval) == expected
- assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval})
+ assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (
+ 5,
+ 6.0,
+ (myval,),
+ {"a": myval},
+ )
assert refcount(myval) == expected
- assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \
- ((7, 8, myval), {"a": 1, "b": myval})
+ assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == (
+ (7, 8, myval),
+ {"a": 1, "b": myval},
+ )
assert refcount(myval) == expected
exp3 = refcount(myval, myval, myval)
# tuple without having to inc_ref the individual elements, but here we can't, hence the extra
# refs.
assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)
+
+ assert m.class_default_argument() == "<class 'decimal.Decimal'>"
// should raise a runtime error from the duplicate definition attempt. If test_class isn't
// available it *also* throws a runtime error (with "test_class not enabled" as value).
m.def("register_local_external", [m]() {
- auto main = py::module::import("pybind11_tests");
+ auto main = py::module_::import("pybind11_tests");
if (py::hasattr(main, "class_")) {
bind_local<LocalExternal, 7>(m, "LocalExternal", py::module_local());
}
+# -*- coding: utf-8 -*-
import pytest
+import env # noqa: F401
+
from pybind11_tests import local_bindings as m
assert i2.get() == 11
assert i2.get2() == 12
- assert not hasattr(i1, 'get2')
- assert not hasattr(i2, 'get3')
+ assert not hasattr(i1, "get2")
+ assert not hasattr(i2, "get3")
# Loading within the local module
assert m.local_value(i1) == 5
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal()
- assert str(excinfo.value) == 'generic_type: type "NonLocalType" is already registered!'
+ assert (
+ str(excinfo.value) == 'generic_type: type "NonLocalType" is already registered!'
+ )
def test_duplicate_local():
with pytest.raises(RuntimeError) as excinfo:
m.register_local_external()
import pybind11_tests
+
assert str(excinfo.value) == (
'generic_type: type "LocalExternal" is already registered!'
- if hasattr(pybind11_tests, 'class_') else 'test_class not enabled')
+ if hasattr(pybind11_tests, "class_")
+ else "test_class not enabled"
+ )
def test_stl_bind_local():
d1["b"] = v1[1]
d2["c"] = v2[0]
d2["d"] = v2[1]
- assert {i: d1[i].get() for i in d1} == {'a': 0, 'b': 1}
- assert {i: d2[i].get() for i in d2} == {'c': 2, 'd': 3}
+ assert {i: d1[i].get() for i in d1} == {"a": 0, "b": 1}
+ assert {i: d2[i].get() for i in d2} == {"c": 2, "d": 3}
def test_stl_bind_global():
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_map()
- assert str(excinfo.value) == 'generic_type: type "NonLocalMap" is already registered!'
+ assert (
+ str(excinfo.value) == 'generic_type: type "NonLocalMap" is already registered!'
+ )
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_vec()
- assert str(excinfo.value) == 'generic_type: type "NonLocalVec" is already registered!'
+ assert (
+ str(excinfo.value) == 'generic_type: type "NonLocalVec" is already registered!'
+ )
with pytest.raises(RuntimeError) as excinfo:
cm.register_nonlocal_map2()
- assert str(excinfo.value) == 'generic_type: type "NonLocalMap2" is already registered!'
+ assert (
+ str(excinfo.value) == 'generic_type: type "NonLocalMap2" is already registered!'
+ )
def test_mixed_local_global():
type can be registered even if the type is already registered globally. With the module,
casting will go to the local type; outside the module casting goes to the global type."""
import pybind11_cross_module_tests as cm
+
m.register_mixed_global()
m.register_mixed_local()
a.append(cm.get_mixed_gl(11))
a.append(cm.get_mixed_lg(12))
- assert [x.get() for x in a] == \
- [101, 1002, 103, 1004, 105, 1006, 207, 2008, 109, 1010, 211, 2012]
+ assert [x.get() for x in a] == [
+ 101,
+ 1002,
+ 103,
+ 1004,
+ 105,
+ 1006,
+ 207,
+ 2008,
+ 109,
+ 1010,
+ 211,
+ 2012,
+ ]
def test_internal_locals_differ():
"""Makes sure the internal local type map differs across the two modules"""
import pybind11_cross_module_tests as cm
+
assert m.local_cpp_types_addr() != cm.local_cpp_types_addr()
+@pytest.mark.xfail("env.PYPY and sys.pypy_version_info < (7, 3, 2)")
def test_stl_caster_vs_stl_bind(msg):
"""One module uses a generic vector caster from `<pybind11/stl.h>` while the other
exports `std::vector<int>` via `py:bind_vector` and `py::module_local`"""
v2 = [1, 2, 3]
assert m.load_vector_via_caster(v2) == 6
with pytest.raises(TypeError) as excinfo:
- cm.load_vector_via_binding(v2) == 6
- assert msg(excinfo.value) == """
+ cm.load_vector_via_binding(v2)
+ assert (
+ msg(excinfo.value)
+ == """
load_vector_via_binding(): incompatible function arguments. The following argument types are supported:
1. (arg0: pybind11_cross_module_tests.VectorInt) -> int
Invoked with: [1, 2, 3]
""" # noqa: E501 line too long
+ )
def test_cross_module_calls():
ExampleMandA() { print_default_created(this); }
ExampleMandA(int value) : value(value) { print_created(this, value); }
ExampleMandA(const ExampleMandA &e) : value(e.value) { print_copy_created(this); }
+ ExampleMandA(std::string&&) {}
ExampleMandA(ExampleMandA &&e) : value(e.value) { print_move_created(this); }
~ExampleMandA() { print_destroyed(this); }
void add9(int *other) { value += *other; } // passing by pointer
void add10(const int *other) { value += *other; } // passing by const pointer
+ void consume_str(std::string&&) {}
+
ExampleMandA self1() { return *this; } // return by value
ExampleMandA &self2() { return *this; } // return by reference
const ExampleMandA &self3() { return *this; } // return by const reference
UserType TestPropRVP::sv1(1);
UserType TestPropRVP::sv2(1);
-// py::arg/py::arg_v testing: these arguments just record their argument when invoked
-class ArgInspector1 { public: std::string arg = "(default arg inspector 1)"; };
-class ArgInspector2 { public: std::string arg = "(default arg inspector 2)"; };
-class ArgAlwaysConverts { };
-namespace pybind11 { namespace detail {
-template <> struct type_caster<ArgInspector1> {
-public:
- PYBIND11_TYPE_CASTER(ArgInspector1, _("ArgInspector1"));
-
- bool load(handle src, bool convert) {
- value.arg = "loading ArgInspector1 argument " +
- std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
- "Argument value = " + (std::string) str(src);
- return true;
- }
-
- static handle cast(const ArgInspector1 &src, return_value_policy, handle) {
- return str(src.arg).release();
- }
-};
-template <> struct type_caster<ArgInspector2> {
-public:
- PYBIND11_TYPE_CASTER(ArgInspector2, _("ArgInspector2"));
-
- bool load(handle src, bool convert) {
- value.arg = "loading ArgInspector2 argument " +
- std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
- "Argument value = " + (std::string) str(src);
- return true;
- }
-
- static handle cast(const ArgInspector2 &src, return_value_policy, handle) {
- return str(src.arg).release();
- }
-};
-template <> struct type_caster<ArgAlwaysConverts> {
-public:
- PYBIND11_TYPE_CASTER(ArgAlwaysConverts, _("ArgAlwaysConverts"));
-
- bool load(handle, bool convert) {
- return convert;
- }
-
- static handle cast(const ArgAlwaysConverts &, return_value_policy, handle) {
- return py::none().release();
- }
-};
-}}
-
-// test_custom_caster_destruction
-class DestructionTester {
-public:
- DestructionTester() { print_default_created(this); }
- ~DestructionTester() { print_destroyed(this); }
- DestructionTester(const DestructionTester &) { print_copy_created(this); }
- DestructionTester(DestructionTester &&) { print_move_created(this); }
- DestructionTester &operator=(const DestructionTester &) { print_copy_assigned(this); return *this; }
- DestructionTester &operator=(DestructionTester &&) { print_move_assigned(this); return *this; }
-};
-namespace pybind11 { namespace detail {
-template <> struct type_caster<DestructionTester> {
- PYBIND11_TYPE_CASTER(DestructionTester, _("DestructionTester"));
- bool load(handle, bool) { return true; }
-
- static handle cast(const DestructionTester &, return_value_policy, handle) {
- return py::bool_(true).release();
- }
-};
-}}
-
// Test None-allowed py::arg argument policy
class NoneTester { public: int answer = 42; };
int none1(const NoneTester &obj) { return obj.answer; }
double sum() const { return rw_value + ro_value; }
};
+// Test explicit lvalue ref-qualification
+// Verifies pybind11 can bind '&' / 'const &' ref-qualified member functions
+// just like unqualified ones (exercised by test_ref_qualified on the Python side).
+struct RefQualified {
+ int value = 0;
+
+ void refQualified(int other) & { value += other; } // mutating; lvalue-only in C++
+ int constRefQualified(int other) const & { return value + other; } // read-only
+};
+
TEST_SUBMODULE(methods_and_attributes, m) {
// test_methods_and_attributes
py::class_<ExampleMandA> emna(m, "ExampleMandA");
emna.def(py::init<>())
.def(py::init<int>())
+ .def(py::init<std::string&&>())
.def(py::init<const ExampleMandA&>())
.def("add1", &ExampleMandA::add1)
.def("add2", &ExampleMandA::add2)
.def("add8", &ExampleMandA::add8)
.def("add9", &ExampleMandA::add9)
.def("add10", &ExampleMandA::add10)
+ .def("consume_str", &ExampleMandA::consume_str)
.def("self1", &ExampleMandA::self1)
.def("self2", &ExampleMandA::self2)
.def("self3", &ExampleMandA::self3)
// test_no_mixed_overloads
// Raise error if trying to mix static/non-static overloads on the same name:
.def_static("add_mixed_overloads1", []() {
- auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
+ auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module_::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
emna.def ("overload_mixed1", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded))
.def_static("overload_mixed1", static_cast<py::str ( *)(float )>(&ExampleMandA::overloaded));
})
.def_static("add_mixed_overloads2", []() {
- auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
+ auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module_::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
emna.def_static("overload_mixed2", static_cast<py::str ( *)(float )>(&ExampleMandA::overloaded))
.def ("overload_mixed2", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded));
})
py::class_<MetaclassOverride>(m, "MetaclassOverride", py::metaclass((PyObject *) &PyType_Type))
.def_property_readonly_static("readonly", [](py::object) { return 1; });
+ // test_overload_ordering
+ m.def("overload_order", [](std::string) { return 1; });
+ m.def("overload_order", [](std::string) { return 2; });
+ m.def("overload_order", [](int) { return 3; });
+ m.def("overload_order", [](int) { return 4; }, py::prepend{});
+
#if !defined(PYPY_VERSION)
// test_dynamic_attributes
class DynamicClass {
public:
DynamicClass() { print_default_created(this); }
+ DynamicClass(const DynamicClass&) = delete;
~DynamicClass() { print_destroyed(this); }
};
py::class_<DynamicClass>(m, "DynamicClass", py::dynamic_attr())
.def(py::init());
#endif
- // test_noconvert_args
- //
- // Test converting. The ArgAlwaysConverts is just there to make the first no-conversion pass
- // fail so that our call always ends up happening via the second dispatch (the one that allows
- // some conversion).
- class ArgInspector {
- public:
- ArgInspector1 f(ArgInspector1 a, ArgAlwaysConverts) { return a; }
- std::string g(ArgInspector1 a, const ArgInspector1 &b, int c, ArgInspector2 *d, ArgAlwaysConverts) {
- return a.arg + "\n" + b.arg + "\n" + std::to_string(c) + "\n" + d->arg;
- }
- static ArgInspector2 h(ArgInspector2 a, ArgAlwaysConverts) { return a; }
- };
- py::class_<ArgInspector>(m, "ArgInspector")
- .def(py::init<>())
- .def("f", &ArgInspector::f, py::arg(), py::arg() = ArgAlwaysConverts())
- .def("g", &ArgInspector::g, "a"_a.noconvert(), "b"_a, "c"_a.noconvert()=13, "d"_a=ArgInspector2(), py::arg() = ArgAlwaysConverts())
- .def_static("h", &ArgInspector::h, py::arg().noconvert(), py::arg() = ArgAlwaysConverts())
- ;
- m.def("arg_inspect_func", [](ArgInspector2 a, ArgInspector1 b, ArgAlwaysConverts) { return a.arg + "\n" + b.arg; },
- py::arg().noconvert(false), py::arg_v(nullptr, ArgInspector1()).noconvert(true), py::arg() = ArgAlwaysConverts());
-
- m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f"));
- m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert());
- m.def("ints_preferred", [](int i) { return i / 2; }, py::arg("i"));
- m.def("ints_only", [](int i) { return i / 2; }, py::arg("i").noconvert());
-
// test_bad_arg_default
// Issue/PR #648: bad arg default debugging output
#if !defined(NDEBUG)
m.attr("debug_enabled") = false;
#endif
m.def("bad_arg_def_named", []{
- auto m = py::module::import("pybind11_tests");
+ auto m = py::module_::import("pybind11_tests");
m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg("a") = UnregisteredType());
});
m.def("bad_arg_def_unnamed", []{
- auto m = py::module::import("pybind11_tests");
+ auto m = py::module_::import("pybind11_tests");
m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg() = UnregisteredType());
});
+ // [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
+
// test_accepts_none
py::class_<NoneTester, std::shared_ptr<NoneTester>>(m, "NoneTester")
.def(py::init<>());
- m.def("no_none1", &none1, py::arg().none(false));
- m.def("no_none2", &none2, py::arg().none(false));
- m.def("no_none3", &none3, py::arg().none(false));
- m.def("no_none4", &none4, py::arg().none(false));
- m.def("no_none5", &none5, py::arg().none(false));
+ m.def("no_none1", &none1, py::arg{}.none(false));
+ m.def("no_none2", &none2, py::arg{}.none(false));
+ m.def("no_none3", &none3, py::arg{}.none(false));
+ m.def("no_none4", &none4, py::arg{}.none(false));
+ m.def("no_none5", &none5, py::arg{}.none(false));
m.def("ok_none1", &none1);
- m.def("ok_none2", &none2, py::arg().none(true));
+ m.def("ok_none2", &none2, py::arg{}.none(true));
m.def("ok_none3", &none3);
- m.def("ok_none4", &none4, py::arg().none(true));
+ m.def("ok_none4", &none4, py::arg{}.none(true));
m.def("ok_none5", &none5);
+ m.def("no_none_kwarg", &none2, "a"_a.none(false));
+ m.def("no_none_kwarg_kw_only", &none2, py::kw_only(), "a"_a.none(false));
+
// test_str_issue
// Issue #283: __str__ called on uninitialized instance when constructor arguments invalid
py::class_<StrIssue>(m, "StrIssue")
using Adapted = decltype(py::method_adaptor<RegisteredDerived>(&RegisteredDerived::do_nothing));
static_assert(std::is_same<Adapted, void (RegisteredDerived::*)() const>::value, "");
- // test_custom_caster_destruction
- // Test that `take_ownership` works on types with a custom type caster when given a pointer
-
- // default policy: don't take ownership:
- m.def("custom_caster_no_destroy", []() { static auto *dt = new DestructionTester(); return dt; });
-
- m.def("custom_caster_destroy", []() { return new DestructionTester(); },
- py::return_value_policy::take_ownership); // Takes ownership: destroy when finished
- m.def("custom_caster_destroy_const", []() -> const DestructionTester * { return new DestructionTester(); },
- py::return_value_policy::take_ownership); // Likewise (const doesn't inhibit destruction)
- m.def("destruction_tester_cstats", &ConstructorStats::get<DestructionTester>, py::return_value_policy::reference);
+ // test_methods_and_attributes
+ py::class_<RefQualified>(m, "RefQualified")
+ .def(py::init<>())
+ .def_readonly("value", &RefQualified::value)
+ .def("refQualified", &RefQualified::refQualified)
+ .def("constRefQualified", &RefQualified::constRefQualified);
}
+# -*- coding: utf-8 -*-
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import methods_and_attributes as m
from pybind11_tests import ConstructorStats
assert instance1.overloaded(0) == "(int)"
assert instance1.overloaded(1, 1.0) == "(int, float)"
assert instance1.overloaded(2.0, 2) == "(float, int)"
- assert instance1.overloaded(3, 3) == "(int, int)"
- assert instance1.overloaded(4., 4.) == "(float, float)"
+ assert instance1.overloaded(3, 3) == "(int, int)"
+ assert instance1.overloaded(4.0, 4.0) == "(float, float)"
assert instance1.overloaded_const(-3) == "(int) const"
assert instance1.overloaded_const(5, 5.0) == "(int, float) const"
assert instance1.overloaded_const(6.0, 6) == "(float, int) const"
- assert instance1.overloaded_const(7, 7) == "(int, int) const"
- assert instance1.overloaded_const(8., 8.) == "(float, float) const"
+ assert instance1.overloaded_const(7, 7) == "(int, int) const"
+ assert instance1.overloaded_const(8.0, 8.0) == "(float, float) const"
assert instance1.overloaded_float(1, 1) == "(float, float)"
- assert instance1.overloaded_float(1, 1.) == "(float, float)"
- assert instance1.overloaded_float(1., 1) == "(float, float)"
- assert instance1.overloaded_float(1., 1.) == "(float, float)"
+ assert instance1.overloaded_float(1, 1.0) == "(float, float)"
+ assert instance1.overloaded_float(1.0, 1) == "(float, float)"
+ assert instance1.overloaded_float(1.0, 1.0) == "(float, float)"
assert instance1.value == 320
instance1.value = 100
assert cstats.alive() == 0
assert cstats.values() == ["32"]
assert cstats.default_constructions == 1
- assert cstats.copy_constructions == 3
- assert cstats.move_constructions >= 1
+ assert cstats.copy_constructions == 2
+ assert cstats.move_constructions >= 2
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
assert m.TestPropertiesOverride().def_readonly == 99
assert m.TestPropertiesOverride.def_readonly_static == 99
+ # Only static attributes can be deleted
+ del m.TestPropertiesOverride.def_readonly_static
+ assert (
+ hasattr(m.TestPropertiesOverride, "def_readonly_static")
+ and m.TestPropertiesOverride.def_readonly_static
+ is m.TestProperties.def_readonly_static
+ )
+ assert "def_readonly_static" not in m.TestPropertiesOverride.__dict__
+ properties_override = m.TestPropertiesOverride()
+ with pytest.raises(AttributeError) as excinfo:
+ del properties_override.def_readonly
+ assert "can't delete attribute" in str(excinfo.value)
+
def test_static_cls():
"""Static property getter and setters expect the type object as the their only argument"""
assert type(m.MetaclassOverride).__name__ == "type"
assert m.MetaclassOverride.readonly == 1
- assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property"
+ assert (
+ type(m.MetaclassOverride.__dict__["readonly"]).__name__
+ == "pybind11_static_property"
+ )
# Regular `type` replaces the property instead of calling `__set__()`
m.MetaclassOverride.readonly = 2
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads1()
- assert (str(excinfo.value) ==
- "overloading a method with both static and instance methods is not supported; " +
- ("compile in debug mode for more details" if not debug_enabled else
- "error while attempting to bind static method ExampleMandA.overload_mixed1"
- "(arg0: float) -> str")
- )
+ assert str(
+ excinfo.value
+ ) == "overloading a method with both static and instance methods is not supported; " + (
+ "compile in debug mode for more details"
+ if not debug_enabled
+ else "error while attempting to bind static method ExampleMandA.overload_mixed1"
+ "(arg0: float) -> str"
+ )
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads2()
- assert (str(excinfo.value) ==
- "overloading a method with both static and instance methods is not supported; " +
- ("compile in debug mode for more details" if not debug_enabled else
- "error while attempting to bind instance method ExampleMandA.overload_mixed2"
- "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
- " -> str")
- )
+ assert str(
+ excinfo.value
+ ) == "overloading a method with both static and instance methods is not supported; " + (
+ "compile in debug mode for more details"
+ if not debug_enabled
+ else "error while attempting to bind instance method ExampleMandA.overload_mixed2"
+ "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
+ " -> str"
+ )
@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
assert os.value == 1
-# https://bitbucket.org/pypy/pypy/issues/2447
-@pytest.unsupported_on_pypy
+# https://foss.heptapod.net/pypy/pypy/-/issues/2447
+@pytest.mark.xfail("env.PYPY")
def test_dynamic_attributes():
instance = m.DynamicClass()
assert not hasattr(instance, "foo")
assert cstats.alive() == 0
-# https://bitbucket.org/pypy/pypy/issues/2447
-@pytest.unsupported_on_pypy
+# https://foss.heptapod.net/pypy/pypy/-/issues/2447
+@pytest.mark.xfail("env.PYPY")
def test_cyclic_gc():
# One object references itself
instance = m.DynamicClass()
assert cstats.alive() == 0
-def test_noconvert_args(msg):
- a = m.ArgInspector()
- assert msg(a.f("hi")) == """
- loading ArgInspector1 argument WITH conversion allowed. Argument value = hi
- """
- assert msg(a.g("this is a", "this is b")) == """
- loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
- loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
- 13
- loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
- """ # noqa: E501 line too long
- assert msg(a.g("this is a", "this is b", 42)) == """
- loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
- loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
- 42
- loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
- """ # noqa: E501 line too long
- assert msg(a.g("this is a", "this is b", 42, "this is d")) == """
- loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
- loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
- 42
- loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d
- """
- assert (a.h("arg 1") ==
- "loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1")
- assert msg(m.arg_inspect_func("A1", "A2")) == """
- loading ArgInspector2 argument WITH conversion allowed. Argument value = A1
- loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2
- """
-
- assert m.floats_preferred(4) == 2.0
- assert m.floats_only(4.0) == 2.0
- with pytest.raises(TypeError) as excinfo:
- m.floats_only(4)
- assert msg(excinfo.value) == """
- floats_only(): incompatible function arguments. The following argument types are supported:
- 1. (f: float) -> float
-
- Invoked with: 4
- """
-
- assert m.ints_preferred(4) == 2
- assert m.ints_preferred(True) == 0
- with pytest.raises(TypeError) as excinfo:
- m.ints_preferred(4.0)
- assert msg(excinfo.value) == """
- ints_preferred(): incompatible function arguments. The following argument types are supported:
- 1. (i: int) -> int
-
- Invoked with: 4.0
- """ # noqa: E501 line too long
-
- assert m.ints_only(4) == 2
- with pytest.raises(TypeError) as excinfo:
- m.ints_only(4.0)
- assert msg(excinfo.value) == """
- ints_only(): incompatible function arguments. The following argument types are supported:
- 1. (i: int) -> int
-
- Invoked with: 4.0
- """
-
-
def test_bad_arg_default(msg):
from pybind11_tests import debug_enabled
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'a: UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
- if debug_enabled else
- "arg(): could not convert default argument into a Python object (type not registered "
+ if debug_enabled
+ else "arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
- if debug_enabled else
- "arg(): could not convert default argument into a Python object (type not registered "
+ if debug_enabled
+ else "arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
# The first one still raises because you can't pass None as a lvalue reference arg:
with pytest.raises(TypeError) as excinfo:
assert m.ok_none1(None) == -1
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
ok_none1(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.methods_and_attributes.NoneTester) -> int
Invoked with: None
"""
+ )
# The rest take the argument as pointer or holder, and accept None:
assert m.ok_none2(None) == -1
assert m.ok_none4(None) == -1
assert m.ok_none5(None) == -1
+ with pytest.raises(TypeError) as excinfo:
+ m.no_none_kwarg(None)
+ assert "incompatible function arguments" in str(excinfo.value)
+ with pytest.raises(TypeError) as excinfo:
+ m.no_none_kwarg(a=None)
+ assert "incompatible function arguments" in str(excinfo.value)
+ with pytest.raises(TypeError) as excinfo:
+ m.no_none_kwarg_kw_only(None)
+ assert "incompatible function arguments" in str(excinfo.value)
+ with pytest.raises(TypeError) as excinfo:
+ m.no_none_kwarg_kw_only(a=None)
+ assert "incompatible function arguments" in str(excinfo.value)
+
def test_str_issue(msg):
"""#283: __str__ called on uninitialized instance when constructor arguments invalid"""
with pytest.raises(TypeError) as excinfo:
str(m.StrIssue("no", "such", "constructor"))
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.methods_and_attributes.StrIssue(arg0: int)
2. m.methods_and_attributes.StrIssue()
Invoked with: 'no', 'such', 'constructor'
"""
+ )
def test_unregistered_base_implementations():
assert a.ro_value_prop == 1.75
-def test_custom_caster_destruction():
- """Tests that returning a pointer to a type that gets converted with a custom type caster gets
- destroyed when the function has py::return_value_policy::take_ownership policy applied."""
# Diff note: replaces the removed custom-caster-destruction checks with a test
# of the new RefQualified binding ('&' / 'const &' qualified methods).
+def test_ref_qualified():
+ """Tests that explicit lvalue ref-qualified methods can be called just like their
+ non ref-qualified counterparts."""
- cstats = m.destruction_tester_cstats()
- # This one *doesn't* have take_ownership: the pointer should be used but not destroyed:
- z = m.custom_caster_no_destroy()
- assert cstats.alive() == 1 and cstats.default_constructions == 1
- assert z
+ r = m.RefQualified()
+ assert r.value == 0
+ r.refQualified(17)
+ assert r.value == 17
+ assert r.constRefQualified(23) == 40
- # take_ownership applied: this constructs a new object, casts it, then destroys it:
- z = m.custom_caster_destroy()
- assert z
- assert cstats.default_constructions == 2
- # Same, but with a const pointer return (which should *not* inhibit destruction):
- z = m.custom_caster_destroy_const()
- assert z
- assert cstats.default_constructions == 3
# Matches the overload_order registrations in TEST_SUBMODULE: the int overload
# added last with py::prepend{} is tried first (returns 4), while the
# first-registered str overload keeps priority among the rest (returns 1).
+def test_overload_ordering():
+ "Check to see if the normal overload order (first defined) and prepend overload order works"
+ assert m.overload_order("string") == 1
+ assert m.overload_order(0) == 4
- # Make sure we still only have the original object (from ..._no_destroy()) alive:
- assert cstats.alive() == 1
+ # Different for Python 2 vs. 3
+ uni_name = type(u"").__name__
+
+ assert "1. overload_order(arg0: int) -> int" in m.overload_order.__doc__
+ assert (
+ "2. overload_order(arg0: {}) -> int".format(uni_name)
+ in m.overload_order.__doc__
+ )
+ assert (
+ "3. overload_order(arg0: {}) -> int".format(uni_name)
+ in m.overload_order.__doc__
+ )
+ assert "4. overload_order(arg0: int) -> int" in m.overload_order.__doc__
+
+ with pytest.raises(TypeError) as err:
+ m.overload_order(1.1)
+
+ assert "1. (arg0: int) -> int" in str(err.value)
+ assert "2. (arg0: {}) -> int".format(uni_name) in str(err.value)
+ assert "3. (arg0: {}) -> int".format(uni_name) in str(err.value)
+ assert "4. (arg0: int) -> int" in str(err.value)
TEST_SUBMODULE(modules, m) {
// test_nested_modules
+ // This is intentionally "py::module" to verify it still can be used in place of "py::module_"
py::module m_sub = m.def_submodule("subsubmodule");
m_sub.def("submodule_func", []() { return "submodule_func()"; });
.def_readwrite("a1", &B::a1) // def_readonly uses an internal reference return policy by default
.def_readwrite("a2", &B::a2);
+ // This is intentionally "py::module" to verify it still can be used in place of "py::module_"
m.attr("OD") = py::module::import("collections").attr("OrderedDict");
// test_duplicate_registration
class Dupe3 { };
class DupeException { };
- auto dm = py::module("dummy");
+ // Go ahead and leak, until we have a non-leaking py::module_ constructor
+ auto dm = py::module_::create_extension_module("dummy", nullptr, new py::module_::module_def);
auto failures = py::list();
py::class_<Dupe1>(dm, "Dupe1");
+# -*- coding: utf-8 -*-
from pybind11_tests import modules as m
from pybind11_tests.modules import subsubmodule as ms
from pybind11_tests import ConstructorStats
def test_nested_modules():
import pybind11_tests
+
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.modules.__name__ == "pybind11_tests.modules"
- assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule"
+ assert (
+ pybind11_tests.modules.subsubmodule.__name__
+ == "pybind11_tests.modules.subsubmodule"
+ )
assert m.__name__ == "pybind11_tests.modules"
assert ms.__name__ == "pybind11_tests.modules.subsubmodule"
del b
assert astats.alive() == 0
assert bstats.alive() == 0
- assert astats.values() == ['1', '2', '42', '43']
+ assert astats.values() == ["1", "2", "42", "43"]
assert bstats.values() == []
assert astats.default_constructions == 0
assert bstats.default_constructions == 1
from collections import OrderedDict
assert OD is OrderedDict
- assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
+ assert str(OD([(1, "a"), (2, "b")])) == "OrderedDict([(1, 'a'), (2, 'b')])"
def test_pydoc():
"""Registering two things with the same name"""
assert m.duplicate_registration() == []
+
+
+def test_builtin_key_type():
+ """Test that all the keys in the builtin modules have type str.
+
+ Previous versions of pybind11 would add a unicode key in python 2.
+ """
+ # NOTE(review): __builtins__ may expose keys() directly or only via __dict__,
+ # depending on interpreter/context — the branch below covers both shapes.
+ if hasattr(__builtins__, "keys"):
+ keys = __builtins__.keys()
+ else: # this is to make pypy happy since builtins is different there.
+ keys = __builtins__.__dict__.keys()
+
+ assert {type(k) for k in keys} == {str}
.def_readwrite_static("static_value", &VanillaStaticMix2::static_value);
-#if !defined(PYPY_VERSION)
struct WithDict { };
struct VanillaDictMix1 : Vanilla, WithDict { };
struct VanillaDictMix2 : WithDict, Vanilla { };
py::class_<WithDict>(m, "WithDict", py::dynamic_attr()).def(py::init<>());
py::class_<VanillaDictMix1, Vanilla, WithDict>(m, "VanillaDictMix1").def(py::init<>());
py::class_<VanillaDictMix2, WithDict, Vanilla>(m, "VanillaDictMix2").def(py::init<>());
-#endif
// test_diamond_inheritance
// Issue #959: segfault when constructing diamond inheritance instance
+# -*- coding: utf-8 -*-
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
assert mt.bar() == 4
+@pytest.mark.skipif("env.PYPY and env.PY2")
+@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_mix1():
class Base1:
def __init__(self, i):
def test_multiple_inheritance_mix2():
-
class Base2:
def __init__(self, i):
self.i = i
assert mt.bar() == 4
+@pytest.mark.skipif("env.PYPY and env.PY2")
+@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_python():
-
class MI1(m.Base1, m.Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
def test_multiple_inheritance_python_many_bases():
-
class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
def __init__(self):
m.BaseN1.__init__(self, 1)
m.BaseN7.__init__(self, 7)
m.BaseN8.__init__(self, 8)
- class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15,
- m.BaseN16):
+ class MIMany916(
+ m.BaseN9,
+ m.BaseN10,
+ m.BaseN11,
+ m.BaseN12,
+ m.BaseN13,
+ m.BaseN14,
+ m.BaseN15,
+ m.BaseN16,
+ ):
def __init__(self):
m.BaseN9.__init__(self, 9)
m.BaseN10.__init__(self, 10)
def test_multiple_inheritance_virtbase():
-
class MITypePy(m.Base12a):
def __init__(self, i, j):
m.Base12a.__init__(self, i, j)
def test_mi_static_properties():
"""Mixing bases with and without static properties should be possible
- and the result should be independent of base definition order"""
+ and the result should be independent of base definition order"""
for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
assert d.vanilla() == "Vanilla"
assert d.static_value == 0
-@pytest.unsupported_on_pypy
+# Requires PyPy 6+
def test_mi_dynamic_attributes():
"""Mixing bases with and without dynamic attribute support"""
template <typename T>
DtypeCheck get_dtype_check(const char* name) {
- py::module np = py::module::import("numpy");
+ py::module_ np = py::module_::import("numpy");
DtypeCheck check{};
check.numpy = np.attr("dtype")(np.attr(name));
check.pybind11 = py::dtype::of<T>();
// In-place: doubles every byte of `a` from the element addressed by `index...`
// (offset_at) through the end of the buffer, then returns the same array.
template<typename... Ix> arr& mutate_data(arr& a, Ix... index) {
auto ptr = (uint8_t *) a.mutable_data(index...);
- for (ssize_t i = 0; i < a.nbytes() - a.offset_at(index...); i++)
+ for (py::ssize_t i = 0; i < a.nbytes() - a.offset_at(index...); i++)
ptr[i] = (uint8_t) (ptr[i] * 2);
return a;
}
// In-place: increments every element of `a` from flat position index_at(index...)
// through the last element, then returns the same array.
template<typename... Ix> arr_t& mutate_data_t(arr_t& a, Ix... index) {
auto ptr = a.mutable_data(index...);
- for (ssize_t i = 0; i < a.size() - a.index_at(index...); i++)
+ for (py::ssize_t i = 0; i < a.size() - a.index_at(index...); i++)
ptr[i]++;
return a;
}
-template<typename... Ix> ssize_t index_at(const arr& a, Ix... idx) { return a.index_at(idx...); }
-template<typename... Ix> ssize_t index_at_t(const arr_t& a, Ix... idx) { return a.index_at(idx...); }
-template<typename... Ix> ssize_t offset_at(const arr& a, Ix... idx) { return a.offset_at(idx...); }
-template<typename... Ix> ssize_t offset_at_t(const arr_t& a, Ix... idx) { return a.offset_at(idx...); }
-template<typename... Ix> ssize_t at_t(const arr_t& a, Ix... idx) { return a.at(idx...); }
+template<typename... Ix> py::ssize_t index_at(const arr& a, Ix... idx) { return a.index_at(idx...); }
+template<typename... Ix> py::ssize_t index_at_t(const arr_t& a, Ix... idx) { return a.index_at(idx...); }
+template<typename... Ix> py::ssize_t offset_at(const arr& a, Ix... idx) { return a.offset_at(idx...); }
+template<typename... Ix> py::ssize_t offset_at_t(const arr_t& a, Ix... idx) { return a.offset_at(idx...); }
+template<typename... Ix> py::ssize_t at_t(const arr_t& a, Ix... idx) { return a.at(idx...); }
template<typename... Ix> arr_t& mutate_at_t(arr_t& a, Ix... idx) { a.mutable_at(idx...)++; return a; }
#define def_index_fn(name, type) \
static int data_i = 42;
TEST_SUBMODULE(numpy_array, sm) {
- try { py::module::import("numpy"); }
+ try { py::module_::import("numpy"); }
catch (...) { return; }
// test_dtypes
// test_array_attributes
sm.def("ndim", [](const arr& a) { return a.ndim(); });
sm.def("shape", [](const arr& a) { return arr(a.ndim(), a.shape()); });
- sm.def("shape", [](const arr& a, ssize_t dim) { return a.shape(dim); });
+ sm.def("shape", [](const arr& a, py::ssize_t dim) { return a.shape(dim); });
sm.def("strides", [](const arr& a) { return arr(a.ndim(), a.strides()); });
- sm.def("strides", [](const arr& a, ssize_t dim) { return a.strides(dim); });
+ sm.def("strides", [](const arr& a, py::ssize_t dim) { return a.strides(dim); });
sm.def("writeable", [](const arr& a) { return a.writeable(); });
sm.def("size", [](const arr& a) { return a.size(); });
sm.def("itemsize", [](const arr& a) { return a.itemsize(); });
.def(py::init<>())
.def("numpy_view", [](py::object &obj) {
py::print("ArrayClass::numpy_view()");
- ArrayClass &a = obj.cast<ArrayClass&>();
+ auto &a = obj.cast<ArrayClass&>();
return py::array_t<int>({2}, {4}, a.data, obj);
}
);
sm.def("overloaded2", [](py::array_t<std::complex<float>>) { return "float complex"; });
sm.def("overloaded2", [](py::array_t<float>) { return "float"; });
+ // [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
+
// Only accept the exact types:
- sm.def("overloaded3", [](py::array_t<int>) { return "int"; }, py::arg().noconvert());
- sm.def("overloaded3", [](py::array_t<double>) { return "double"; }, py::arg().noconvert());
+ sm.def("overloaded3", [](py::array_t<int>) { return "int"; }, py::arg{}.noconvert());
+ sm.def("overloaded3", [](py::array_t<double>) { return "double"; }, py::arg{}.noconvert());
// Make sure we don't do unsafe coercion (e.g. float to int) when not using forcecast, but
// rather that float gets converted via the safe (conversion to double) overload:
// test_array_unchecked_fixed_dims
sm.def("proxy_add2", [](py::array_t<double> a, double v) {
auto r = a.mutable_unchecked<2>();
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
r(i, j) += v;
- }, py::arg().noconvert(), py::arg());
+ }, py::arg{}.noconvert(), py::arg());
sm.def("proxy_init3", [](double start) {
py::array_t<double, py::array::c_style> a({ 3, 3, 3 });
auto r = a.mutable_unchecked<3>();
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
- for (ssize_t k = 0; k < r.shape(2); k++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) = start++;
return a;
});
sm.def("proxy_init3F", [](double start) {
py::array_t<double, py::array::f_style> a({ 3, 3, 3 });
auto r = a.mutable_unchecked<3>();
- for (ssize_t k = 0; k < r.shape(2); k++)
- for (ssize_t j = 0; j < r.shape(1); j++)
- for (ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t k = 0; k < r.shape(2); k++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
r(i, j, k) = start++;
return a;
});
sm.def("proxy_squared_L2_norm", [](py::array_t<double> a) {
auto r = a.unchecked<1>();
double sumsq = 0;
- for (ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
sumsq += r[i] * r(i); // Either notation works for a 1D array
return sumsq;
});
return auxiliaries(r, r2);
});
+ sm.def("proxy_auxiliaries1_const_ref", [](py::array_t<double> a) {
+ const auto &r = a.unchecked<1>();
+ const auto &r2 = a.mutable_unchecked<1>();
+ return r(0) == r2(0) && r[0] == r2[0];
+ });
+
+ sm.def("proxy_auxiliaries2_const_ref", [](py::array_t<double> a) {
+ const auto &r = a.unchecked<2>();
+ const auto &r2 = a.mutable_unchecked<2>();
+ return r(0, 0) == r2(0, 0);
+ });
+
// test_array_unchecked_dyn_dims
// Same as the above, but without a compile-time dimensions specification:
sm.def("proxy_add2_dyn", [](py::array_t<double> a, double v) {
auto r = a.mutable_unchecked();
if (r.ndim() != 2) throw std::domain_error("error: ndim != 2");
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
r(i, j) += v;
- }, py::arg().noconvert(), py::arg());
+ }, py::arg{}.noconvert(), py::arg());
sm.def("proxy_init3_dyn", [](double start) {
py::array_t<double, py::array::c_style> a({ 3, 3, 3 });
auto r = a.mutable_unchecked();
if (r.ndim() != 3) throw std::domain_error("error: ndim != 3");
- for (ssize_t i = 0; i < r.shape(0); i++)
- for (ssize_t j = 0; j < r.shape(1); j++)
- for (ssize_t k = 0; k < r.shape(2); k++)
+ for (py::ssize_t i = 0; i < r.shape(0); i++)
+ for (py::ssize_t j = 0; j < r.shape(1); j++)
+ for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) = start++;
return a;
});
// test_array_resize
// reshape array to 2D without changing size
sm.def("array_reshape2", [](py::array_t<double> a) {
- const ssize_t dim_sz = (ssize_t)std::sqrt(a.size());
+ const auto dim_sz = (py::ssize_t)std::sqrt(a.size());
if (dim_sz * dim_sz != a.size())
throw std::domain_error("array_reshape2: input array total size is not a squared integer");
a.resize({dim_sz, dim_sz});
return a;
});
-#if PY_MAJOR_VERSION >= 3
- sm.def("index_using_ellipsis", [](py::array a) {
- return a[py::make_tuple(0, py::ellipsis(), 0)];
- });
-#endif
+ sm.def("index_using_ellipsis", [](py::array a) {
+ return a[py::make_tuple(0, py::ellipsis(), 0)];
+ });
+
+ // test_argument_conversions
+ sm.def("accept_double",
+ [](py::array_t<double, 0>) {},
+ py::arg("a"));
+ sm.def("accept_double_forcecast",
+ [](py::array_t<double, py::array::forcecast>) {},
+ py::arg("a"));
+ sm.def("accept_double_c_style",
+ [](py::array_t<double, py::array::c_style>) {},
+ py::arg("a"));
+ sm.def("accept_double_c_style_forcecast",
+ [](py::array_t<double, py::array::forcecast | py::array::c_style>) {},
+ py::arg("a"));
+ sm.def("accept_double_f_style",
+ [](py::array_t<double, py::array::f_style>) {},
+ py::arg("a"));
+ sm.def("accept_double_f_style_forcecast",
+ [](py::array_t<double, py::array::forcecast | py::array::f_style>) {},
+ py::arg("a"));
+ sm.def("accept_double_noconvert",
+ [](py::array_t<double, 0>) {},
+ "a"_a.noconvert());
+ sm.def("accept_double_forcecast_noconvert",
+ [](py::array_t<double, py::array::forcecast>) {},
+ "a"_a.noconvert());
+ sm.def("accept_double_c_style_noconvert",
+ [](py::array_t<double, py::array::c_style>) {},
+ "a"_a.noconvert());
+ sm.def("accept_double_c_style_forcecast_noconvert",
+ [](py::array_t<double, py::array::forcecast | py::array::c_style>) {},
+ "a"_a.noconvert());
+ sm.def("accept_double_f_style_noconvert",
+ [](py::array_t<double, py::array::f_style>) {},
+ "a"_a.noconvert());
+ sm.def("accept_double_f_style_forcecast_noconvert",
+ [](py::array_t<double, py::array::forcecast | py::array::f_style>) {},
+ "a"_a.noconvert());
}
+# -*- coding: utf-8 -*-
import pytest
-from pybind11_tests import numpy_array as m
-pytestmark = pytest.requires_numpy
+import env # noqa: F401
+
+from pybind11_tests import numpy_array as m
-with pytest.suppress(ImportError):
- import numpy as np
+np = pytest.importorskip("numpy")
def test_dtypes():
print(check)
assert check.numpy == check.pybind11, check
if check.numpy.num != check.pybind11.num:
- print("NOTE: typenum mismatch for {}: {} != {}".format(
- check, check.numpy.num, check.pybind11.num))
+ print(
+ "NOTE: typenum mismatch for {}: {} != {}".format(
+ check, check.numpy.num, check.pybind11.num
+ )
+ )
-@pytest.fixture(scope='function')
+@pytest.fixture(scope="function")
def arr():
- return np.array([[1, 2, 3], [4, 5, 6]], '=u2')
+ return np.array([[1, 2, 3], [4, 5, 6]], "=u2")
def test_array_attributes():
- a = np.array(0, 'f8')
+ a = np.array(0, "f8")
assert m.ndim(a) == 0
assert all(m.shape(a) == [])
assert all(m.strides(a) == [])
with pytest.raises(IndexError) as excinfo:
m.shape(a, 0)
- assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
+ assert str(excinfo.value) == "invalid axis: 0 (ndim = 0)"
with pytest.raises(IndexError) as excinfo:
m.strides(a, 0)
- assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
+ assert str(excinfo.value) == "invalid axis: 0 (ndim = 0)"
assert m.writeable(a)
assert m.size(a) == 1
assert m.itemsize(a) == 8
assert m.nbytes(a) == 8
assert m.owndata(a)
- a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()
+ a = np.array([[1, 2, 3], [4, 5, 6]], "u2").view()
a.flags.writeable = False
assert m.ndim(a) == 2
assert all(m.shape(a) == [2, 3])
assert m.strides(a, 1) == 2
with pytest.raises(IndexError) as excinfo:
m.shape(a, 2)
- assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
+ assert str(excinfo.value) == "invalid axis: 2 (ndim = 2)"
with pytest.raises(IndexError) as excinfo:
m.strides(a, 2)
- assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
+ assert str(excinfo.value) == "invalid axis: 2 (ndim = 2)"
assert not m.writeable(a)
assert m.size(a) == 6
assert m.itemsize(a) == 2
assert not m.owndata(a)
-@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])
+@pytest.mark.parametrize(
+ "args, ret", [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)]
+)
def test_index_offset(arr, args, ret):
assert m.index_at(arr, *args) == ret
assert m.index_at_t(arr, *args) == ret
def test_dim_check_fail(arr):
- for func in (m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t,
- m.mutate_data, m.mutate_data_t):
+ for func in (
+ m.index_at,
+ m.index_at_t,
+ m.offset_at,
+ m.offset_at_t,
+ m.data,
+ m.data_t,
+ m.mutate_data,
+ m.mutate_data_t,
+ ):
with pytest.raises(IndexError) as excinfo:
func(arr, 1, 2, 3)
- assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'
-
-
-@pytest.mark.parametrize('args, ret',
- [([], [1, 2, 3, 4, 5, 6]),
- ([1], [4, 5, 6]),
- ([0, 1], [2, 3, 4, 5, 6]),
- ([1, 2], [6])])
+ assert str(excinfo.value) == "too many indices for an array: 3 (ndim = 2)"
+
+
+@pytest.mark.parametrize(
+ "args, ret",
+ [
+ ([], [1, 2, 3, 4, 5, 6]),
+ ([1], [4, 5, 6]),
+ ([0, 1], [2, 3, 4, 5, 6]),
+ ([1, 2], [6]),
+ ],
+)
def test_data(arr, args, ret):
from sys import byteorder
+
assert all(m.data_t(arr, *args) == ret)
- assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret)
- assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0)
+ assert all(m.data(arr, *args)[(0 if byteorder == "little" else 1) :: 2] == ret)
+ assert all(m.data(arr, *args)[(1 if byteorder == "little" else 0) :: 2] == 0)
-@pytest.mark.parametrize('dim', [0, 1, 3])
+@pytest.mark.parametrize("dim", [0, 1, 3])
def test_at_fail(arr, dim):
for func in m.at_t, m.mutate_at_t:
with pytest.raises(IndexError) as excinfo:
func(arr, *([0] * dim))
- assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)
+ assert str(excinfo.value) == "index dimension mismatch: {} (ndim = 2)".format(
+ dim
+ )
def test_at(arr):
def test_mutate_readonly(arr):
arr.flags.writeable = False
- for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)):
+ for func, args in (
+ (m.mutate_data, ()),
+ (m.mutate_data_t, ()),
+ (m.mutate_at_t, (0, 0)),
+ ):
with pytest.raises(ValueError) as excinfo:
func(arr, *args)
- assert str(excinfo.value) == 'array is not writeable'
+ assert str(excinfo.value) == "array is not writeable"
def test_mutate_data(arr):
def test_bounds_check(arr):
- for func in (m.index_at, m.index_at_t, m.data, m.data_t,
- m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t):
+ for func in (
+ m.index_at,
+ m.index_at_t,
+ m.data,
+ m.data_t,
+ m.mutate_data,
+ m.mutate_data_t,
+ m.at_t,
+ m.mutate_at_t,
+ ):
with pytest.raises(IndexError) as excinfo:
func(arr, 2, 0)
- assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'
+ assert str(excinfo.value) == "index 2 is out of bounds for axis 0 with size 2"
with pytest.raises(IndexError) as excinfo:
func(arr, 0, 4)
- assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'
+ assert str(excinfo.value) == "index 4 is out of bounds for axis 1 with size 3"
def test_make_c_f_array():
def test_wrap():
def assert_references(a, b, base=None):
from distutils.version import LooseVersion
+
if base is None:
base = a
assert a is not b
- assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]
+ assert a.__array_interface__["data"][0] == b.__array_interface__["data"][0]
assert a.shape == b.shape
assert a.strides == b.strides
assert a.flags.c_contiguous == b.flags.c_contiguous
a2 = m.wrap(a1)
assert_references(a1, a2)
- a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')
+ a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order="F")
assert a1.flags.owndata and a1.base is None
a2 = m.wrap(a1)
assert_references(a1, a2)
- a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')
+ a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order="C")
a1.flags.writeable = False
a2 = m.wrap(a1)
assert_references(a1, a2)
assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))
del ac
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
ArrayClass()
ArrayClass::numpy_view()
ArrayClass::numpy_view()
"""
+ )
ac_view_1[0] = 4
ac_view_1[1] = 3
assert ac_view_2[0] == 4
del ac_view_2
pytest.gc_collect()
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
~ArrayClass()
"""
+ )
-@pytest.unsupported_on_pypy
def test_cast_numpy_int64_to_uint64():
m.function_taking_uint64(123)
m.function_taking_uint64(np.uint64(123))
def test_overload_resolution(msg):
# Exact overload matches:
- assert m.overloaded(np.array([1], dtype='float64')) == 'double'
- assert m.overloaded(np.array([1], dtype='float32')) == 'float'
- assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short'
- assert m.overloaded(np.array([1], dtype='intc')) == 'int'
- assert m.overloaded(np.array([1], dtype='longlong')) == 'long long'
- assert m.overloaded(np.array([1], dtype='complex')) == 'double complex'
- assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex'
+ assert m.overloaded(np.array([1], dtype="float64")) == "double"
+ assert m.overloaded(np.array([1], dtype="float32")) == "float"
+ assert m.overloaded(np.array([1], dtype="ushort")) == "unsigned short"
+ assert m.overloaded(np.array([1], dtype="intc")) == "int"
+ assert m.overloaded(np.array([1], dtype="longlong")) == "long long"
+ assert m.overloaded(np.array([1], dtype="complex")) == "double complex"
+ assert m.overloaded(np.array([1], dtype="csingle")) == "float complex"
# No exact match, should call first convertible version:
- assert m.overloaded(np.array([1], dtype='uint8')) == 'double'
+ assert m.overloaded(np.array([1], dtype="uint8")) == "double"
with pytest.raises(TypeError) as excinfo:
m.overloaded("not an array")
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
overloaded(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: numpy.ndarray[float64]) -> str
- 2. (arg0: numpy.ndarray[float32]) -> str
- 3. (arg0: numpy.ndarray[int32]) -> str
- 4. (arg0: numpy.ndarray[uint16]) -> str
- 5. (arg0: numpy.ndarray[int64]) -> str
- 6. (arg0: numpy.ndarray[complex128]) -> str
- 7. (arg0: numpy.ndarray[complex64]) -> str
+ 1. (arg0: numpy.ndarray[numpy.float64]) -> str
+ 2. (arg0: numpy.ndarray[numpy.float32]) -> str
+ 3. (arg0: numpy.ndarray[numpy.int32]) -> str
+ 4. (arg0: numpy.ndarray[numpy.uint16]) -> str
+ 5. (arg0: numpy.ndarray[numpy.int64]) -> str
+ 6. (arg0: numpy.ndarray[numpy.complex128]) -> str
+ 7. (arg0: numpy.ndarray[numpy.complex64]) -> str
Invoked with: 'not an array'
"""
+ )
- assert m.overloaded2(np.array([1], dtype='float64')) == 'double'
- assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
- assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex'
- assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex'
- assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
+ assert m.overloaded2(np.array([1], dtype="float64")) == "double"
+ assert m.overloaded2(np.array([1], dtype="float32")) == "float"
+ assert m.overloaded2(np.array([1], dtype="complex64")) == "float complex"
+ assert m.overloaded2(np.array([1], dtype="complex128")) == "double complex"
+ assert m.overloaded2(np.array([1], dtype="float32")) == "float"
- assert m.overloaded3(np.array([1], dtype='float64')) == 'double'
- assert m.overloaded3(np.array([1], dtype='intc')) == 'int'
+ assert m.overloaded3(np.array([1], dtype="float64")) == "double"
+ assert m.overloaded3(np.array([1], dtype="intc")) == "int"
expected_exc = """
overloaded3(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: numpy.ndarray[int32]) -> str
- 2. (arg0: numpy.ndarray[float64]) -> str
+ 1. (arg0: numpy.ndarray[numpy.int32]) -> str
+ 2. (arg0: numpy.ndarray[numpy.float64]) -> str
Invoked with: """
with pytest.raises(TypeError) as excinfo:
- m.overloaded3(np.array([1], dtype='uintc'))
- assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype='uint32'))
+ m.overloaded3(np.array([1], dtype="uintc"))
+ assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype="uint32"))
with pytest.raises(TypeError) as excinfo:
- m.overloaded3(np.array([1], dtype='float32'))
- assert msg(excinfo.value) == expected_exc + repr(np.array([1.], dtype='float32'))
+ m.overloaded3(np.array([1], dtype="float32"))
+ assert msg(excinfo.value) == expected_exc + repr(np.array([1.0], dtype="float32"))
with pytest.raises(TypeError) as excinfo:
- m.overloaded3(np.array([1], dtype='complex'))
- assert msg(excinfo.value) == expected_exc + repr(np.array([1. + 0.j]))
+ m.overloaded3(np.array([1], dtype="complex"))
+ assert msg(excinfo.value) == expected_exc + repr(np.array([1.0 + 0.0j]))
# Exact matches:
- assert m.overloaded4(np.array([1], dtype='double')) == 'double'
- assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long'
+ assert m.overloaded4(np.array([1], dtype="double")) == "double"
+ assert m.overloaded4(np.array([1], dtype="longlong")) == "long long"
# Non-exact matches requiring conversion. Since float to integer isn't a
    # safe conversion, it should go to the double overload, but short can go to
# either (and so should end up on the first-registered, the long long).
- assert m.overloaded4(np.array([1], dtype='float32')) == 'double'
- assert m.overloaded4(np.array([1], dtype='short')) == 'long long'
+ assert m.overloaded4(np.array([1], dtype="float32")) == "double"
+ assert m.overloaded4(np.array([1], dtype="short")) == "long long"
- assert m.overloaded5(np.array([1], dtype='double')) == 'double'
- assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int'
- assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int'
+ assert m.overloaded5(np.array([1], dtype="double")) == "double"
+ assert m.overloaded5(np.array([1], dtype="uintc")) == "unsigned int"
+ assert m.overloaded5(np.array([1], dtype="float32")) == "unsigned int"
def test_greedy_string_overload():
"""Tests fix for #685 - ndarray shouldn't go to std::string overload"""
assert m.issue685("abc") == "string"
- assert m.issue685(np.array([97, 98, 99], dtype='b')) == "array"
+ assert m.issue685(np.array([97, 98, 99], dtype="b")) == "array"
assert m.issue685(123) == "other"
def test_array_unchecked_fixed_dims(msg):
- z1 = np.array([[1, 2], [3, 4]], dtype='float64')
+ z1 = np.array([[1, 2], [3, 4]], dtype="float64")
m.proxy_add2(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
with pytest.raises(ValueError) as excinfo:
- m.proxy_add2(np.array([1., 2, 3]), 5.0)
- assert msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2"
+ m.proxy_add2(np.array([1.0, 2, 3]), 5.0)
+ assert (
+ msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2"
+ )
- expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
+ expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype="int")
assert np.all(m.proxy_init3(3.0) == expect_c)
expect_f = np.transpose(expect_c)
assert np.all(m.proxy_init3F(3.0) == expect_f)
assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1)
+ assert m.proxy_auxiliaries1_const_ref(z1[0, :])
+ assert m.proxy_auxiliaries2_const_ref(z1)
+
def test_array_unchecked_dyn_dims(msg):
- z1 = np.array([[1, 2], [3, 4]], dtype='float64')
+ z1 = np.array([[1, 2], [3, 4]], dtype="float64")
m.proxy_add2_dyn(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
- expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
+ expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype="int")
assert np.all(m.proxy_init3_dyn(3.0) == expect_c)
assert m.proxy_auxiliaries2_dyn(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
def test_array_failure():
with pytest.raises(ValueError) as excinfo:
m.array_fail_test()
- assert str(excinfo.value) == 'cannot create a pybind11::array from a nullptr'
+ assert str(excinfo.value) == "cannot create a pybind11::array from a nullptr"
with pytest.raises(ValueError) as excinfo:
m.array_t_fail_test()
- assert str(excinfo.value) == 'cannot create a pybind11::array_t from a nullptr'
+ assert str(excinfo.value) == "cannot create a pybind11::array_t from a nullptr"
with pytest.raises(ValueError) as excinfo:
m.array_fail_test_negative_size()
- assert str(excinfo.value) == 'negative dimensions are not allowed'
+ assert str(excinfo.value) == "negative dimensions are not allowed"
def test_initializer_list():
def test_array_resize(msg):
- a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64')
+ a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype="float64")
m.array_reshape2(a)
- assert(a.size == 9)
- assert(np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
+ assert a.size == 9
+ assert np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # total size change should succeed with refcheck off
m.array_resize3(a, 4, False)
- assert(a.size == 64)
+ assert a.size == 64
# ... and fail with refcheck on
try:
m.array_resize3(a, 3, True)
except ValueError as e:
- assert(str(e).startswith("cannot resize an array"))
+ assert str(e).startswith("cannot resize an array")
# transposed array doesn't own data
b = a.transpose()
try:
m.array_resize3(b, 3, False)
except ValueError as e:
- assert(str(e).startswith("cannot resize this array: it does not own its data"))
+ assert str(e).startswith("cannot resize this array: it does not own its data")
# ... but reshape should be fine
m.array_reshape2(b)
- assert(b.shape == (8, 8))
+ assert b.shape == (8, 8)
-@pytest.unsupported_on_pypy
+@pytest.mark.xfail("env.PYPY")
def test_array_create_and_resize(msg):
a = m.create_and_resize(2)
- assert(a.size == 4)
- assert(np.all(a == 42.))
+ assert a.size == 4
+ assert np.all(a == 42.0)
-@pytest.unsupported_on_py2
def test_index_using_ellipsis():
a = m.index_using_ellipsis(np.zeros((5, 6, 7)))
assert a.shape == (6,)
-@pytest.unsupported_on_pypy
+@pytest.mark.parametrize("forcecast", [False, True])
+@pytest.mark.parametrize("contiguity", [None, "C", "F"])
+@pytest.mark.parametrize("noconvert", [False, True])
+@pytest.mark.filterwarnings(
+ "ignore:Casting complex values to real discards the imaginary part:numpy.ComplexWarning"
+)
+def test_argument_conversions(forcecast, contiguity, noconvert):
+ function_name = "accept_double"
+ if contiguity == "C":
+ function_name += "_c_style"
+ elif contiguity == "F":
+ function_name += "_f_style"
+ if forcecast:
+ function_name += "_forcecast"
+ if noconvert:
+ function_name += "_noconvert"
+ function = getattr(m, function_name)
+
+ for dtype in [np.dtype("float32"), np.dtype("float64"), np.dtype("complex128")]:
+ for order in ["C", "F"]:
+ for shape in [(2, 2), (1, 3, 1, 1), (1, 1, 1), (0,)]:
+ if not noconvert:
+ # If noconvert is not passed, only complex128 needs to be truncated and
+ # "cannot be safely obtained". So without `forcecast`, the argument shouldn't
+ # be accepted.
+ should_raise = dtype.name == "complex128" and not forcecast
+ else:
+ # If noconvert is passed, only float64 and the matching order is accepted.
+ # If at most one dimension has a size greater than 1, the array is also
+ # trivially contiguous.
+ trivially_contiguous = sum(1 for d in shape if d > 1) <= 1
+ should_raise = dtype.name != "float64" or (
+ contiguity is not None
+ and contiguity != order
+ and not trivially_contiguous
+ )
+
+ array = np.zeros(shape, dtype=dtype, order=order)
+ if not should_raise:
+ function(array)
+ else:
+ with pytest.raises(
+ TypeError, match="incompatible function arguments"
+ ):
+ function(array)
+
+
+@pytest.mark.xfail("env.PYPY")
def test_dtype_refcount_leak():
from sys import getrefcount
+
dtype = np.dtype(np.float_)
a = np.array([1], dtype=dtype)
before = getrefcount(dtype)
const auto req = arr.request();
const auto ptr = static_cast<S*>(req.ptr);
auto l = py::list();
- for (ssize_t i = 0; i < req.size; i++) {
+ for (py::ssize_t i = 0; i < req.size; i++) {
std::stringstream ss;
ss << ptr[i];
l.append(py::str(ss.str()));
using arr_t = py::array_t<int32_t, 0>;
std::vector<int32_t> data { 1, 2, 3, 4, 5, 6 };
- std::vector<ssize_t> shape { 3, 2 };
- std::vector<ssize_t> strides { 8, 4 };
+ std::vector<py::ssize_t> shape { 3, 2 };
+ std::vector<py::ssize_t> strides { 8, 4 };
auto ptr = data.data();
auto vptr = (void *) ptr;
struct B {};
TEST_SUBMODULE(numpy_dtypes, m) {
- try { py::module::import("numpy"); }
+ try { py::module_::import("numpy"); }
catch (...) { return; }
// typeinfo may be registered before the dtype descriptor for scalar casts to work...
- py::class_<SimpleStruct>(m, "SimpleStruct");
+ py::class_<SimpleStruct>(m, "SimpleStruct")
+ // Explicit construct to ensure zero-valued initialization.
+ .def(py::init([]() { return SimpleStruct(); }))
+ .def_readwrite("bool_", &SimpleStruct::bool_)
+ .def_readwrite("uint_", &SimpleStruct::uint_)
+ .def_readwrite("float_", &SimpleStruct::float_)
+ .def_readwrite("ldbl_", &SimpleStruct::ldbl_)
+ .def("astuple", [](const SimpleStruct& self) {
+ return py::make_tuple(self.bool_, self.uint_, self.float_, self.ldbl_);
+ })
+ .def_static("fromtuple", [](const py::tuple tup) {
+ if (py::len(tup) != 4) {
+ throw py::cast_error("Invalid size");
+ }
+ return SimpleStruct{
+ tup[0].cast<bool>(),
+ tup[1].cast<uint32_t>(),
+ tup[2].cast<float>(),
+ tup[3].cast<long double>()};
+ });
PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_);
PYBIND11_NUMPY_DTYPE(SimpleStructReordered, bool_, uint_, float_, ldbl_);
if (non_empty) {
auto req = arr.request();
auto ptr = static_cast<StringStruct*>(req.ptr);
- for (ssize_t i = 0; i < req.size * req.itemsize; i++)
+ for (py::ssize_t i = 0; i < req.size * req.itemsize; i++)
static_cast<char*>(req.ptr)[i] = 0;
ptr[1].a[0] = 'a'; ptr[1].b[0] = 'a';
ptr[2].a[0] = 'a'; ptr[2].b[0] = 'a';
m.def("buffer_to_dtype", [](py::buffer& buf) { return py::dtype(buf.request()); });
// test_scalar_conversion
- m.def("f_simple", [](SimpleStruct s) { return s.uint_ * 10; });
+ auto f_simple = [](SimpleStruct s) { return s.uint_ * 10; };
+ m.def("f_simple", f_simple);
m.def("f_packed", [](PackedStruct s) { return s.uint_ * 10; });
m.def("f_nested", [](NestedStruct s) { return s.a.uint_ * 10; });
+ // test_vectorize
+ m.def("f_simple_vectorized", py::vectorize(f_simple));
+ auto f_simple_pass_thru = [](SimpleStruct s) { return s; };
+ m.def("f_simple_pass_thru_vectorized", py::vectorize(f_simple_pass_thru));
+
// test_register_dtype
m.def("register_dtype", []() { PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); });
+# -*- coding: utf-8 -*-
import re
+
import pytest
-from pybind11_tests import numpy_dtypes as m
-pytestmark = pytest.requires_numpy
+import env # noqa: F401
-with pytest.suppress(ImportError):
- import numpy as np
+from pybind11_tests import numpy_dtypes as m
+np = pytest.importorskip("numpy")
-@pytest.fixture(scope='module')
+
+@pytest.fixture(scope="module")
def simple_dtype():
- ld = np.dtype('longdouble')
- return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
- 'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],
- 'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})
+ ld = np.dtype("longdouble")
+ return np.dtype(
+ {
+ "names": ["bool_", "uint_", "float_", "ldbl_"],
+ "formats": ["?", "u4", "f4", "f{}".format(ld.itemsize)],
+ "offsets": [0, 4, 8, (16 if ld.alignment > 4 else 12)],
+ }
+ )
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def packed_dtype():
- return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])
+ return np.dtype([("bool_", "?"), ("uint_", "u4"), ("float_", "f4"), ("ldbl_", "g")])
def dt_fmt():
from sys import byteorder
- e = '<' if byteorder == 'little' else '>'
- return ("{{'names':['bool_','uint_','float_','ldbl_'],"
- " 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
- " 'offsets':[0,4,8,{}], 'itemsize':{}}}")
+
+ e = "<" if byteorder == "little" else ">"
+ return (
+ "{{'names':['bool_','uint_','float_','ldbl_'],"
+ " 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
+ " 'offsets':[0,4,8,{}], 'itemsize':{}}}"
+ )
def simple_dtype_fmt():
- ld = np.dtype('longdouble')
+ ld = np.dtype("longdouble")
simple_ld_off = 12 + 4 * (ld.alignment > 4)
return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
def packed_dtype_fmt():
from sys import byteorder
+
return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
- np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>')
+ np.dtype("longdouble").itemsize, e="<" if byteorder == "little" else ">"
+ )
def partial_ld_offset():
- return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * (
- np.dtype('longdouble').alignment > 8)
+ return (
+ 12
+ + 4 * (np.dtype("uint64").alignment > 4)
+ + 8
+ + 8 * (np.dtype("longdouble").alignment > 8)
+ )
def partial_dtype_fmt():
- ld = np.dtype('longdouble')
+ ld = np.dtype("longdouble")
partial_ld_off = partial_ld_offset()
return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
def partial_nested_fmt():
- ld = np.dtype('longdouble')
+ ld = np.dtype("longdouble")
partial_nested_off = 8 + 8 * (ld.alignment > 8)
partial_ld_off = partial_ld_offset()
partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
- partial_dtype_fmt(), partial_nested_off, partial_nested_size)
+ partial_dtype_fmt(), partial_nested_off, partial_nested_size
+ )
def assert_equal(actual, expected_data, expected_dtype):
def test_format_descriptors():
with pytest.raises(RuntimeError) as excinfo:
m.get_format_unbound()
- assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
+ assert re.match(
+ "^NumPy type info missing for .*UnboundStruct.*$", str(excinfo.value)
+ )
- ld = np.dtype('longdouble')
- ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char
+ ld = np.dtype("longdouble")
+ ldbl_fmt = ("4x" if ld.alignment > 4 else "") + ld.char
ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
- dbl = np.dtype('double')
- partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" +
- str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +
- "xg:ldbl_:}")
+ dbl = np.dtype("double")
+ partial_fmt = (
+ "^T{?:bool_:3xI:uint_:f:float_:"
+ + str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8))
+ + "xg:ldbl_:}"
+ )
nested_extra = str(max(8, ld.alignment))
assert m.print_format_descriptors() == [
ss_fmt,
"^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
"^T{3s:a:3s:b:}",
"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
- '^T{q:e1:B:e2:}',
- '^T{Zf:cflt:Zd:cdbl:}'
+ "^T{q:e1:B:e2:}",
+ "^T{Zf:cflt:Zd:cdbl:}",
]
def test_dtype(simple_dtype):
from sys import byteorder
- e = '<' if byteorder == 'little' else '>'
+
+ e = "<" if byteorder == "little" else ">"
assert m.print_dtypes() == [
simple_dtype_fmt(),
partial_dtype_fmt(),
partial_nested_fmt(),
"[('a', 'S3'), ('b', 'S3')]",
- ("{{'names':['a','b','c','d'], " +
- "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('" + e + "f4', (4, 2))], " +
- "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e),
+ (
+ "{{'names':['a','b','c','d'], "
+ + "'formats':[('S4', (3,)),('"
+ + e
+ + "i4', (2,)),('u1', (3,)),('"
+ + e
+ + "f4', (4, 2))], "
+ + "'offsets':[0,12,20,24], 'itemsize':56}}"
+ ).format(e=e),
"[('e1', '" + e + "i8'), ('e2', 'u1')]",
"[('x', 'i1'), ('y', '" + e + "u8')]",
- "[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]"
+ "[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]",
]
- d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],
- 'offsets': [1, 10], 'itemsize': 20})
- d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
- assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),
- np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]
+ d1 = np.dtype(
+ {
+ "names": ["a", "b"],
+ "formats": ["int32", "float64"],
+ "offsets": [1, 10],
+ "itemsize": 20,
+ }
+ )
+ d2 = np.dtype([("a", "i4"), ("b", "f4")])
+ assert m.test_dtype_ctors() == [
+ np.dtype("int32"),
+ np.dtype("float64"),
+ np.dtype("bool"),
+ d1,
+ d1,
+ np.dtype("uint32"),
+ d2,
+ ]
- assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,
- np.dtype('int32').itemsize, simple_dtype.itemsize]
+ assert m.test_dtype_methods() == [
+ np.dtype("int32"),
+ simple_dtype,
+ False,
+ True,
+ np.dtype("int32").itemsize,
+ simple_dtype.itemsize,
+ ]
- assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))
+ assert m.trailing_padding_dtype() == m.buffer_to_dtype(
+ np.zeros(1, m.trailing_padding_dtype())
+ )
def test_recarray(simple_dtype, packed_dtype):
elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
- for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
+ for func, dtype in [
+ (m.create_rec_simple, simple_dtype),
+ (m.create_rec_packed, packed_dtype),
+ ]:
arr = func(0)
assert arr.dtype == dtype
assert_equal(arr, [], simple_dtype)
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
+ # Show what recarray's look like in NumPy.
+ assert type(arr[0]) == np.void
+ assert type(arr[0].item()) == tuple
+
if dtype == simple_dtype:
assert m.print_rec_simple(arr) == [
"s:0,0,0,-0",
"s:1,1,1.5,-2.5",
- "s:0,2,3,-5"
+ "s:0,2,3,-5",
]
else:
assert m.print_rec_packed(arr) == [
"p:0,0,0,-0",
"p:1,1,1.5,-2.5",
- "p:0,2,3,-5"
+ "p:0,2,3,-5",
]
- nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
+ nested_dtype = np.dtype([("a", simple_dtype), ("b", packed_dtype)])
arr = m.create_rec_nested(0)
assert arr.dtype == nested_dtype
arr = m.create_rec_nested(3)
assert arr.dtype == nested_dtype
- assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
- ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
- ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)
+ assert_equal(
+ arr,
+ [
+ ((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
+ ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
+ ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5)),
+ ],
+ nested_dtype,
+ )
assert m.print_rec_nested(arr) == [
"n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
"n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
- "n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5"
+ "n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5",
]
arr = m.create_rec_partial(3)
assert str(arr.dtype) == partial_dtype_fmt()
partial_dtype = arr.dtype
- assert '' not in arr.dtype.fields
+ assert "" not in arr.dtype.fields
assert partial_dtype.itemsize > simple_dtype.itemsize
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
arr = m.create_rec_partial_nested(3)
assert str(arr.dtype) == partial_nested_fmt()
- assert '' not in arr.dtype.fields
- assert '' not in arr.dtype.fields['a'][0].fields
+ assert "" not in arr.dtype.fields
+ assert "" not in arr.dtype.fields["a"][0].fields
assert arr.dtype.itemsize > partial_dtype.itemsize
- np.testing.assert_equal(arr['a'], m.create_rec_partial(3))
+ np.testing.assert_equal(arr["a"], m.create_rec_partial(3))
def test_array_constructors():
- data = np.arange(1, 7, dtype='int32')
+ data = np.arange(1, 7, dtype="int32")
for i in range(8):
np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))
np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))
"a='',b=''",
"a='a',b='a'",
"a='ab',b='ab'",
- "a='abc',b='abc'"
+ "a='abc',b='abc'",
]
dtype = arr.dtype
- assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']
- assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']
+ assert arr["a"].tolist() == [b"", b"a", b"ab", b"abc"]
+ assert arr["b"].tolist() == [b"", b"a", b"ab", b"abc"]
arr = m.create_string_array(False)
assert dtype == arr.dtype
def test_array_array():
from sys import byteorder
- e = '<' if byteorder == 'little' else '>'
+
+ e = "<" if byteorder == "little" else ">"
arr = m.create_array_array(3)
assert str(arr.dtype) == (
- "{{'names':['a','b','c','d'], " +
- "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " +
- "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e)
+ "{{'names':['a','b','c','d'], "
+ + "'formats':[('S4', (3,)),('"
+ + e
+ + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], "
+ + "'offsets':[0,12,20,24], 'itemsize':56}}"
+ ).format(e=e)
assert m.print_array_array(arr) == [
- "a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," +
- "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
- "a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," +
- "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
- "a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," +
- "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
+ "a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1},"
+ + "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
+ "a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001},"
+ + "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
+ "a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001},"
+ + "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
+ ]
+ assert arr["a"].tolist() == [
+ [b"ABCD", b"KLMN", b"UVWX"],
+ [b"WXYZ", b"GHIJ", b"QRST"],
+ [b"STUV", b"CDEF", b"MNOP"],
]
- assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],
- [b'WXYZ', b'GHIJ', b'QRST'],
- [b'STUV', b'CDEF', b'MNOP']]
- assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
+ assert arr["b"].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
assert m.create_array_array(0).dtype == arr.dtype
def test_enum_array():
from sys import byteorder
- e = '<' if byteorder == 'little' else '>'
+
+ e = "<" if byteorder == "little" else ">"
arr = m.create_enum_array(3)
dtype = arr.dtype
- assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])
- assert m.print_enum_array(arr) == [
- "e1=A,e2=X",
- "e1=B,e2=Y",
- "e1=A,e2=X"
- ]
- assert arr['e1'].tolist() == [-1, 1, -1]
- assert arr['e2'].tolist() == [1, 2, 1]
+ assert dtype == np.dtype([("e1", e + "i8"), ("e2", "u1")])
+ assert m.print_enum_array(arr) == ["e1=A,e2=X", "e1=B,e2=Y", "e1=A,e2=X"]
+ assert arr["e1"].tolist() == [-1, 1, -1]
+ assert arr["e2"].tolist() == [1, 2, 1]
assert m.create_enum_array(0).dtype == dtype
def test_complex_array():
from sys import byteorder
- e = '<' if byteorder == 'little' else '>'
+
+ e = "<" if byteorder == "little" else ">"
arr = m.create_complex_array(3)
dtype = arr.dtype
- assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])
+ assert dtype == np.dtype([("cflt", e + "c8"), ("cdbl", e + "c16")])
assert m.print_complex_array(arr) == [
"c:(0,0.25),(0.5,0.75)",
"c:(1,1.25),(1.5,1.75)",
- "c:(2,2.25),(2.5,2.75)"
+ "c:(2,2.25),(2.5,2.75)",
]
- assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
- assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
+ assert arr["cflt"].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
+ assert arr["cdbl"].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
assert m.create_complex_array(0).dtype == dtype
def test_signature(doc):
- assert doc(m.create_rec_nested) == \
- "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
+ assert (
+ doc(m.create_rec_nested)
+ == "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
+ )
def test_scalar_conversion():
n = 3
- arrays = [m.create_rec_simple(n), m.create_rec_packed(n),
- m.create_rec_nested(n), m.create_enum_array(n)]
+ arrays = [
+ m.create_rec_simple(n),
+ m.create_rec_packed(n),
+ m.create_rec_nested(n),
+ m.create_enum_array(n),
+ ]
funcs = [m.f_simple, m.f_packed, m.f_nested]
for i, func in enumerate(funcs):
else:
with pytest.raises(TypeError) as excinfo:
func(arr[0])
- assert 'incompatible function arguments' in str(excinfo.value)
+ assert "incompatible function arguments" in str(excinfo.value)
+
+
+def test_vectorize():
+ n = 3
+ array = m.create_rec_simple(n)
+ values = m.f_simple_vectorized(array)
+ np.testing.assert_array_equal(values, [0, 10, 20])
+ array_2 = m.f_simple_pass_thru_vectorized(array)
+ np.testing.assert_array_equal(array, array_2)
+
+
+def test_cls_and_dtype_conversion(simple_dtype):
+ s = m.SimpleStruct()
+ assert s.astuple() == (False, 0, 0.0, 0.0)
+ assert m.SimpleStruct.fromtuple(s.astuple()).astuple() == s.astuple()
+
+ s.uint_ = 2
+ assert m.f_simple(s) == 20
+
+ # Try as recarray of shape==(1,).
+ s_recarray = np.array([(False, 2, 0.0, 0.0)], dtype=simple_dtype)
+ # Show that this will work for vectorized case.
+ np.testing.assert_array_equal(m.f_simple_vectorized(s_recarray), [20])
+
+ # Show as a scalar that inherits from np.generic.
+ s_scalar = s_recarray[0]
+ assert isinstance(s_scalar, np.void)
+ assert m.f_simple(s_scalar) == 20
+
+ # Show that an *array* scalar (np.ndarray.shape == ()) does not convert.
+ # More specifically, conversion to SimpleStruct is not implicit.
+ s_recarray_scalar = s_recarray.reshape(())
+ assert isinstance(s_recarray_scalar, np.ndarray)
+ assert s_recarray_scalar.dtype == simple_dtype
+ with pytest.raises(TypeError) as excinfo:
+ m.f_simple(s_recarray_scalar)
+ assert "incompatible function arguments" in str(excinfo.value)
+ # Explicitly convert to m.SimpleStruct.
+ assert m.f_simple(m.SimpleStruct.fromtuple(s_recarray_scalar.item())) == 20
+
+ # Show that an array of dtype=object does *not* convert.
+ s_array_object = np.array([s])
+ assert s_array_object.dtype == object
+ with pytest.raises(TypeError) as excinfo:
+ m.f_simple_vectorized(s_array_object)
+ assert "incompatible function arguments" in str(excinfo.value)
+ # Explicitly convert to `np.array(..., dtype=simple_dtype)`
+ s_array = np.array([s.astuple()], dtype=simple_dtype)
+ np.testing.assert_array_equal(m.f_simple_vectorized(s_array), [20])
def test_register_dtype():
with pytest.raises(RuntimeError) as excinfo:
m.register_dtype()
- assert 'dtype is already registered' in str(excinfo.value)
+ assert "dtype is already registered" in str(excinfo.value)
-@pytest.unsupported_on_pypy
+@pytest.mark.xfail("env.PYPY")
def test_str_leak():
from sys import getrefcount
+
fmt = "f4"
pytest.gc_collect()
start = getrefcount(fmt)
}
TEST_SUBMODULE(numpy_vectorize, m) {
- try { py::module::import("numpy"); }
+ try { py::module_::import("numpy"); }
catch (...) { return; }
// test_vectorize, test_docs, test_array_collapse
));
// test_type_selection
- // Numpy function which only accepts specific data types
+ // NumPy function which only accepts specific data types
m.def("selective_func", [](py::array_t<int, py::array::c_style>) { return "Int branch taken."; });
m.def("selective_func", [](py::array_t<float, py::array::c_style>) { return "Float branch taken."; });
m.def("selective_func", [](py::array_t<std::complex<float>, py::array::c_style>) { return "Complex float branch taken."; });
NonPODClass(int v) : value{v} {}
int value;
};
- py::class_<NonPODClass>(m, "NonPODClass").def(py::init<int>());
+ py::class_<NonPODClass>(m, "NonPODClass")
+ .def(py::init<int>())
+ .def_readwrite("value", &NonPODClass::value);
m.def("vec_passthrough", py::vectorize(
[](double *a, double b, py::array_t<double> c, const int &d, int &e, NonPODClass f, const double g) {
return *a + b + c.at(0) + d + e + f.value + g;
py::array_t<float, py::array::forcecast> arg2,
py::array_t<double, py::array::forcecast> arg3
) {
- ssize_t ndim;
- std::vector<ssize_t> shape;
+ py::ssize_t ndim;
+ std::vector<py::ssize_t> shape;
std::array<py::buffer_info, 3> buffers {{ arg1.request(), arg2.request(), arg3.request() }};
return py::detail::broadcast(buffers, ndim, shape);
});
+
+ m.def("add_to", py::vectorize([](NonPODClass& x, int a) { x.value += a; }));
}
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import numpy_vectorize as m
-pytestmark = pytest.requires_numpy
-
-with pytest.suppress(ImportError):
- import numpy as np
+np = pytest.importorskip("numpy")
def test_vectorize(capture):
assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
with capture:
assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36])
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=3)
my_func(x:int=3, y:float=4, z:float=3)
"""
+ )
with capture:
- a = np.array([[1, 2], [3, 4]], order='F')
- b = np.array([[10, 20], [30, 40]], order='F')
+ a = np.array([[1, 2], [3, 4]], order="F")
+ b = np.array([[10, 20], [30, 40]], order="F")
c = 3
result = f(a, b, c)
assert np.allclose(result, a * b * c)
assert result.flags.f_contiguous
# All inputs are F order and full or singletons, so we the result is in col-major order:
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=10, z:float=3)
my_func(x:int=3, y:float=30, z:float=3)
my_func(x:int=2, y:float=20, z:float=3)
my_func(x:int=4, y:float=40, z:float=3)
"""
+ )
with capture:
- a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3
+ a, b, c = (
+ np.array([[1, 3, 5], [7, 9, 11]]),
+ np.array([[2, 4, 6], [8, 10, 12]]),
+ 3,
+ )
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=3)
my_func(x:int=3, y:float=4, z:float=3)
my_func(x:int=5, y:float=6, z:float=3)
my_func(x:int=9, y:float=10, z:float=3)
my_func(x:int=11, y:float=12, z:float=3)
"""
+ )
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=3, z:float=2)
my_func(x:int=3, y:float=4, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=4, z:float=2)
"""
+ )
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
+ )
with capture:
- a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2
+ a, b, c = (
+ np.array([[1, 2, 3], [4, 5, 6]], order="F"),
+ np.array([[2], [3]]),
+ 2,
+ )
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=2, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=5, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
+ )
with capture:
a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
+ )
with capture:
- a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2
+ a, b, c = (
+ np.array([[1, 2, 3], [4, 5, 6]], order="F")[::, ::2],
+ np.array([[2], [3]]),
+ 2,
+ )
assert np.allclose(f(a, b, c), a * b * c)
- assert capture == """
+ assert (
+ capture
+ == """
my_func(x:int=1, y:float=2, z:float=2)
my_func(x:int=3, y:float=2, z:float=2)
my_func(x:int=4, y:float=3, z:float=2)
my_func(x:int=6, y:float=3, z:float=2)
"""
+ )
def test_type_selection():
assert m.selective_func(np.array([1], dtype=np.int32)) == "Int branch taken."
assert m.selective_func(np.array([1.0], dtype=np.float32)) == "Float branch taken."
- assert m.selective_func(np.array([1.0j], dtype=np.complex64)) == "Complex float branch taken."
+ assert (
+ m.selective_func(np.array([1.0j], dtype=np.complex64))
+ == "Complex float branch taken."
+ )
def test_docs(doc):
- assert doc(m.vectorized_func) == """
- vectorized_func(arg0: numpy.ndarray[int32], arg1: numpy.ndarray[float32], arg2: numpy.ndarray[float64]) -> object
+ assert (
+ doc(m.vectorized_func)
+ == """
+ vectorized_func(arg0: numpy.ndarray[numpy.int32], arg1: numpy.ndarray[numpy.float32], arg2: numpy.ndarray[numpy.float64]) -> object
""" # noqa: E501 line too long
+ )
def test_trivial_broadcasting():
assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial
assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial
- assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial
+ assert (
+ vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3)
+ == trivial.c_trivial
+ )
assert trivial.c_trivial == vectorized_is_trivial(
- np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3)
- assert vectorized_is_trivial(
- np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial
- assert vectorized_is_trivial(
- np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial
- z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32')
- z2 = np.array(z1, dtype='float32')
- z3 = np.array(z1, dtype='float64')
+ np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3
+ )
+ assert (
+ vectorized_is_trivial(np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2)
+ == trivial.non_trivial
+ )
+ assert (
+ vectorized_is_trivial(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2)
+ == trivial.non_trivial
+ )
+ z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype="int32")
+ z2 = np.array(z1, dtype="float32")
+ z3 = np.array(z1, dtype="float64")
assert vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial
assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial
assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial
assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial
assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial
- y1 = np.array(z1, order='F')
+ y1 = np.array(z1, order="F")
y2 = np.array(y1)
y3 = np.array(y1)
assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial
def test_passthrough_arguments(doc):
assert doc(m.vec_passthrough) == (
- "vec_passthrough(" + ", ".join([
- "arg0: float",
- "arg1: numpy.ndarray[float64]",
- "arg2: numpy.ndarray[float64]",
- "arg3: numpy.ndarray[int32]",
- "arg4: int",
- "arg5: m.numpy_vectorize.NonPODClass",
- "arg6: numpy.ndarray[float64]"]) + ") -> object")
-
- b = np.array([[10, 20, 30]], dtype='float64')
+ "vec_passthrough("
+ + ", ".join(
+ [
+ "arg0: float",
+ "arg1: numpy.ndarray[numpy.float64]",
+ "arg2: numpy.ndarray[numpy.float64]",
+ "arg3: numpy.ndarray[numpy.int32]",
+ "arg4: int",
+ "arg5: m.numpy_vectorize.NonPODClass",
+ "arg6: numpy.ndarray[numpy.float64]",
+ ]
+ )
+ + ") -> object"
+ )
+
+ b = np.array([[10, 20, 30]], dtype="float64")
c = np.array([100, 200]) # NOT a vectorized argument
- d = np.array([[1000], [2000], [3000]], dtype='int')
- g = np.array([[1000000, 2000000, 3000000]], dtype='int') # requires casting
+ d = np.array([[1000], [2000], [3000]], dtype="int")
+ g = np.array([[1000000, 2000000, 3000000]], dtype="int") # requires casting
assert np.all(
- m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) ==
- np.array([[1111111, 2111121, 3111131],
- [1112111, 2112121, 3112131],
- [1113111, 2113121, 3113131]]))
+ m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g)
+ == np.array(
+ [
+ [1111111, 2111121, 3111131],
+ [1112111, 2112121, 3112131],
+ [1113111, 2113121, 3113131],
+ ]
+ )
+ )
def test_method_vectorization():
o = m.VectorizeTestClass(3)
- x = np.array([1, 2], dtype='int')
- y = np.array([[10], [20]], dtype='float32')
+ x = np.array([1, 2], dtype="int")
+ y = np.array([[10], [20]], dtype="float32")
assert np.all(o.method(x, y) == [[14, 15], [24, 25]])
assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray)
z = m.vectorized_func([1], 2, 3)
assert isinstance(z, np.ndarray)
- assert z.shape == (1, )
+ assert z.shape == (1,)
z = m.vectorized_func(1, [[[2]]], 3)
assert isinstance(z, np.ndarray)
assert z.shape == (1, 1, 1)
+
+
+def test_vectorized_noreturn():
+ x = m.NonPODClass(0)
+ assert x.value == 0
+ m.add_to(x, [1, 2, 3, 4])
+ assert x.value == 10
+ m.add_to(x, 1)
+ assert x.value == 11
+ m.add_to(x, [[1, 1], [2, 3]])
+ assert x.value == 18
m.def("get_null_str_value", [](char *ptr) { return reinterpret_cast<std::intptr_t>(ptr); });
m.def("return_unique_ptr", []() -> std::unique_ptr<StringList> {
- StringList *result = new StringList();
+ auto *result = new StringList();
result->push_back("some value");
return std::unique_ptr<StringList>(result);
});
+
+ // test unions
+ py::class_<IntFloat>(m, "IntFloat")
+ .def(py::init<>())
+ .def_readwrite("i", &IntFloat::i)
+ .def_readwrite("f", &IntFloat::f);
}
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import opaque_types as m
from pybind11_tests import ConstructorStats, UserType
with pytest.raises(TypeError) as excinfo:
m.get_void_ptr_value([1, 2, 3]) # This should not work
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
get_void_ptr_value(): incompatible function arguments. The following argument types are supported:
1. (arg0: capsule) -> int
Invoked with: [1, 2, 3]
""" # noqa: E501 line too long
+ )
assert m.return_null_str() is None
assert m.get_null_str_value(m.return_null_str()) is not None
ptr = m.return_unique_ptr()
assert "StringList" in repr(ptr)
assert m.print_opaque_list(ptr) == "Opaque list: [some value]"
+
+
+def test_unions():
+ int_float_union = m.IntFloat()
+ int_float_union.i = 42
+ assert int_float_union.i == 42
+ int_float_union.f = 3.0
+ assert int_float_union.f == 3.0
friend Vector2 operator-(float f, const Vector2 &v) { return Vector2(f - v.x, f - v.y); }
friend Vector2 operator*(float f, const Vector2 &v) { return Vector2(f * v.x, f * v.y); }
friend Vector2 operator/(float f, const Vector2 &v) { return Vector2(f / v.x, f / v.y); }
+
+ bool operator==(const Vector2 &v) const {
+ return x == v.x && y == v.y;
+ }
+ bool operator!=(const Vector2 &v) const {
+ return x != v.x || y != v.y;
+ }
private:
float x, y;
};
int operator+(const C2 &, const C1 &) { return 21; }
int operator+(const C1 &, const C2 &) { return 12; }
+// Note: Specializing explicit within `namespace std { ... }` is done due to a
+// bug in GCC<7. If you are supporting compilers later than this, consider
+// specializing `using template<> struct std::hash<...>` in the global
+// namespace instead, per this recommendation:
+// https://en.cppreference.com/w/cpp/language/extending_std#Adding_template_specializations
namespace std {
template<>
struct hash<Vector2> {
// Not a good hash function, but easy to test
size_t operator()(const Vector2 &) { return 4; }
};
+} // namespace std
+
+// Not a good abs function, but easy to test.
+std::string abs(const Vector2&) {
+ return "abs(Vector2)";
}
// MSVC warns about unknown pragmas, and warnings are errors.
// Here, we suppress the warning using `#pragma diagnostic`.
// Taken from: https://github.com/RobotLocomotion/drake/commit/aaf84b46
// TODO(eric): This could be resolved using a function / functor (e.g. `py::self()`).
- #if (__APPLE__) && (__clang__)
- #if (__clang_major__ >= 10) && (__clang_minor__ >= 0) && (__clang_patchlevel__ >= 1)
+ #if defined(__APPLE__) && defined(__clang__)
+ #if (__clang_major__ >= 10)
#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
#endif
- #elif (__clang__)
+ #elif defined(__clang__)
#if (__clang_major__ >= 7)
#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
#endif
.def(float() / py::self)
.def(-py::self)
.def("__str__", &Vector2::toString)
- .def(hash(py::self))
+ .def("__repr__", &Vector2::toString)
+ .def(py::self == py::self)
+ .def(py::self != py::self)
+ .def(py::hash(py::self))
+ // N.B. See warning about usage of `py::detail::abs(py::self)` in
+ // `operators.h`.
+ .def("__abs__", [](const Vector2& v) { return abs(v); })
;
m.attr("Vector") = m.attr("Vector2");
.def(py::self *= int())
.def_readwrite("b", &NestC::b);
m.def("get_NestC", [](const NestC &c) { return c.value; });
+
+
+ // test_overriding_eq_reset_hash
+ // #2191 Overriding __eq__ should set __hash__ to None
+ struct Comparable {
+ int value;
+ bool operator==(const Comparable& rhs) const {return value == rhs.value;}
+ };
+
+ struct Hashable : Comparable {
+ explicit Hashable(int value): Comparable{value}{};
+ size_t hash() const { return static_cast<size_t>(value); }
+ };
+
+ struct Hashable2 : Hashable {
+ using Hashable::Hashable;
+ };
+
+ py::class_<Comparable>(m, "Comparable")
+ .def(py::init<int>())
+ .def(py::self == py::self);
+
+ py::class_<Hashable>(m, "Hashable")
+ .def(py::init<int>())
+ .def(py::self == py::self)
+ .def("__hash__", &Hashable::hash);
+
+ // define __hash__ before __eq__
+ py::class_<Hashable2>(m, "Hashable2")
+ .def("__hash__", &Hashable::hash)
+ .def(py::init<int>())
+ .def(py::self == py::self);
}
#ifndef _MSC_VER
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import operators as m
from pybind11_tests import ConstructorStats
def test_operator_overloading():
v1 = m.Vector2(1, 2)
v2 = m.Vector(3, -1)
+ v3 = m.Vector2(1, 2) # Same value as v1, but different instance.
+ assert v1 is not v3
+
assert str(v1) == "[1.000000, 2.000000]"
assert str(v2) == "[3.000000, -1.000000]"
assert str(v1 * v2) == "[3.000000, -2.000000]"
assert str(v2 / v1) == "[3.000000, -0.500000]"
+ assert v1 == v3
+ assert v1 != v2
+ assert hash(v1) == 4
+ # TODO(eric.cousineau): Make this work.
+ # assert abs(v1) == "abs(Vector2)"
+
v1 += 2 * v2
assert str(v1) == "[7.000000, 0.000000]"
v1 -= v2
v2 /= v1
assert str(v2) == "[2.000000, 8.000000]"
- assert hash(v1) == 4
-
cstats = ConstructorStats.get(m.Vector2)
- assert cstats.alive() == 2
+ assert cstats.alive() == 3
del v1
- assert cstats.alive() == 1
+ assert cstats.alive() == 2
del v2
+ assert cstats.alive() == 1
+ del v3
assert cstats.alive() == 0
- assert cstats.values() == ['[1.000000, 2.000000]', '[3.000000, -1.000000]',
- '[-3.000000, 1.000000]', '[4.000000, 1.000000]',
- '[-2.000000, 3.000000]', '[-7.000000, -6.000000]',
- '[9.000000, 10.000000]', '[8.000000, 16.000000]',
- '[0.125000, 0.250000]', '[7.000000, 6.000000]',
- '[9.000000, 10.000000]', '[8.000000, 16.000000]',
- '[8.000000, 4.000000]', '[3.000000, -2.000000]',
- '[3.000000, -0.500000]', '[6.000000, -2.000000]']
+ assert cstats.values() == [
+ "[1.000000, 2.000000]",
+ "[3.000000, -1.000000]",
+ "[1.000000, 2.000000]",
+ "[-3.000000, 1.000000]",
+ "[4.000000, 1.000000]",
+ "[-2.000000, 3.000000]",
+ "[-7.000000, -6.000000]",
+ "[9.000000, 10.000000]",
+ "[8.000000, 16.000000]",
+ "[0.125000, 0.250000]",
+ "[7.000000, 6.000000]",
+ "[9.000000, 10.000000]",
+ "[8.000000, 16.000000]",
+ "[8.000000, 4.000000]",
+ "[3.000000, -2.000000]",
+ "[3.000000, -0.500000]",
+ "[6.000000, -2.000000]",
+ ]
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 10
assert abase.value == 42
del abase, b
pytest.gc_collect()
+
+
+def test_overriding_eq_reset_hash():
+
+ assert m.Comparable(15) is not m.Comparable(15)
+ assert m.Comparable(15) == m.Comparable(15)
+
+ with pytest.raises(TypeError):
+ hash(m.Comparable(15)) # TypeError: unhashable type: 'm.Comparable'
+
+ for hashable in (m.Hashable, m.Hashable2):
+ assert hashable(15) is not hashable(15)
+ assert hashable(15) == hashable(15)
+
+ assert hash(hashable(15)) == 15
+ assert hash(hashable(15)) == hash(hashable(15))
using Pickleable::Pickleable;
};
- py::class_<Pickleable>(m, "Pickleable")
+ py::class_<Pickleable> pyPickleable(m, "Pickleable");
+ pyPickleable
.def(py::init<std::string>())
.def("value", &Pickleable::value)
.def("extra1", &Pickleable::extra1)
.def("__getstate__", [](const Pickleable &p) {
/* Return a tuple that fully encodes the state of the object */
return py::make_tuple(p.value(), p.extra1(), p.extra2());
- })
- .def("__setstate__", [](Pickleable &p, py::tuple t) {
- if (t.size() != 3)
- throw std::runtime_error("Invalid state!");
- /* Invoke the constructor (need to use in-place version) */
- new (&p) Pickleable(t[0].cast<std::string>());
-
- /* Assign any additional state */
- p.setExtra1(t[1].cast<int>());
- p.setExtra2(t[2].cast<int>());
});
+ ignoreOldStyleInitWarnings([&pyPickleable]() {
+ pyPickleable
+ .def("__setstate__", [](Pickleable &p, py::tuple t) {
+ if (t.size() != 3)
+ throw std::runtime_error("Invalid state!");
+ /* Invoke the constructor (need to use in-place version) */
+ new (&p) Pickleable(t[0].cast<std::string>());
+
+ /* Assign any additional state */
+ p.setExtra1(t[1].cast<int>());
+ p.setExtra2(t[2].cast<int>());
+ });
+ });
py::class_<PickleableNew, Pickleable>(m, "PickleableNew")
.def(py::init<std::string>())
using PickleableWithDict::PickleableWithDict;
};
- py::class_<PickleableWithDict>(m, "PickleableWithDict", py::dynamic_attr())
+ py::class_<PickleableWithDict> pyPickleableWithDict(m, "PickleableWithDict", py::dynamic_attr());
+ pyPickleableWithDict
.def(py::init<std::string>())
.def_readwrite("value", &PickleableWithDict::value)
.def_readwrite("extra", &PickleableWithDict::extra)
.def("__getstate__", [](py::object self) {
/* Also include __dict__ in state */
return py::make_tuple(self.attr("value"), self.attr("extra"), self.attr("__dict__"));
- })
- .def("__setstate__", [](py::object self, py::tuple t) {
- if (t.size() != 3)
- throw std::runtime_error("Invalid state!");
- /* Cast and construct */
- auto& p = self.cast<PickleableWithDict&>();
- new (&p) PickleableWithDict(t[0].cast<std::string>());
-
- /* Assign C++ state */
- p.extra = t[1].cast<int>();
-
- /* Assign Python state */
- self.attr("__dict__") = t[2];
});
+ ignoreOldStyleInitWarnings([&pyPickleableWithDict]() {
+ pyPickleableWithDict
+ .def("__setstate__", [](py::object self, py::tuple t) {
+ if (t.size() != 3)
+ throw std::runtime_error("Invalid state!");
+ /* Cast and construct */
+ auto& p = self.cast<PickleableWithDict&>();
+ new (&p) PickleableWithDict(t[0].cast<std::string>());
+
+ /* Assign C++ state */
+ p.extra = t[1].cast<int>();
+
+ /* Assign Python state */
+ self.attr("__dict__") = t[2];
+ });
+ });
py::class_<PickleableWithDictNew, PickleableWithDict>(m, "PickleableWithDictNew")
.def(py::init<std::string>())
+# -*- coding: utf-8 -*-
import pytest
+
+import env # noqa: F401
+
from pybind11_tests import pickling as m
try:
assert p2.extra2() == p.extra2()
-@pytest.unsupported_on_pypy
+@pytest.mark.xfail("env.PYPY")
@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"])
def test_roundtrip_with_dict(cls_name):
cls = getattr(m, cls_name)
def test_enum_pickle():
from pybind11_tests import enums as e
+
data = pickle.dumps(e.EOne, 2)
assert e.EOne == pickle.loads(data)
TEST_SUBMODULE(pytypes, m) {
+ // test_int
+ m.def("get_int", []{return py::int_(0);});
+ // test_iterator
+ m.def("get_iterator", []{return py::iterator();});
+ // test_iterable
+ m.def("get_iterable", []{return py::iterable();});
// test_list
m.def("get_list", []() {
py::list list;
for (auto item : list)
py::print("list item {}: {}"_s.format(index++, item));
});
+ // test_none
+ m.def("get_none", []{return py::none();});
+ m.def("print_none", [](py::none none) {
+ py::print("none: {}"_s.format(none));
+ });
// test_set
m.def("get_set", []() {
m.def("str_from_bytes", []() { return py::str(py::bytes("boo", 3)); });
m.def("str_from_object", [](const py::object& obj) { return py::str(obj); });
m.def("repr_from_object", [](const py::object& obj) { return py::repr(obj); });
+ m.def("str_from_handle", [](py::handle h) { return py::str(h); });
m.def("str_format", []() {
auto s1 = "{} + {} = {}"_s.format(1, 2, 3);
});
m.def("return_capsule_with_name_and_destructor", []() {
- auto capsule = py::capsule((void *) 1234, "pointer type description", [](PyObject *ptr) {
+ auto capsule = py::capsule((void *) 12345, "pointer type description", [](PyObject *ptr) {
if (ptr) {
auto name = PyCapsule_GetName(ptr);
py::print("destructing capsule ({}, '{}')"_s.format(
));
}
});
- void *contents = capsule;
- py::print("created capsule ({}, '{}')"_s.format((size_t) contents, capsule.name()));
+
+ capsule.set_pointer((void *) 1234);
+
+ // Using get_pointer<T>()
+ void* contents1 = static_cast<void*>(capsule);
+ void* contents2 = capsule.get_pointer();
+ void* contents3 = capsule.get_pointer<void>();
+
+ auto result1 = reinterpret_cast<size_t>(contents1);
+ auto result2 = reinterpret_cast<size_t>(contents2);
+ auto result3 = reinterpret_cast<size_t>(contents3);
+
+ py::print("created capsule ({}, '{}')"_s.format(result1 & result2 & result3, capsule.name()));
return capsule;
});
d["basic_attr"] = o.attr("basic_attr");
auto l = py::list();
- for (const auto &item : o.attr("begin_end")) {
+ for (auto item : o.attr("begin_end")) {
l.append(item);
}
d["begin_end"] = l;
// test_constructors
m.def("default_constructors", []() {
return py::dict(
+ "bytes"_a=py::bytes(),
"str"_a=py::str(),
"bool"_a=py::bool_(),
"int"_a=py::int_(),
m.def("converting_constructors", [](py::dict d) {
return py::dict(
+ "bytes"_a=py::bytes(d["bytes"]),
"str"_a=py::str(d["str"]),
"bool"_a=py::bool_(d["bool"]),
"int"_a=py::int_(d["int"]),
m.def("cast_functions", [](py::dict d) {
// When converting between Python types, obj.cast<T>() should be the same as T(obj)
return py::dict(
+ "bytes"_a=d["bytes"].cast<py::bytes>(),
"str"_a=d["str"].cast<py::str>(),
"bool"_a=d["bool"].cast<py::bool_>(),
"int"_a=d["int"].cast<py::int_>(),
);
});
+ m.def("convert_to_pybind11_str", [](py::object o) { return py::str(o); });
+
+ m.def("nonconverting_constructor", [](std::string type, py::object value, bool move) -> py::object {
+ if (type == "bytes") {
+ return move ? py::bytes(std::move(value)) : py::bytes(value);
+ }
+ else if (type == "none") {
+ return move ? py::none(std::move(value)) : py::none(value);
+ }
+ else if (type == "ellipsis") {
+ return move ? py::ellipsis(std::move(value)) : py::ellipsis(value);
+ }
+ else if (type == "type") {
+ return move ? py::type(std::move(value)) : py::type(value);
+ }
+ throw std::runtime_error("Invalid type");
+ });
+
m.def("get_implicit_casting", []() {
py::dict d;
d["char*_i1"] = "abc";
py::print("no new line here", "end"_a=" -- ");
py::print("next print");
- auto py_stderr = py::module::import("sys").attr("stderr");
+ auto py_stderr = py::module_::import("sys").attr("stderr");
py::print("this goes to stderr", "file"_a=py_stderr);
py::print("flush", "flush"_a=true);
m.def("test_list_slicing", [](py::list a) {
return a[py::slice(0, -1, 2)];
});
+
+ // See #2361
+ m.def("issue2361_str_implicit_copy_none", []() {
+ py::str is_this_none = py::none();
+ return is_this_none;
+ });
+ m.def("issue2361_dict_implicit_copy_none", []() {
+ py::dict is_this_none = py::none();
+ return is_this_none;
+ });
+
+ m.def("test_memoryview_object", [](py::buffer b) {
+ return py::memoryview(b);
+ });
+
+ m.def("test_memoryview_buffer_info", [](py::buffer b) {
+ return py::memoryview(b.request());
+ });
+
+ m.def("test_memoryview_from_buffer", [](bool is_unsigned) {
+ static const int16_t si16[] = { 3, 1, 4, 1, 5 };
+ static const uint16_t ui16[] = { 2, 7, 1, 8 };
+ if (is_unsigned)
+ return py::memoryview::from_buffer(
+ ui16, { 4 }, { sizeof(uint16_t) });
+ else
+ return py::memoryview::from_buffer(
+ si16, { 5 }, { sizeof(int16_t) });
+ });
+
+ m.def("test_memoryview_from_buffer_nativeformat", []() {
+ static const char* format = "@i";
+ static const int32_t arr[] = { 4, 7, 5 };
+ return py::memoryview::from_buffer(
+ arr, sizeof(int32_t), format, { 3 }, { sizeof(int32_t) });
+ });
+
+ m.def("test_memoryview_from_buffer_empty_shape", []() {
+ static const char* buf = "";
+ return py::memoryview::from_buffer(buf, 1, "B", { }, { });
+ });
+
+ m.def("test_memoryview_from_buffer_invalid_strides", []() {
+ static const char* buf = "\x02\x03\x04";
+ return py::memoryview::from_buffer(buf, 1, "B", { 3 }, { });
+ });
+
+ m.def("test_memoryview_from_buffer_nullptr", []() {
+ return py::memoryview::from_buffer(
+ static_cast<void*>(nullptr), 1, "B", { }, { });
+ });
+
+#if PY_MAJOR_VERSION >= 3
+ m.def("test_memoryview_from_memory", []() {
+ const char* buf = "\xff\xe1\xab\x37";
+ return py::memoryview::from_memory(
+ buf, static_cast<py::ssize_t>(strlen(buf)));
+ });
+#endif
+
+ // test_builtin_functions
+ m.def("get_len", [](py::handle h) { return py::len(h); });
}
+# -*- coding: utf-8 -*-
from __future__ import division
import pytest
import sys
+import env # noqa: F401
+
from pybind11_tests import pytypes as m
from pybind11_tests import debug_enabled
+def test_int(doc):
+ assert doc(m.get_int) == "get_int() -> int"
+
+
+def test_iterator(doc):
+ assert doc(m.get_iterator) == "get_iterator() -> Iterator"
+
+
+def test_iterable(doc):
+ assert doc(m.get_iterable) == "get_iterable() -> Iterable"
+
+
def test_list(capture, doc):
with capture:
lst = m.get_list()
lst.append("value2")
m.print_list(lst)
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
Entry at position 0: value
list item 0: inserted-0
list item 1: overwritten
list item 2: inserted-2
list item 3: value2
"""
+ )
assert doc(m.get_list) == "get_list() -> list"
assert doc(m.print_list) == "print_list(arg0: list) -> None"
+def test_none(capture, doc):
+ assert doc(m.get_none) == "get_none() -> None"
+ assert doc(m.print_none) == "print_none(arg0: None) -> None"
+
+
def test_set(capture, doc):
s = m.get_set()
assert s == {"key1", "key2", "key3"}
with capture:
s.add("key4")
m.print_set(s)
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
key: key1
key: key2
key: key3
key: key4
"""
+ )
assert not m.set_contains(set([]), 42)
assert m.set_contains({42}, 42)
with capture:
d["key2"] = "value2"
m.print_dict(d)
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
key: key, value=value
key: key2, value=value2
"""
+ )
assert not m.dict_contains({}, 42)
assert m.dict_contains({42: None}, 42)
assert m.str_from_object(A()) == "this is a str"
assert m.repr_from_object(A()) == "this is a repr"
+ assert m.str_from_handle(A()) == "this is a str"
s1, s2 = m.str_format()
assert s1 == "1 + 2 = 3"
assert s1 == s2
+ malformed_utf8 = b"\x80"
+ assert m.str_from_object(malformed_utf8) is malformed_utf8 # To be fixed; see #2380
+ if env.PY2:
+ # with pytest.raises(UnicodeDecodeError):
+ # m.str_from_object(malformed_utf8)
+ with pytest.raises(UnicodeDecodeError):
+ m.str_from_handle(malformed_utf8)
+ else:
+ # assert m.str_from_object(malformed_utf8) == "b'\\x80'"
+ assert m.str_from_handle(malformed_utf8) == "b'\\x80'"
+
def test_bytes(doc):
assert m.bytes_from_string().decode() == "foo"
assert m.bytes_from_str().decode() == "bar"
assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format(
- "bytes" if sys.version_info[0] == 3 else "str"
+ "str" if env.PY2 else "bytes"
)
a = m.return_capsule_with_destructor()
del a
pytest.gc_collect()
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
creating capsule
destructing capsule
"""
+ )
with capture:
a = m.return_capsule_with_destructor_2()
del a
pytest.gc_collect()
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
creating capsule
destructing capsule: 1234
"""
+ )
with capture:
a = m.return_capsule_with_name_and_destructor()
del a
pytest.gc_collect()
- assert capture.unordered == """
+ assert (
+ capture.unordered
+ == """
created capsule (1234, 'pointer type description')
destructing capsule (1234, 'pointer type description')
"""
+ )
def test_accessors():
def test_constructors():
"""C++ default and converting constructors are equivalent to type calls in Python"""
- types = [str, bool, int, float, tuple, list, dict, set]
+ types = [bytes, str, bool, int, float, tuple, list, dict, set]
expected = {t.__name__: t() for t in types}
+ if env.PY2:
+ # Note that bytes.__name__ == 'str' in Python 2.
+ # pybind11::str is unicode even under Python 2.
+ expected["bytes"] = bytes()
+ expected["str"] = unicode() # noqa: F821
assert m.default_constructors() == expected
data = {
+ bytes: b"41", # Currently no supported or working conversions.
str: 42,
bool: "Not empty",
int: "42",
list: range(3),
dict: [("two", 2), ("one", 1), ("three", 3)],
set: [4, 4, 5, 6, 6, 6],
- memoryview: b'abc'
+ memoryview: b"abc",
}
inputs = {k.__name__: v for k, v in data.items()}
expected = {k.__name__: k(v) for k, v in data.items()}
+ if env.PY2: # Similar to the above. See comments above.
+ inputs["bytes"] = b"41"
+ inputs["str"] = 42
+ expected["bytes"] = b"41"
+ expected["str"] = u"42"
assert m.converting_constructors(inputs) == expected
assert m.cast_functions(inputs) == expected
assert noconv2[k] is expected[k]
+def test_non_converting_constructors():
+ non_converting_test_cases = [
+ ("bytes", range(10)),
+ ("none", 42),
+ ("ellipsis", 42),
+ ("type", 42),
+ ]
+ for t, v in non_converting_test_cases:
+ for move in [True, False]:
+ with pytest.raises(TypeError) as excinfo:
+ m.nonconverting_constructor(t, v, move)
+ expected_error = "Object of type '{}' is not an instance of '{}'".format(
+ type(v).__name__, t
+ )
+ assert str(excinfo.value) == expected_error
+
+
+def test_pybind11_str_raw_str():
+ # specifically to exercise pybind11::str::raw_str
+ cvt = m.convert_to_pybind11_str
+ assert cvt(u"Str") == u"Str"
+ assert cvt(b"Bytes") == u"Bytes" if env.PY2 else "b'Bytes'"
+ assert cvt(None) == u"None"
+ assert cvt(False) == u"False"
+ assert cvt(True) == u"True"
+ assert cvt(42) == u"42"
+ assert cvt(2 ** 65) == u"36893488147419103232"
+ assert cvt(-1.50) == u"-1.5"
+ assert cvt(()) == u"()"
+ assert cvt((18,)) == u"(18,)"
+ assert cvt([]) == u"[]"
+ assert cvt([28]) == u"[28]"
+ assert cvt({}) == u"{}"
+ assert cvt({3: 4}) == u"{3: 4}"
+ assert cvt(set()) == u"set([])" if env.PY2 else "set()"
+ assert cvt({3, 3}) == u"set([3])" if env.PY2 else "{3}"
+
+ valid_orig = u"DZ"
+ valid_utf8 = valid_orig.encode("utf-8")
+ valid_cvt = cvt(valid_utf8)
+ assert type(valid_cvt) == bytes # Probably surprising.
+ assert valid_cvt == b"\xc7\xb1"
+
+ malformed_utf8 = b"\x80"
+ malformed_cvt = cvt(malformed_utf8)
+ assert type(malformed_cvt) == bytes # Probably surprising.
+ assert malformed_cvt == b"\x80"
+
+
def test_implicit_casting():
"""Tests implicit casting when assigning or appending to dicts and lists."""
z = m.get_implicit_casting()
- assert z['d'] == {
- 'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
- 'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
- 'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
+ assert z["d"] == {
+ "char*_i1": "abc",
+ "char*_i2": "abc",
+ "char*_e": "abc",
+ "char*_p": "abc",
+ "str_i1": "str",
+ "str_i2": "str1",
+ "str_e": "str2",
+ "str_p": "str3",
+ "int_i1": 42,
+ "int_i2": 42,
+ "int_e": 43,
+ "int_p": 44,
}
- assert z['l'] == [3, 6, 9, 12, 15]
+ assert z["l"] == [3, 6, 9, 12, 15]
def test_print(capture):
with capture:
m.print_function()
- assert capture == """
+ assert (
+ capture
+ == """
Hello, World!
1 2.0 three True -- multiple args
*args-and-a-custom-separator
flush
py::print + str.format = this
"""
+ )
assert capture.stderr == "this goes to stderr"
with pytest.raises(RuntimeError) as excinfo:
m.print_failure()
assert str(excinfo.value) == "make_tuple(): unable to convert " + (
"argument of type 'UnregisteredType' to Python object"
- if debug_enabled else
- "arguments to Python object (compile in debug mode for details)"
+ if debug_enabled
+ else "arguments to Python object (compile in debug mode for details)"
)
def test_number_protocol():
for a, b in [(1, 1), (3, 5)]:
- li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b,
- a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b]
+ li = [
+ a == b,
+ a != b,
+ a < b,
+ a <= b,
+ a > b,
+ a >= b,
+ a + b,
+ a - b,
+ a * b,
+ a / b,
+ a | b,
+ a & b,
+ a ^ b,
+ a >> b,
+ a << b,
+ ]
assert m.test_number_protocol(a, b) == li
def test_list_slicing():
li = list(range(100))
assert li[::2] == m.test_list_slicing(li)
+
+
+def test_issue2361():
+ # See issue #2361
+ assert m.issue2361_str_implicit_copy_none() == "None"
+ with pytest.raises(TypeError) as excinfo:
+ assert m.issue2361_dict_implicit_copy_none()
+ assert "'NoneType' object is not iterable" in str(excinfo.value)
+
+
+@pytest.mark.parametrize(
+ "method, args, fmt, expected_view",
+ [
+ (m.test_memoryview_object, (b"red",), "B", b"red"),
+ (m.test_memoryview_buffer_info, (b"green",), "B", b"green"),
+ (m.test_memoryview_from_buffer, (False,), "h", [3, 1, 4, 1, 5]),
+ (m.test_memoryview_from_buffer, (True,), "H", [2, 7, 1, 8]),
+ (m.test_memoryview_from_buffer_nativeformat, (), "@i", [4, 7, 5]),
+ ],
+)
+def test_memoryview(method, args, fmt, expected_view):
+ view = method(*args)
+ assert isinstance(view, memoryview)
+ assert view.format == fmt
+ if isinstance(expected_view, bytes) or not env.PY2:
+ view_as_list = list(view)
+ else:
+ # Using max to pick non-zero byte (big-endian vs little-endian).
+ view_as_list = [max([ord(c) for c in s]) for s in view]
+ assert view_as_list == list(expected_view)
+
+
+@pytest.mark.xfail("env.PYPY", reason="getrefcount is not available")
+@pytest.mark.parametrize(
+ "method",
+ [
+ m.test_memoryview_object,
+ m.test_memoryview_buffer_info,
+ ],
+)
+def test_memoryview_refcount(method):
+ buf = b"\x0a\x0b\x0c\x0d"
+ ref_before = sys.getrefcount(buf)
+ view = method(buf)
+ ref_after = sys.getrefcount(buf)
+ assert ref_before < ref_after
+ assert list(view) == list(buf)
+
+
+def test_memoryview_from_buffer_empty_shape():
+ view = m.test_memoryview_from_buffer_empty_shape()
+ assert isinstance(view, memoryview)
+ assert view.format == "B"
+ if env.PY2:
+ # Python 2 behavior is weird, but Python 3 (the future) is fine.
+ # PyPy3 has <memoryview, while CPython 2 has <memory
+ assert bytes(view).startswith(b"<memory")
+ else:
+ assert bytes(view) == b""
+
+
+def test_test_memoryview_from_buffer_invalid_strides():
+ with pytest.raises(RuntimeError):
+ m.test_memoryview_from_buffer_invalid_strides()
+
+
+def test_test_memoryview_from_buffer_nullptr():
+ if env.PY2:
+ m.test_memoryview_from_buffer_nullptr()
+ else:
+ with pytest.raises(ValueError):
+ m.test_memoryview_from_buffer_nullptr()
+
+
+@pytest.mark.skipif("env.PY2")
+def test_memoryview_from_memory():
+ view = m.test_memoryview_from_memory()
+ assert isinstance(view, memoryview)
+ assert view.format == "B"
+ assert bytes(view) == b"\xff\xe1\xab\x37"
+
+
+def test_builtin_functions():
+ assert m.get_len([i for i in range(42)]) == 42
+ with pytest.raises(TypeError) as exc_info:
+ m.get_len(i for i in range(42))
+ assert str(exc_info.value) in [
+ "object of type 'generator' has no len()",
+ "'generator' has no length",
+ ] # PyPy
#include <pybind11/operators.h>
#include <pybind11/stl.h>
+#include <algorithm>
+
template<typename T>
class NonZeroIterator {
const T* ptr_;
py::class_<Sliceable>(m,"Sliceable")
.def(py::init<int>())
.def("__getitem__",[](const Sliceable &s, py::slice slice) {
- ssize_t start, stop, step, slicelength;
+ py::ssize_t start, stop, step, slicelength;
if (!slice.compute(s.size, &start, &stop, &step, &slicelength))
throw py::error_already_set();
int istart = static_cast<int>(start);
size_t start, stop, step, slicelength;
if (!slice.compute(s.size(), &start, &stop, &step, &slicelength))
throw py::error_already_set();
- Sequence *seq = new Sequence(slicelength);
+ auto *seq = new Sequence(slicelength);
for (size_t i = 0; i < slicelength; ++i) {
(*seq)[i] = s[start]; start += step;
}
return l;
});
+ // test_sequence_length: check that Python sequences can be converted to py::sequence.
+ m.def("sequence_length", [](py::sequence seq) { return seq.size(); });
+
// Make sure that py::iterator works with std algorithms
m.def("count_none", [](py::object o) {
return std::count_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); });
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import sequences_and_iterators as m
from pybind11_tests import ConstructorStats
def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0):
- return all(isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list))
+ return all(
+ isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list)
+ )
def test_generalized_iterators():
cstats = ConstructorStats.get(m.Sequence)
s = m.Sequence(5)
- assert cstats.values() == ['of size', '5']
+ assert cstats.values() == ["of size", "5"]
assert "Sequence" in repr(s)
assert len(s) == 5
assert isclose(s[0], 12.34) and isclose(s[3], 56.78)
rev = reversed(s)
- assert cstats.values() == ['of size', '5']
+ assert cstats.values() == ["of size", "5"]
rev2 = s[::-1]
- assert cstats.values() == ['of size', '5']
+ assert cstats.values() == ["of size", "5"]
it = iter(m.Sequence(0))
for _ in range(3): # __next__ must continue to raise StopIteration
with pytest.raises(StopIteration):
next(it)
- assert cstats.values() == ['of size', '0']
+ assert cstats.values() == ["of size", "0"]
expected = [0, 56.78, 0, 0, 12.34]
assert allclose(rev, expected)
assert rev == rev2
rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
- assert cstats.values() == ['of size', '3', 'from std::vector']
+ assert cstats.values() == ["of size", "3", "from std::vector"]
assert allclose(rev, [2, 56.78, 2, 0, 2])
assert cstats.move_assignments == 0
+def test_sequence_length():
+ """#2076: Exception raised by len(arg) should be propagated """
+
+ class BadLen(RuntimeError):
+ pass
+
+ class SequenceLike:
+ def __getitem__(self, i):
+ return None
+
+ def __len__(self):
+ raise BadLen()
+
+ with pytest.raises(BadLen):
+ m.sequence_length(SequenceLike())
+
+ assert m.sequence_length([1, 2, 3]) == 3
+ assert m.sequence_length("hello") == 5
+
+
def test_map_iterator():
- sm = m.StringMap({'hi': 'bye', 'black': 'white'})
- assert sm['hi'] == 'bye'
+ sm = m.StringMap({"hi": "bye", "black": "white"})
+ assert sm["hi"] == "bye"
assert len(sm) == 2
- assert sm['black'] == 'white'
+ assert sm["black"] == "white"
with pytest.raises(KeyError):
- assert sm['orange']
- sm['orange'] = 'banana'
- assert sm['orange'] == 'banana'
+ assert sm["orange"]
+ sm["orange"] = "banana"
+ assert sm["orange"] == "banana"
- expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}
+ expected = {"hi": "bye", "black": "white", "orange": "banana"}
for k in sm:
assert sm[k] == expected[k]
for k, v in sm.items():
"""#181: iterator passthrough did not compile"""
from pybind11_tests.sequences_and_iterators import iterator_passthrough
- assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]
+ values = [3, 5, 7, 9, 11, 13, 15]
+ assert list(iterator_passthrough(iter(values))) == values
def test_iterator_rvp():
BSD-style license that can be found in the LICENSE file.
*/
-#if defined(_MSC_VER) && _MSC_VER < 1910
-# pragma warning(disable: 4702) // unreachable code in system header
+#if defined(_MSC_VER) && _MSC_VER < 1910 // VS 2015's MSVC
+# pragma warning(disable: 4702) // unreachable code in system header (xatomic.h(382))
#endif
#include "pybind11_tests.h"
struct holder_helper<ref<T>> {
static const T *get(const ref<T> &p) { return p.get_ptr(); }
};
-}}
+} // namespace detail
+} // namespace pybind11
// The following is not required anymore for std::shared_ptr, but it should compile without error:
PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr<T>);
class MyObject1 : public Object {
public:
MyObject1(int value) : value(value) { print_created(this, toString()); }
- std::string toString() const { return "MyObject1[" + std::to_string(value) + "]"; }
+ std::string toString() const override { return "MyObject1[" + std::to_string(value) + "]"; }
protected:
- virtual ~MyObject1() { print_destroyed(this); }
+ ~MyObject1() override { print_destroyed(this); }
private:
int value;
};
// test_unique_nodelete
// Object with a private destructor
+ class MyObject4;
+ static std::unordered_set<MyObject4 *> myobject4_instances;
class MyObject4 {
public:
- MyObject4(int value) : value{value} { print_created(this); }
+ MyObject4(int value) : value{value} {
+ print_created(this);
+ myobject4_instances.insert(this);
+ }
int value;
+
+ static void cleanupAllInstances() {
+ auto tmp = std::move(myobject4_instances);
+ myobject4_instances.clear();
+ for (auto o : tmp)
+ delete o;
+ }
private:
- ~MyObject4() { print_destroyed(this); }
+ ~MyObject4() {
+ myobject4_instances.erase(this);
+ print_destroyed(this);
+ }
};
py::class_<MyObject4, std::unique_ptr<MyObject4, py::nodelete>>(m, "MyObject4")
.def(py::init<int>())
- .def_readwrite("value", &MyObject4::value);
+ .def_readwrite("value", &MyObject4::value)
+ .def_static("cleanup_all_instances", &MyObject4::cleanupAllInstances);
// test_unique_deleter
// Object with std::unique_ptr<T, D> where D is not matching the base class
// Object with a protected destructor
+ class MyObject4a;
+ static std::unordered_set<MyObject4a *> myobject4a_instances;
class MyObject4a {
public:
MyObject4a(int i) {
value = i;
print_created(this);
+ myobject4a_instances.insert(this);
};
int value;
+
+ static void cleanupAllInstances() {
+ auto tmp = std::move(myobject4a_instances);
+ myobject4a_instances.clear();
+ for (auto o : tmp)
+ delete o;
+ }
protected:
- virtual ~MyObject4a() { print_destroyed(this); }
+ virtual ~MyObject4a() {
+ myobject4a_instances.erase(this);
+ print_destroyed(this);
+ }
};
py::class_<MyObject4a, std::unique_ptr<MyObject4a, py::nodelete>>(m, "MyObject4a")
.def(py::init<int>())
- .def_readwrite("value", &MyObject4a::value);
+ .def_readwrite("value", &MyObject4a::value)
+ .def_static("cleanup_all_instances", &MyObject4a::cleanupAllInstances);
// Object derived but with public destructor and no Deleter in default holder
class MyObject4b : public MyObject4a {
public:
MyObject4b(int i) : MyObject4a(i) { print_created(this); }
- ~MyObject4b() { print_destroyed(this); }
+ ~MyObject4b() override { print_destroyed(this); }
};
py::class_<MyObject4b, MyObject4a>(m, "MyObject4b")
.def(py::init<int>());
~C() { print_destroyed(this); }
};
py::class_<C, custom_unique_ptr<C>>(m, "TypeWithMoveOnlyHolder")
- .def_static("make", []() { return custom_unique_ptr<C>(new C); });
+ .def_static("make", []() { return custom_unique_ptr<C>(new C); })
+ .def_static("make_as_object", []() { return py::cast(custom_unique_ptr<C>(new C)); });
// test_holder_with_addressof_operator
struct TypeForHolderWithAddressOf {
// test_shared_ptr_gc
// #187: issue involving std::shared_ptr<> return value policy & garbage collection
struct ElementBase {
- virtual ~ElementBase() { } /* Force creation of virtual table */
+ virtual ~ElementBase() = default; /* Force creation of virtual table */
+ ElementBase() = default;
+ ElementBase(const ElementBase&) = delete;
};
py::class_<ElementBase, std::shared_ptr<ElementBase>>(m, "ElementBase");
+# -*- coding: utf-8 -*-
import pytest
-from pybind11_tests import smart_ptr as m
-from pybind11_tests import ConstructorStats
+
+m = pytest.importorskip("pybind11_tests.smart_ptr")
+from pybind11_tests import ConstructorStats # noqa: E402
def test_smart_ptr(capture):
# Object1
- for i, o in enumerate([m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1):
+ for i, o in enumerate(
+ [m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1
+ ):
assert o.getRefCount() == 1
with capture:
m.print_object_1(o)
m.print_object_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * 4
- for i, o in enumerate([m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7],
- start=4):
+ for i, o in enumerate(
+ [m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7], start=4
+ ):
print(o)
with capture:
if not isinstance(o, int):
m.print_myobject1_2(o)
m.print_myobject1_3(o)
m.print_myobject1_4(o)
- assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8)
+
+ times = 4 if isinstance(o, int) else 8
+ assert capture == "MyObject1[{i}]\n".format(i=i) * times
cstats = ConstructorStats.get(m.MyObject1)
assert cstats.alive() == 0
- expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4
+ expected_values = ["MyObject1[{}]".format(i) for i in range(1, 7)] + [
+ "MyObject1[7]"
+ ] * 4
assert cstats.values() == expected_values
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_assignments == 0
# Object2
- for i, o in zip([8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]):
+ for i, o in zip(
+ [8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]
+ ):
print(o)
with capture:
m.print_myobject2_1(o)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
- assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']
+ assert cstats.values() == ["MyObject2[8]", "MyObject2[6]", "MyObject2[7]"]
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.move_assignments == 0
# Object3
- for i, o in zip([9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]):
+ for i, o in zip(
+ [9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]
+ ):
print(o)
with capture:
m.print_myobject3_1(o)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
- assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']
+ assert cstats.values() == ["MyObject3[9]", "MyObject3[8]", "MyObject3[9]"]
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
# ref<>
cstats = m.cstats_ref()
assert cstats.alive() == 0
- assert cstats.values() == ['from pointer'] * 10
+ assert cstats.values() == ["from pointer"] * 10
assert cstats.default_constructions == 30
assert cstats.copy_constructions == 12
# assert cstats.move_constructions >= 0 # Doesn't invoke any
cstats = ConstructorStats.get(m.MyObject4)
assert cstats.alive() == 1
del o
- assert cstats.alive() == 1 # Leak, but that's intentional
+ assert cstats.alive() == 1
+ m.MyObject4.cleanup_all_instances()
+ assert cstats.alive() == 0
def test_unique_nodelete4a():
cstats = ConstructorStats.get(m.MyObject4a)
assert cstats.alive() == 1
del o
- assert cstats.alive() == 1 # Leak, but that's intentional
+ assert cstats.alive() == 1
+ m.MyObject4a.cleanup_all_instances()
+ assert cstats.alive() == 0
def test_unique_deleter():
+ m.MyObject4a(0)
o = m.MyObject4b(23)
assert o.value == 23
cstats4a = ConstructorStats.get(m.MyObject4a)
- assert cstats4a.alive() == 2 # Two because of previous test
+ assert cstats4a.alive() == 2
cstats4b = ConstructorStats.get(m.MyObject4b)
assert cstats4b.alive() == 1
del o
- assert cstats4a.alive() == 1 # Should now only be one leftover from previous test
+ assert cstats4a.alive() == 1 # Should now only be one leftover
assert cstats4b.alive() == 0 # Should be deleted
+ m.MyObject4a.cleanup_all_instances()
+ assert cstats4a.alive() == 0
+ assert cstats4b.alive() == 0
def test_large_holder():
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)
assert stats.alive() == 2
assert s.set_ref(ref)
- assert s.set_holder(ref) # std::enable_shared_from_this can create a holder from a reference
+ assert s.set_holder(
+ ref
+ ) # std::enable_shared_from_this can create a holder from a reference
bad_wp = s.bad_wp # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)
assert stats.alive() == 2
assert s.set_ref(copy)
assert s.set_holder(copy)
- holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
+ holder_ref = (
+ s.holder_ref
+ ) # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
- holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
+ holder_copy = (
+ s.holder_copy
+ ) # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
def test_move_only_holder():
a = m.TypeWithMoveOnlyHolder.make()
+ b = m.TypeWithMoveOnlyHolder.make_as_object()
stats = ConstructorStats.get(m.TypeWithMoveOnlyHolder)
+ assert stats.alive() == 2
+ del b
assert stats.alive() == 1
del a
assert stats.alive() == 0
instance = m.HeldByDefaultHolder()
with pytest.raises(RuntimeError) as excinfo:
m.HeldByDefaultHolder.load_shared_ptr(instance)
- assert "Unable to load a custom holder type from a " \
- "default-holder instance" in str(excinfo.value)
+ assert (
+ "Unable to load a custom holder type from a "
+ "default-holder instance" in str(excinfo.value)
+ )
def test_shared_ptr_gc():
#include <string>
// Test with `std::variant` in C++17 mode, or with `boost::variant` in C++11/14
-#if PYBIND11_HAS_VARIANT
+#if defined(PYBIND11_HAS_VARIANT)
using std::variant;
#elif defined(PYBIND11_TEST_BOOST) && (!defined(_MSC_VER) || _MSC_VER >= 1910)
# include <boost/variant.hpp>
namespace std {
template <>
struct hash<TplCtorClass> { size_t operator()(const TplCtorClass &) const { return 0; } };
-}
+} // namespace std
+
+
+template <template <typename> class OptionalImpl, typename T>
+struct OptionalHolder
+{
+ OptionalHolder() = default;
+ bool member_initialized() const {
+ return member && member->initialized;
+ }
+ OptionalImpl<T> member = T{};
+};
TEST_SUBMODULE(stl, m) {
.def(py::init<>())
.def(py::init<int>());
+
+ struct MoveOutDetector
+ {
+ MoveOutDetector() = default;
+ MoveOutDetector(const MoveOutDetector&) = default;
+ MoveOutDetector(MoveOutDetector&& other) noexcept
+ : initialized(other.initialized) {
+ // steal underlying resource
+ other.initialized = false;
+ }
+ bool initialized = true;
+ };
+ py::class_<MoveOutDetector>(m, "MoveOutDetector", "Class with move tracking")
+ .def(py::init<>())
+ .def_readonly("initialized", &MoveOutDetector::initialized);
+
+
#ifdef PYBIND11_HAS_OPTIONAL
// test_optional
m.attr("has_optional") = true;
m.def("nodefer_none_optional", [](std::optional<int>) { return true; });
m.def("nodefer_none_optional", [](py::none) { return false; });
+
+ using opt_holder = OptionalHolder<std::optional, MoveOutDetector>;
+ py::class_<opt_holder>(m, "OptionalHolder", "Class with optional member")
+ .def(py::init<>())
+ .def_readonly("member", &opt_holder::member)
+ .def("member_initialized", &opt_holder::member_initialized);
#endif
#ifdef PYBIND11_HAS_EXP_OPTIONAL
m.def("test_no_assign_exp", [](const exp_opt_no_assign &x) {
return x ? x->value : 42;
}, py::arg_v("x", std::experimental::nullopt, "None"));
+
+ using opt_exp_holder = OptionalHolder<std::experimental::optional, MoveOutDetector>;
+ py::class_<opt_exp_holder>(m, "OptionalExpHolder", "Class with optional member")
+ .def(py::init<>())
+ .def_readonly("member", &opt_exp_holder::member)
+ .def("member_initialized", &opt_exp_holder::member_initialized);
#endif
#ifdef PYBIND11_HAS_VARIANT
+# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import stl as m
assert m.cast_rv_nested() == [[[{"b": "rvalue", "c": "rvalue"}], [{"a": "rvalue"}]]]
assert m.cast_lv_nested() == {
"a": [[["lvalue", "lvalue"]], [["lvalue", "lvalue"]]],
- "b": [[["lvalue", "lvalue"], ["lvalue", "lvalue"]]]
+ "b": [[["lvalue", "lvalue"], ["lvalue", "lvalue"]]],
}
# Issue #853 test case:
assert [x.value for x in moved_out_list] == [0, 1, 2]
-@pytest.mark.skipif(not hasattr(m, "has_optional"), reason='no <optional>')
+@pytest.mark.skipif(not hasattr(m, "has_optional"), reason="no <optional>")
def test_optional():
assert m.double_or_zero(None) == 0
assert m.double_or_zero(42) == 84
- pytest.raises(TypeError, m.double_or_zero, 'foo')
+ pytest.raises(TypeError, m.double_or_zero, "foo")
assert m.half_or_none(0) is None
assert m.half_or_none(42) == 21
- pytest.raises(TypeError, m.half_or_none, 'foo')
+ pytest.raises(TypeError, m.half_or_none, "foo")
assert m.test_nullopt() == 42
assert m.test_nullopt(None) == 42
assert m.nodefer_none_optional(None)
+ holder = m.OptionalHolder()
+ mvalue = holder.member
+ assert mvalue.initialized
+ assert holder.member_initialized()
-@pytest.mark.skipif(not hasattr(m, "has_exp_optional"), reason='no <experimental/optional>')
+
+@pytest.mark.skipif(
+ not hasattr(m, "has_exp_optional"), reason="no <experimental/optional>"
+)
def test_exp_optional():
assert m.double_or_zero_exp(None) == 0
assert m.double_or_zero_exp(42) == 84
- pytest.raises(TypeError, m.double_or_zero_exp, 'foo')
+ pytest.raises(TypeError, m.double_or_zero_exp, "foo")
assert m.half_or_none_exp(0) is None
assert m.half_or_none_exp(42) == 21
- pytest.raises(TypeError, m.half_or_none_exp, 'foo')
+ pytest.raises(TypeError, m.half_or_none_exp, "foo")
assert m.test_nullopt_exp() == 42
assert m.test_nullopt_exp(None) == 42
assert m.test_no_assign_exp(m.NoAssign(43)) == 43
pytest.raises(TypeError, m.test_no_assign_exp, 43)
+ holder = m.OptionalExpHolder()
+ mvalue = holder.member
+ assert mvalue.initialized
+ assert holder.member_initialized()
+
-@pytest.mark.skipif(not hasattr(m, "load_variant"), reason='no <variant>')
+@pytest.mark.skipif(not hasattr(m, "load_variant"), reason="no <variant>")
def test_variant(doc):
assert m.load_variant(1) == "int"
assert m.load_variant("1") == "std::string"
assert m.cast_variant() == (5, "Hello")
- assert doc(m.load_variant) == "load_variant(arg0: Union[int, str, float, None]) -> str"
+ assert (
+ doc(m.load_variant) == "load_variant(arg0: Union[int, str, float, None]) -> str"
+ )
def test_vec_of_reference_wrapper():
"""#171: Can't return reference wrappers (or STL structures containing them)"""
- assert str(m.return_vec_of_reference_wrapper(UserType(4))) == \
- "[UserType(1), UserType(2), UserType(3), UserType(4)]"
+ assert (
+ str(m.return_vec_of_reference_wrapper(UserType(4)))
+ == "[UserType(1), UserType(2), UserType(3), UserType(4)]"
+ )
def test_stl_pass_by_pointer(msg):
"""Passing nullptr or None to an STL container pointer is not expected to work"""
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer() # default value is `nullptr`
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with:
""" # noqa: E501 line too long
+ )
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer(None)
- assert msg(excinfo.value) == """
+ assert (
+ msg(excinfo.value)
+ == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with: None
""" # noqa: E501 line too long
+ )
assert m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]
<pybind11/stl.h> should result in a helpful suggestion in the error message"""
import pybind11_cross_module_tests as cm
- expected_message = ("Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
- "<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
- "conversions are optional and require extra headers to be included\n"
- "when compiling your pybind11 module.")
+ expected_message = (
+ "Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
+ "<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
+ "conversions are optional and require extra headers to be included\n"
+ "when compiling your pybind11 module."
+ )
with pytest.raises(TypeError) as excinfo:
cm.missing_header_arg([1.0, 2.0, 3.0])
def test_function_with_string_and_vector_string_arg():
"""Check if a string is NOT implicitly converted to a list, which was the
behavior before fix of issue #1258"""
- assert m.func_with_string_or_vector_string_arg_overload(('A', 'B', )) == 2
- assert m.func_with_string_or_vector_string_arg_overload(['A', 'B']) == 2
- assert m.func_with_string_or_vector_string_arg_overload('A') == 3
+ assert m.func_with_string_or_vector_string_arg_overload(("A", "B")) == 2
+ assert m.func_with_string_or_vector_string_arg_overload(["A", "B"]) == 2
+ assert m.func_with_string_or_vector_string_arg_overload("A") == 3
def test_stl_ownership():
def test_issue_1561():
""" check fix for issue #1561 """
bar = m.Issue1561Outer()
- bar.list = [m.Issue1561Inner('bar')]
+ bar.list = [m.Issue1561Inner("bar")]
bar.list
- assert bar.list[0].data == 'bar'
+ assert bar.list[0].data == "bar"
return m;
}
+template <class NestMap> NestMap *times_hundred(int n) {
+ auto m = new NestMap();
+ for (int i = 1; i <= n; i++)
+ for (int j = 1; j <= n; j++)
+ (*m)[i].emplace(int(j*10), E_nc(100*j));
+ return m;
+}
+
TEST_SUBMODULE(stl_binders, m) {
// test_vector_int
py::bind_vector<std::vector<unsigned int>>(m, "VectorInt", py::buffer_protocol());
// test_noncopyable_containers
py::bind_vector<std::vector<E_nc>>(m, "VectorENC");
- m.def("get_vnc", &one_to_n<std::vector<E_nc>>, py::return_value_policy::reference);
+ m.def("get_vnc", &one_to_n<std::vector<E_nc>>);
py::bind_vector<std::deque<E_nc>>(m, "DequeENC");
- m.def("get_dnc", &one_to_n<std::deque<E_nc>>, py::return_value_policy::reference);
+ m.def("get_dnc", &one_to_n<std::deque<E_nc>>);
py::bind_map<std::map<int, E_nc>>(m, "MapENC");
- m.def("get_mnc", ×_ten<std::map<int, E_nc>>, py::return_value_policy::reference);
+ m.def("get_mnc", ×_ten<std::map<int, E_nc>>);
py::bind_map<std::unordered_map<int, E_nc>>(m, "UmapENC");
- m.def("get_umnc", ×_ten<std::unordered_map<int, E_nc>>, py::return_value_policy::reference);
+ m.def("get_umnc", ×_ten<std::unordered_map<int, E_nc>>);
+ // Issue #1885: binding nested std::map<X, Container<E>> with E non-copyable
+ py::bind_map<std::map<int, std::vector<E_nc>>>(m, "MapVecENC");
+ m.def("get_nvnc", [](int n)
+ {
+ auto m = new std::map<int, std::vector<E_nc>>();
+ for (int i = 1; i <= n; i++)
+ for (int j = 1; j <= n; j++)
+ (*m)[i].emplace_back(j);
+ return m;
+ });
+ py::bind_map<std::map<int, std::map<int, E_nc>>>(m, "MapMapENC");
+ m.def("get_nmnc", ×_hundred<std::map<int, std::map<int, E_nc>>>);
+ py::bind_map<std::unordered_map<int, std::unordered_map<int, E_nc>>>(m, "UmapUmapENC");
+ m.def("get_numnc", ×_hundred<std::unordered_map<int, std::unordered_map<int, E_nc>>>);
// test_vector_buffer
py::bind_vector<std::vector<unsigned char>>(m, "VectorUChar", py::buffer_protocol());
});
// The rest depends on numpy:
- try { py::module::import("numpy"); }
+ try { py::module_::import("numpy"); }
catch (...) { return; }
// test_vector_buffer_numpy
+# -*- coding: utf-8 -*-
import pytest
-import sys
-from pybind11_tests import stl_binders as m
-with pytest.suppress(ImportError):
- import numpy as np
+import env # noqa: F401
+
+from pybind11_tests import stl_binders as m
def test_vector_int():
# test error handling, and that the vector is unchanged
with pytest.raises(RuntimeError):
- v_int2.extend([8, 'a'])
+ v_int2.extend([8, "a"])
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
del v_int2[-1]
assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88])
-# related to the PyPy's buffer protocol.
-@pytest.unsupported_on_pypy
+ v_int2.clear()
+ assert len(v_int2) == 0
+
+
+# Older PyPy's failed here, related to the PyPy's buffer protocol.
def test_vector_buffer():
b = bytearray([1, 2, 3, 4])
v = m.VectorUChar(b)
assert v[1] == 2
v[2] = 5
mv = memoryview(v) # We expose the buffer interface
- if sys.version_info.major > 2:
+ if not env.PY2:
assert mv[2] == 5
mv[2] = 6
else:
- assert mv[2] == '\x05'
- mv[2] = '\x06'
+ assert mv[2] == "\x05"
+ mv[2] = "\x06"
assert v[2] == 6
+ if not env.PY2:
+ mv = memoryview(b)
+ v = m.VectorUChar(mv[::2])
+ assert v[1] == 3
+
with pytest.raises(RuntimeError) as excinfo:
m.create_undeclstruct() # Undeclared struct contents, no buffer interface
assert "NumPy type info missing for " in str(excinfo.value)
-@pytest.unsupported_on_pypy
-@pytest.requires_numpy
def test_vector_buffer_numpy():
+ np = pytest.importorskip("numpy")
a = np.array([1, 2, 3, 4], dtype=np.int32)
with pytest.raises(TypeError):
m.VectorInt(a)
v = m.get_vectorstruct()
assert v[0].x == 5
ma = np.asarray(v)
- ma[1]['x'] = 99
+ ma[1]["x"] = 99
assert v[1].x == 99
- v = m.VectorStruct(np.zeros(3, dtype=np.dtype([('w', 'bool'), ('x', 'I'),
- ('y', 'float64'), ('z', 'bool')], align=True)))
+ v = m.VectorStruct(
+ np.zeros(
+ 3,
+ dtype=np.dtype(
+ [("w", "bool"), ("x", "I"), ("y", "float64"), ("z", "bool")], align=True
+ ),
+ )
+ )
assert len(v) == 3
+ b = np.array([1, 2, 3, 4], dtype=np.uint8)
+ v = m.VectorUChar(b[::2])
+ assert v[1] == 3
+
def test_vector_bool():
import pybind11_cross_module_tests as cm
def test_map_string_double():
mm = m.MapStringDouble()
- mm['a'] = 1
- mm['b'] = 2.5
+ mm["a"] = 1
+ mm["b"] = 2.5
- assert list(mm) == ['a', 'b']
- assert list(mm.items()) == [('a', 1), ('b', 2.5)]
+ assert list(mm) == ["a", "b"]
+ assert list(mm.items()) == [("a", 1), ("b", 2.5)]
assert str(mm) == "MapStringDouble{a: 1, b: 2.5}"
um = m.UnorderedMapStringDouble()
- um['ua'] = 1.1
- um['ub'] = 2.6
+ um["ua"] = 1.1
+ um["ub"] = 2.6
- assert sorted(list(um)) == ['ua', 'ub']
- assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
+ assert sorted(list(um)) == ["ua", "ub"]
+ assert sorted(list(um.items())) == [("ua", 1.1), ("ub", 2.6)]
assert "UnorderedMapStringDouble" in str(um)
def test_map_string_double_const():
mc = m.MapStringDoubleConst()
- mc['a'] = 10
- mc['b'] = 20.5
+ mc["a"] = 10
+ mc["b"] = 20.5
assert str(mc) == "MapStringDoubleConst{a: 10, b: 20.5}"
umc = m.UnorderedMapStringDoubleConst()
- umc['a'] = 11
- umc['b'] = 21.5
+ umc["a"] = 11
+ umc["b"] = 21.5
str(umc)
i = 1
for j in dnc:
- assert(j.value == i)
+ assert j.value == i
i += 1
# std::map
assert vsum == 150
+ # nested std::map<std::vector>
+ nvnc = m.get_nvnc(5)
+ for i in range(1, 6):
+ for j in range(0, 5):
+ assert nvnc[i][j].value == j + 1
+
+ # Note: maps do not have .values()
+ for _, v in nvnc.items():
+ for i, j in enumerate(v, start=1):
+ assert j.value == i
+
+ # nested std::map<std::map>
+ nmnc = m.get_nmnc(5)
+ for i in range(1, 6):
+ for j in range(10, 60, 10):
+ assert nmnc[i][j].value == 10 * j
+
+ vsum = 0
+ for _, v_o in nmnc.items():
+ for k_i, v_i in v_o.items():
+ assert v_i.value == 10 * k_i
+ vsum += v_i.value
+
+ assert vsum == 7500
+
+ # nested std::unordered_map<std::unordered_map>
+ numnc = m.get_numnc(5)
+ for i in range(1, 6):
+ for j in range(10, 60, 10):
+ assert numnc[i][j].value == 10 * j
+
+ vsum = 0
+ for _, v_o in numnc.items():
+ for k_i, v_i in v_o.items():
+ assert v_i.value == 10 * k_i
+ vsum += v_i.value
+
+ assert vsum == 7500
+
def test_map_delitem():
mm = m.MapStringDouble()
- mm['a'] = 1
- mm['b'] = 2.5
+ mm["a"] = 1
+ mm["b"] = 2.5
- assert list(mm) == ['a', 'b']
- assert list(mm.items()) == [('a', 1), ('b', 2.5)]
- del mm['a']
- assert list(mm) == ['b']
- assert list(mm.items()) == [('b', 2.5)]
+ assert list(mm) == ["a", "b"]
+ assert list(mm.items()) == [("a", 1), ("b", 2.5)]
+ del mm["a"]
+ assert list(mm) == ["b"]
+ assert list(mm.items()) == [("b", 2.5)]
um = m.UnorderedMapStringDouble()
- um['ua'] = 1.1
- um['ub'] = 2.6
-
- assert sorted(list(um)) == ['ua', 'ub']
- assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
- del um['ua']
- assert sorted(list(um)) == ['ub']
- assert sorted(list(um.items())) == [('ub', 2.6)]
+ um["ua"] = 1.1
+ um["ub"] = 2.6
+
+ assert sorted(list(um)) == ["ua", "ub"]
+ assert sorted(list(um.items())) == [("ua", 1.1), ("ub", 2.6)]
+ del um["ua"]
+ assert sorted(list(um)) == ["ub"]
+ assert sorted(list(um.items())) == [("ub", 2.6)]
struct Animal
{
+ // Make this type also a "standard" polymorphic type, to confirm that
+ // specializing polymorphic_type_hook using enable_if_t still works
+ // (https://github.com/pybind/pybind11/pull/2016/).
+ virtual ~Animal() = default;
+
+ // Enum for tag-based polymorphism.
enum class Kind {
Unknown = 0,
Dog = 100, Labrador, Chihuahua, LastDog = 199,
static const void *get(const itype *src, const std::type_info*& type)
{ type = src ? Animal::type_of_kind(src->kind) : nullptr; return src; }
};
-}
+} // namespace pybind11
TEST_SUBMODULE(tagbased_polymorphic, m) {
py::class_<Animal>(m, "Animal")
+# -*- coding: utf-8 -*-
from pybind11_tests import tagbased_polymorphic as m
def test_downcast():
zoo = m.create_zoo()
assert [type(animal) for animal in zoo] == [
- m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther
+ m.Labrador,
+ m.Dog,
+ m.Chihuahua,
+ m.Cat,
+ m.Panther,
]
assert [animal.name for animal in zoo] == [
- "Fido", "Ginger", "Hertzl", "Tiger", "Leo"
+ "Fido",
+ "Ginger",
+ "Hertzl",
+ "Tiger",
+ "Leo",
]
zoo[1].sound = "woooooo"
assert [dog.bark() for dog in zoo[:3]] == [
"Labrador Fido goes WOOF!",
"Dog Ginger goes woooooo",
- "Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles"
+ "Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles",
]
assert [cat.purr() for cat in zoo[3:]] == ["mrowr", "mrrrRRRRRR"]
zoo[0].excitement -= 1000
+# -*- coding: utf-8 -*-
from pybind11_tests import union_ as m
int run(int value) override {
/* Generate wrapping code that enables native function overloading */
- PYBIND11_OVERLOAD(
+ PYBIND11_OVERRIDE(
int, /* Return type */
ExampleVirt, /* Parent class */
run, /* Name of function */
}
bool run_bool() override {
- PYBIND11_OVERLOAD_PURE(
+ PYBIND11_OVERRIDE_PURE(
bool, /* Return type */
ExampleVirt, /* Parent class */
run_bool, /* Name of function */
}
void pure_virtual() override {
- PYBIND11_OVERLOAD_PURE(
+ PYBIND11_OVERRIDE_PURE(
void, /* Return type */
ExampleVirt, /* Parent class */
pure_virtual, /* Name of function */
// We can return reference types for compatibility with C++ virtual interfaces that do so, but
// note they have some significant limitations (see the documentation).
const std::string &get_string1() override {
- PYBIND11_OVERLOAD(
+ PYBIND11_OVERRIDE(
const std::string &, /* Return type */
ExampleVirt, /* Parent class */
get_string1, /* Name of function */
}
const std::string *get_string2() override {
- PYBIND11_OVERLOAD(
+ PYBIND11_OVERRIDE(
const std::string *, /* Return type */
ExampleVirt, /* Parent class */
get_string2, /* Name of function */
class NCVirt {
public:
- virtual ~NCVirt() { }
+ virtual ~NCVirt() = default;
+ NCVirt() = default;
+ NCVirt(const NCVirt&) = delete;
virtual NonCopyable get_noncopyable(int a, int b) { return NonCopyable(a, b); }
virtual Movable get_movable(int a, int b) = 0;
std::string print_movable(int a, int b) { return get_movable(a, b).get_value(); }
};
class NCVirtTrampoline : public NCVirt {
-#if !defined(__INTEL_COMPILER)
+#if !defined(__INTEL_COMPILER) && !defined(__CUDACC__) && !defined(__PGIC__)
NonCopyable get_noncopyable(int a, int b) override {
- PYBIND11_OVERLOAD(NonCopyable, NCVirt, get_noncopyable, a, b);
+ PYBIND11_OVERRIDE(NonCopyable, NCVirt, get_noncopyable, a, b);
}
#endif
Movable get_movable(int a, int b) override {
- PYBIND11_OVERLOAD_PURE(Movable, NCVirt, get_movable, a, b);
+ PYBIND11_OVERRIDE_PURE(Movable, NCVirt, get_movable, a, b);
}
};
/* for some reason MSVC2015 can't compile this if the function is pure virtual */
virtual std::string dispatch() const { return {}; };
virtual ~Base() = default;
+ Base() = default;
+ Base(const Base&) = delete;
};
struct DispatchIssue : Base {
- virtual std::string dispatch() const {
- PYBIND11_OVERLOAD_PURE(std::string, Base, dispatch, /* no arguments */);
+ std::string dispatch() const override {
+ PYBIND11_OVERRIDE_PURE(std::string, Base, dispatch, /* no arguments */);
}
};
// Forward declaration (so that we can put the main tests here; the inherited virtual approaches are
// rather long).
-void initialize_inherited_virtuals(py::module &m);
+void initialize_inherited_virtuals(py::module_ &m);
TEST_SUBMODULE(virtual_functions, m) {
// test_override
.def(py::init<int, int>());
// test_move_support
-#if !defined(__INTEL_COMPILER)
+#if !defined(__INTEL_COMPILER) && !defined(__CUDACC__) && !defined(__PGIC__)
py::class_<NCVirt, NCVirtTrampoline>(m, "NCVirt")
.def(py::init<>())
.def("get_noncopyable", &NCVirt::get_noncopyable)
// don't invoke Python dispatch classes by default when instantiating C++ classes
// that were not extended on the Python side
struct A {
- virtual ~A() {}
+ A() = default;
+ A(const A&) = delete;
+ virtual ~A() = default;
virtual void f() { py::print("A.f()"); }
};
struct PyA : A {
PyA() { py::print("PyA.PyA()"); }
- ~PyA() { py::print("PyA.~PyA()"); }
+ PyA(const PyA&) = delete;
+ ~PyA() override { py::print("PyA.~PyA()"); }
void f() override {
py::print("PyA.f()");
// This convolution just gives a `void`, but tests that PYBIND11_TYPE() works to protect
// a type containing a ,
- PYBIND11_OVERLOAD(PYBIND11_TYPE(typename std::enable_if<true, void>::type), A, f);
+ PYBIND11_OVERRIDE(PYBIND11_TYPE(typename std::enable_if<true, void>::type), A, f);
}
};
// test_alias_delay_initialization2
// ... unless we explicitly request it, as in this example:
struct A2 {
- virtual ~A2() {}
+ A2() = default;
+ A2(const A2&) = delete;
+ virtual ~A2() = default;
virtual void f() { py::print("A2.f()"); }
};
struct PyA2 : A2 {
PyA2() { py::print("PyA2.PyA2()"); }
- ~PyA2() { py::print("PyA2.~PyA2()"); }
+ PyA2(const PyA2&) = delete;
+ ~PyA2() override { py::print("PyA2.~PyA2()"); }
void f() override {
py::print("PyA2.f()");
- PYBIND11_OVERLOAD(void, A2, f);
+ PYBIND11_OVERRIDE(void, A2, f);
}
};
std::string v;
A a;
explicit OverrideTest(const std::string &v) : v{v} {}
+ OverrideTest() = default;
+ OverrideTest(const OverrideTest&) = delete;
virtual std::string str_value() { return v; }
virtual std::string &str_ref() { return v; }
virtual A A_value() { return a; }
class PyOverrideTest : public OverrideTest {
public:
using OverrideTest::OverrideTest;
- std::string str_value() override { PYBIND11_OVERLOAD(std::string, OverrideTest, str_value); }
+ std::string str_value() override { PYBIND11_OVERRIDE(std::string, OverrideTest, str_value); }
// Not allowed (uncommenting should hit a static_assert failure): we can't get a reference
// to a python numeric value, since we only copy values in the numeric type caster:
-// std::string &str_ref() override { PYBIND11_OVERLOAD(std::string &, OverrideTest, str_ref); }
+// std::string &str_ref() override { PYBIND11_OVERRIDE(std::string &, OverrideTest, str_ref); }
// But we can work around it like this:
private:
std::string _tmp;
- std::string str_ref_helper() { PYBIND11_OVERLOAD(std::string, OverrideTest, str_ref); }
+ std::string str_ref_helper() { PYBIND11_OVERRIDE(std::string, OverrideTest, str_ref); }
public:
std::string &str_ref() override { return _tmp = str_ref_helper(); }
- A A_value() override { PYBIND11_OVERLOAD(A, OverrideTest, A_value); }
- A &A_ref() override { PYBIND11_OVERLOAD(A &, OverrideTest, A_ref); }
+ A A_value() override { PYBIND11_OVERRIDE(A, OverrideTest, A_value); }
+ A &A_ref() override { PYBIND11_OVERRIDE(A &, OverrideTest, A_ref); }
};
py::class_<OverrideTest::A>(m, "OverrideTest_A")
return say_something(1) + " " + std::to_string(unlucky_number()); \
}
A_METHODS
+ A_Repeat() = default;
+ A_Repeat(const A_Repeat&) = delete;
virtual ~A_Repeat() = default;
};
class B_Repeat : public A_Repeat {
};
// Base classes for templated inheritance trampolines. Identical to the repeat-everything version:
-class A_Tpl { A_METHODS; virtual ~A_Tpl() = default; };
+class A_Tpl {
+ A_METHODS;
+ A_Tpl() = default;
+ A_Tpl(const A_Tpl&) = delete;
+ virtual ~A_Tpl() = default;
+};
class B_Tpl : public A_Tpl { B_METHODS };
class C_Tpl : public B_Tpl { C_METHODS };
class D_Tpl : public C_Tpl { D_METHODS };
class PyA_Repeat : public A_Repeat {
public:
using A_Repeat::A_Repeat;
- int unlucky_number() override { PYBIND11_OVERLOAD_PURE(int, A_Repeat, unlucky_number, ); }
- std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, A_Repeat, say_something, times); }
+ int unlucky_number() override { PYBIND11_OVERRIDE_PURE(int, A_Repeat, unlucky_number, ); }
+ std::string say_something(unsigned times) override { PYBIND11_OVERRIDE(std::string, A_Repeat, say_something, times); }
};
class PyB_Repeat : public B_Repeat {
public:
using B_Repeat::B_Repeat;
- int unlucky_number() override { PYBIND11_OVERLOAD(int, B_Repeat, unlucky_number, ); }
- std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, B_Repeat, say_something, times); }
- double lucky_number() override { PYBIND11_OVERLOAD(double, B_Repeat, lucky_number, ); }
+ int unlucky_number() override { PYBIND11_OVERRIDE(int, B_Repeat, unlucky_number, ); }
+ std::string say_something(unsigned times) override { PYBIND11_OVERRIDE(std::string, B_Repeat, say_something, times); }
+ double lucky_number() override { PYBIND11_OVERRIDE(double, B_Repeat, lucky_number, ); }
};
class PyC_Repeat : public C_Repeat {
public:
using C_Repeat::C_Repeat;
- int unlucky_number() override { PYBIND11_OVERLOAD(int, C_Repeat, unlucky_number, ); }
- std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, C_Repeat, say_something, times); }
- double lucky_number() override { PYBIND11_OVERLOAD(double, C_Repeat, lucky_number, ); }
+ int unlucky_number() override { PYBIND11_OVERRIDE(int, C_Repeat, unlucky_number, ); }
+ std::string say_something(unsigned times) override { PYBIND11_OVERRIDE(std::string, C_Repeat, say_something, times); }
+ double lucky_number() override { PYBIND11_OVERRIDE(double, C_Repeat, lucky_number, ); }
};
class PyD_Repeat : public D_Repeat {
public:
using D_Repeat::D_Repeat;
- int unlucky_number() override { PYBIND11_OVERLOAD(int, D_Repeat, unlucky_number, ); }
- std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, D_Repeat, say_something, times); }
- double lucky_number() override { PYBIND11_OVERLOAD(double, D_Repeat, lucky_number, ); }
+ int unlucky_number() override { PYBIND11_OVERRIDE(int, D_Repeat, unlucky_number, ); }
+ std::string say_something(unsigned times) override { PYBIND11_OVERRIDE(std::string, D_Repeat, say_something, times); }
+ double lucky_number() override { PYBIND11_OVERRIDE(double, D_Repeat, lucky_number, ); }
};
// Inheritance approach 2: templated trampoline classes.
class PyA_Tpl : public Base {
public:
using Base::Base; // Inherit constructors
- int unlucky_number() override { PYBIND11_OVERLOAD_PURE(int, Base, unlucky_number, ); }
- std::string say_something(unsigned times) override { PYBIND11_OVERLOAD(std::string, Base, say_something, times); }
+ int unlucky_number() override { PYBIND11_OVERRIDE_PURE(int, Base, unlucky_number, ); }
+ std::string say_something(unsigned times) override { PYBIND11_OVERRIDE(std::string, Base, say_something, times); }
};
template <class Base = B_Tpl>
class PyB_Tpl : public PyA_Tpl<Base> {
public:
using PyA_Tpl<Base>::PyA_Tpl; // Inherit constructors (via PyA_Tpl's inherited constructors)
- int unlucky_number() override { PYBIND11_OVERLOAD(int, Base, unlucky_number, ); }
- double lucky_number() override { PYBIND11_OVERLOAD(double, Base, lucky_number, ); }
+ int unlucky_number() override { PYBIND11_OVERRIDE(int, Base, unlucky_number, ); }
+ double lucky_number() override { PYBIND11_OVERRIDE(double, Base, lucky_number, ); }
};
// Since C_Tpl and D_Tpl don't declare any new virtual methods, we don't actually need these (we can
// use PyB_Tpl<C_Tpl> and PyB_Tpl<D_Tpl> for the trampoline classes instead):
};
*/
-void initialize_inherited_virtuals(py::module &m) {
+void initialize_inherited_virtuals(py::module_ &m) {
// test_inherited_virtuals
// Method 1: repeat
+# -*- coding: utf-8 -*-
import pytest
-from pybind11_tests import virtual_functions as m
-from pybind11_tests import ConstructorStats
+import env # noqa: F401
+
+m = pytest.importorskip("pybind11_tests.virtual_functions")
+from pybind11_tests import ConstructorStats # noqa: E402
def test_override(capture, msg):
self.data = "Hello world"
def run(self, value):
- print('ExtendedExampleVirt::run(%i), calling parent..' % value)
+ print("ExtendedExampleVirt::run(%i), calling parent.." % value)
return super(ExtendedExampleVirt, self).run(value + 1)
def run_bool(self):
- print('ExtendedExampleVirt::run_bool()')
+ print("ExtendedExampleVirt::run_bool()")
return False
def get_string1(self):
return "override1"
def pure_virtual(self):
- print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
+ print("ExtendedExampleVirt::pure_virtual(): %s" % self.data)
class ExtendedExampleVirt2(ExtendedExampleVirt):
def __init__(self, state):
ex12 = m.ExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12, 20) == 30
- assert capture == """
+ assert (
+ capture
+ == """
Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
""" # noqa: E501 line too long
+ )
with pytest.raises(RuntimeError) as excinfo:
m.runExampleVirtVirtual(ex12)
- assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
+ assert (
+ msg(excinfo.value)
+ == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
+ )
ex12p = ExtendedExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12p, 20) == 32
- assert capture == """
+ assert (
+ capture
+ == """
ExtendedExampleVirt::run(20), calling parent..
Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
""" # noqa: E501 line too long
+ )
with capture:
assert m.runExampleVirtBool(ex12p) is False
assert capture == "ExtendedExampleVirt::run_bool()"
ex12p2 = ExtendedExampleVirt2(15)
with capture:
assert m.runExampleVirt(ex12p2, 50) == 68
- assert capture == """
+ assert (
+ capture
+ == """
ExtendedExampleVirt::run(50), calling parent..
Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
""" # noqa: E501 line too long
+ )
cstats = ConstructorStats.get(m.ExampleVirt)
assert cstats.alive() == 3
del ex12, ex12p, ex12p2
assert cstats.alive() == 0
- assert cstats.values() == ['10', '11', '17']
+ assert cstats.values() == ["10", "11", "17"]
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 0
If we just create and use an A instance directly, the trampoline initialization is
bypassed and we only initialize an A() instead (for performance reasons).
"""
+
class B(m.A):
def __init__(self):
super(B, self).__init__()
m.call_f(b)
del b
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
PyA.PyA()
PyA.f()
In python f()
PyA.~PyA()
"""
+ )
def test_alias_delay_initialization2(capture):
performance penalty, it also allows us to do more things with the trampoline
class such as defining local variables and performing construction/destruction.
"""
+
class B2(m.A2):
def __init__(self):
super(B2, self).__init__()
m.call_f(a3)
del a3
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
PyA2.PyA2()
PyA2.f()
A2.f()
A2.f()
PyA2.~PyA2()
"""
+ )
# Python subclass version
with capture:
m.call_f(b2)
del b2
pytest.gc_collect()
- assert capture == """
+ assert (
+ capture
+ == """
PyA2.PyA2()
PyA2.f()
In python B2.f()
PyA2.~PyA2()
"""
+ )
# PyPy: Reference count > 1 causes call with noncopyable instance
# to fail in ncv1.print_nc()
-@pytest.unsupported_on_pypy
-@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
+@pytest.mark.xfail("env.PYPY")
+@pytest.mark.skipif(
+ not hasattr(m, "NCVirt"), reason="NCVirt does not work on Intel/PGI/NVCC compilers"
+)
def test_move_support():
class NCVirtExt(m.NCVirt):
def get_noncopyable(self, a, b):
del ncv1, ncv2
assert nc_stats.alive() == 0
assert mv_stats.alive() == 0
- assert nc_stats.values() == ['4', '9', '9', '9']
- assert mv_stats.values() == ['4', '5', '7', '7']
+ assert nc_stats.values() == ["4", "9", "9", "9"]
+ assert mv_stats.values() == ["4", "5", "7", "7"]
assert nc_stats.copy_constructions == 0
assert mv_stats.copy_constructions == 1
assert nc_stats.move_constructions >= 0
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
+
class PyClass1(m.DispatchIssue):
def dispatch(self):
return "Yay.."
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
- assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
+ assert (
+ msg(excinfo.value)
+ == 'Tried to call pure virtual function "Base::dispatch"'
+ )
- p = PyClass1()
- return m.dispatch_issue_go(p)
+ return m.dispatch_issue_go(PyClass1())
b = PyClass2()
assert m.dispatch_issue_go(b) == "Yay.."
class DT(m.D_Tpl):
def say_something(self, times):
- return "DT says:" + (' quack' * times)
+ return "DT says:" + (" quack" * times)
def unlucky_number(self):
return 1234
class DT2(DT):
def say_something(self, times):
- return "DT2: " + ('QUACK' * times)
+ return "DT2: " + ("QUACK" * times)
def unlucky_number(self):
return -3
--- /dev/null
+# Valgrind suppression file for NumPy & SciPy errors and leaks in pybind11 tests
+
+{
+ Leaks when importing NumPy
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyObject_Malloc
+ fun:_PyObject_GC_Alloc
+ fun:_PyObject_GC_Malloc
+ fun:_PyObject_GC_NewVar
+ fun:tuple_alloc
+ fun:PyTuple_Pack
+ ...
+ fun:__pyx_pymod_exec_*
+}
+
+{
+ Leaks when importing NumPy (bis)
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyObject_Malloc
+ fun:_PyObject_New
+ fun:PyCode_NewWithPosOnlyArgs
+ fun:PyCode_New
+ ...
+ fun:__pyx_pymod_exec_*
+}
+
+{
+ Leaks when importing NumPy (tris)
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyObject_Malloc
+ fun:_PyObject_GC_Alloc
+ fun:_PyObject_GC_Malloc
+ fun:_PyObject_GC_NewVar
+ fun:tuple_alloc
+ fun:_PyTuple_FromArray
+ fun:_PyObject_MakeTpCall
+ fun:_PyObject_VectorcallTstate
+ fun:PyObject_Vectorcall
+ fun:call_function
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrame
+ fun:function_code_fastcall
+ fun:_PyFunction_Vectorcall
+}
+
+{
+ Leaks when importing NumPy (quater)
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyObject_Malloc
+ fun:_PyObject_GC_Alloc
+ fun:_PyObject_GC_Malloc
+ fun:_PyObject_GC_NewVar
+ fun:tuple_alloc
+ fun:PyTuple_New
+ fun:r_object
+ fun:r_object
+ fun:r_object
+ fun:r_object
+}
+
+{
+ Leaks when importing NumPy (quinquies)
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyObject_Malloc
+ fun:_PyObject_GC_Alloc
+ fun:_PyObject_GC_Malloc
+ fun:_PyObject_GC_NewVar
+ fun:tuple_alloc
+ fun:PyTuple_New
+ fun:dictiter_iternextitem
+ fun:list_extend
+ fun:_PyList_Extend
+ fun:PySequence_List
+}
+
+{
+ Leak when importing scipy.fft
+ Memcheck:Leak
+ fun:_Znwm
+ fun:PyInit_pypocketfft
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ fun:_imp_create_dynamic_impl.constprop.3
+ fun:_imp_create_dynamic
+ fun:cfunction_vectorcall_FASTCALL
+ fun:PyVectorcall_Call
+ fun:_PyObject_Call
+ fun:PyObject_Call
+ fun:do_call_core
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrame
+ fun:_PyEval_EvalCode
+}
+
+{
+ NumPy leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ ...
+ fun:_buffer_get_info
+ fun:array_getbuffer
+ fun:PyObject_GetBuffer
+ fun:__Pyx__GetBufferAndValidate*
+ fun:__pyx_f_5numpy_6random_13bit_generator_12SeedSequence_mix_entropy
+ fun:__pyx_pw_5numpy_6random_13bit_generator_12SeedSequence_1__init__
+ fun:type_call
+ fun:__Pyx__PyObject_CallOneArg
+ fun:__pyx_pw_5numpy_6random_13bit_generator_12BitGenerator_1__init__
+}
--- /dev/null
+# Valgrind suppression file for CPython errors and leaks in pybind11 tests
+
+# Taken verbatim from https://github.com/python/cpython/blob/3.9/Misc/valgrind-python.supp#L266-L272
+{
+ Uninitialised byte(s) false alarm, see bpo-35561
+ Memcheck:Param
+ epoll_ctl(event)
+ fun:epoll_ctl
+ fun:pyepoll_internal_ctl
+}
+
+{
+ Python leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyMem_RawMalloc
+ fun:PyThread_allocate_lock
+ fun:_PyEval_InitState
+ fun:PyInterpreterState_New
+ ...
+ fun:pyinit_core*
+ fun:Py_InitializeFromConfig
+ fun:pymain_init
+ fun:pymain_main
+}
+
+{
+ Python leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:_PyMem_DebugRawAlloc
+ fun:_PyMem_DebugRawMalloc
+ fun:PyMem_RawMalloc
+ fun:PyThread_allocate_lock
+ fun:_PyRuntimeState_Init_impl
+ fun:_PyRuntimeState_Init
+ fun:_PyRuntime_Initialize
+ fun:pymain_init
+ fun:pymain_main
+ fun:Py_BytesMain
+}
+
+{
+ Python leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyMem_RawMalloc
+ fun:PyThread_allocate_lock
+ fun:_PyImport_AcquireLock
+ fun:_imp_acquire_lock_impl*
+ fun:_imp_acquire_lock
+ fun:cfunction_vectorcall_NOARGS
+ fun:_PyObject_VectorcallTstate
+ fun:PyObject_Vectorcall
+ fun:call_function
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrame
+ fun:function_code_fastcall
+}
+
+{
+ Python leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyMem_RawMalloc
+ fun:PyThread_allocate_lock
+ fun:newlockobject
+ ...
+ fun:cfunction_vectorcall_NOARGS
+ fun:_PyObject_VectorcallTstate
+ fun:PyObject_Vectorcall
+ fun:call_function
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrame
+ fun:function_code_fastcall
+ fun:_PyFunction_Vectorcall
+}
+
+{
+ Python leaks when spawning a subprocess
+ Memcheck:Leak
+ fun:malloc
+ fun:_PyMem_RawMalloc
+ fun:PyMem_RawMalloc
+ fun:PyThread_allocate_lock
+ fun:rlock_new
+ fun:type_call
+ fun:_PyObject_Call
+ fun:PyObject_Call
+ fun:do_call_core
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrame
+ fun:_PyEval_EvalCode
+ fun:_PyFunction_Vectorcall
+}
+
+# Not really CPython-specific, see link
+{
+ dlopen leak (https://stackoverflow.com/questions/1542457/memory-leak-reported-by-valgrind-in-dlopen)
+ Memcheck:Leak
+ fun:malloc
+ ...
+ fun:dl_open_worker
+ fun:_dl_catch_exception
+ fun:_dl_open
+ fun:dlopen_doit
+ fun:_dl_catch_exception
+ fun:_dl_catch_error
+ fun:_dlerror_run
+ fun:dlopen@@GLIBC_2.2.5
+ fun:_PyImport_FindSharedFuncptr
+ fun:_PyImport_LoadDynamicModuleWithSpec
+}
+
+# Not really CPython-specific, see link
+{
+ dlopen leak (https://stackoverflow.com/questions/1542457/memory-leak-reported-by-valgrind-in-dlopen)
+ Memcheck:Leak
+ fun:calloc
+ ...
+ fun:dl_open_worker
+ fun:_dl_catch_exception
+ fun:_dl_open
+ fun:dlopen_doit
+ fun:_dl_catch_exception
+ fun:_dl_catch_error
+ fun:_dlerror_run
+ fun:dlopen@@GLIBC_2.2.5
+ fun:_PyImport_FindSharedFuncptr
+ fun:_PyImport_LoadDynamicModuleWithSpec
+}
# Extract the version number from catch.hpp
function(_get_catch_version)
- file(STRINGS "${CATCH_INCLUDE_DIR}/catch.hpp" version_line REGEX "Catch v.*" LIMIT_COUNT 1)
+ file(
+ STRINGS "${CATCH_INCLUDE_DIR}/catch.hpp" version_line
+ REGEX "Catch v.*"
+ LIMIT_COUNT 1)
if(version_line MATCHES "Catch v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
- set(CATCH_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}" PARENT_SCOPE)
+ set(CATCH_VERSION
+ "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}"
+ PARENT_SCOPE)
endif()
endfunction()
if(error)
message(FATAL_ERROR "Could not download ${url}")
endif()
- set(CATCH_INCLUDE_DIR "${destination_dir}" CACHE INTERNAL "")
+ set(CATCH_INCLUDE_DIR
+ "${destination_dir}"
+ CACHE INTERNAL "")
endfunction()
# Look for catch locally
-find_path(CATCH_INCLUDE_DIR NAMES catch.hpp PATH_SUFFIXES catch)
+find_path(
+ CATCH_INCLUDE_DIR
+ NAMES catch.hpp
+ PATH_SUFFIXES catch2)
if(CATCH_INCLUDE_DIR)
_get_catch_version()
endif()
endif()
endif()
+add_library(Catch2::Catch2 IMPORTED INTERFACE)
+set_property(TARGET Catch2::Catch2 PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CATCH_INCLUDE_DIR}")
+
set(CATCH_FOUND TRUE)
set(Eigen3_FIND_VERSION_PATCH 0)
endif(NOT Eigen3_FIND_VERSION_PATCH)
- set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}")
+ set(Eigen3_FIND_VERSION
+ "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}")
endif(NOT Eigen3_FIND_VERSION)
macro(_eigen3_check_version)
file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header)
- string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}")
+ string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match
+ "${_eigen3_version_header}")
set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}")
- string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}")
+ string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match
+ "${_eigen3_version_header}")
set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}")
- string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}")
+ string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match
+ "${_eigen3_version_header}")
set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}")
set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})
endif(NOT EIGEN3_VERSION_OK)
endmacro(_eigen3_check_version)
-if (EIGEN3_INCLUDE_DIR)
+if(EIGEN3_INCLUDE_DIR)
# in cache already
_eigen3_check_version()
set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})
-else (EIGEN3_INCLUDE_DIR)
+else(EIGEN3_INCLUDE_DIR)
+ if(NOT DEFINED KDE4_INCLUDE_DIR)
+ set(KDE4_INCLUDE_DIR "")
+ endif()
- find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library
- PATHS
- ${CMAKE_INSTALL_PREFIX}/include
- ${KDE4_INCLUDE_DIR}
- PATH_SUFFIXES eigen3 eigen
- )
+ find_path(
+ EIGEN3_INCLUDE_DIR
+ NAMES signature_of_eigen3_matrix_library
+ PATHS ${CMAKE_INSTALL_PREFIX}/include ${KDE4_INCLUDE_DIR}
+ PATH_SUFFIXES eigen3 eigen)
if(EIGEN3_INCLUDE_DIR)
_eigen3_check_version()
mark_as_advanced(EIGEN3_INCLUDE_DIR)
endif(EIGEN3_INCLUDE_DIR)
-
# Checking for the extension makes sure that `LibsNew` was found and not just `Libs`.
if(PYTHONLIBS_FOUND AND PYTHON_MODULE_EXTENSION)
- return()
+ return()
endif()
-# Use the Python interpreter to find the libs.
-if(PythonLibsNew_FIND_REQUIRED)
- find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} REQUIRED)
+if(PythonLibsNew_FIND_QUIETLY)
+ set(_pythonlibs_quiet QUIET)
else()
- find_package(PythonInterp ${PythonLibsNew_FIND_VERSION})
+ set(_pythonlibs_quiet "")
endif()
+if(PythonLibsNew_FIND_REQUIRED)
+ set(_pythonlibs_required REQUIRED)
+endif()
+
+# Check to see if the `python` command is present and from a virtual
+# environment, conda, or GHA activation - if it is, try to use that.
+
+if(NOT DEFINED PYTHON_EXECUTABLE)
+ if(DEFINED ENV{VIRTUAL_ENV})
+ find_program(
+ PYTHON_EXECUTABLE python
+ PATHS "$ENV{VIRTUAL_ENV}" "$ENV{VIRTUAL_ENV}/bin"
+ NO_DEFAULT_PATH)
+ elseif(DEFINED ENV{CONDA_PREFIX})
+ find_program(
+ PYTHON_EXECUTABLE python
+ PATHS "$ENV{CONDA_PREFIX}" "$ENV{CONDA_PREFIX}/bin"
+ NO_DEFAULT_PATH)
+ elseif(DEFINED ENV{pythonLocation})
+ find_program(
+ PYTHON_EXECUTABLE python
+ PATHS "$ENV{pythonLocation}" "$ENV{pythonLocation}/bin"
+ NO_DEFAULT_PATH)
+ endif()
+ if(NOT PYTHON_EXECUTABLE)
+ unset(PYTHON_EXECUTABLE)
+ endif()
+endif()
+
+# Use the Python interpreter to find the libs.
+if(NOT PythonLibsNew_FIND_VERSION)
+ set(PythonLibsNew_FIND_VERSION "")
+endif()
+
+find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} ${_pythonlibs_required}
+ ${_pythonlibs_quiet})
+
if(NOT PYTHONINTERP_FOUND)
- set(PYTHONLIBS_FOUND FALSE)
- set(PythonLibsNew_FOUND FALSE)
- return()
+ set(PYTHONLIBS_FOUND FALSE)
+ set(PythonLibsNew_FOUND FALSE)
+ return()
endif()
-# According to http://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter
+# According to https://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter
# testing whether sys has the gettotalrefcount function is a reliable, cross-platform
# way to detect a CPython debug interpreter.
#
# The library suffix is from the config var LDVERSION sometimes, otherwise
# VERSION. VERSION will typically be like "2.7" on unix, and "27" on windows.
-execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
- "from distutils import sysconfig as s;import sys;import struct;
+execute_process(
+ COMMAND
+ "${PYTHON_EXECUTABLE}" "-c" "from distutils import sysconfig as s;import sys;import struct;
print('.'.join(str(v) for v in sys.version_info));
print(sys.prefix);
print(s.get_python_inc(plat_specific=True));
print(s.get_python_lib(plat_specific=True));
-print(s.get_config_var('SO'));
+print(s.get_config_var('EXT_SUFFIX') or s.get_config_var('SO'));
print(hasattr(sys, 'gettotalrefcount')+0);
print(struct.calcsize('@P'));
print(s.get_config_var('LDVERSION') or s.get_config_var('VERSION'));
print(s.get_config_var('LIBDIR') or '');
print(s.get_config_var('MULTIARCH') or '');
"
- RESULT_VARIABLE _PYTHON_SUCCESS
- OUTPUT_VARIABLE _PYTHON_VALUES
- ERROR_VARIABLE _PYTHON_ERROR_VALUE)
+ RESULT_VARIABLE _PYTHON_SUCCESS
+ OUTPUT_VARIABLE _PYTHON_VALUES
+ ERROR_VARIABLE _PYTHON_ERROR_VALUE)
if(NOT _PYTHON_SUCCESS MATCHES 0)
- if(PythonLibsNew_FIND_REQUIRED)
- message(FATAL_ERROR
- "Python config failure:\n${_PYTHON_ERROR_VALUE}")
- endif()
- set(PYTHONLIBS_FOUND FALSE)
- set(PythonLibsNew_FOUND FALSE)
- return()
+ if(PythonLibsNew_FIND_REQUIRED)
+ message(FATAL_ERROR "Python config failure:\n${_PYTHON_ERROR_VALUE}")
+ endif()
+ set(PYTHONLIBS_FOUND FALSE)
+ set(PythonLibsNew_FOUND FALSE)
+ return()
endif()
# Convert the process output into a list
if(WIN32)
- string(REGEX REPLACE "\\\\" "/" _PYTHON_VALUES ${_PYTHON_VALUES})
+ string(REGEX REPLACE "\\\\" "/" _PYTHON_VALUES ${_PYTHON_VALUES})
endif()
string(REGEX REPLACE ";" "\\\\;" _PYTHON_VALUES ${_PYTHON_VALUES})
string(REGEX REPLACE "\n" ";" _PYTHON_VALUES ${_PYTHON_VALUES})
# Make sure the Python has the same pointer-size as the chosen compiler
# Skip if CMAKE_SIZEOF_VOID_P is not defined
if(CMAKE_SIZEOF_VOID_P AND (NOT "${PYTHON_SIZEOF_VOID_P}" STREQUAL "${CMAKE_SIZEOF_VOID_P}"))
- if(PythonLibsNew_FIND_REQUIRED)
- math(EXPR _PYTHON_BITS "${PYTHON_SIZEOF_VOID_P} * 8")
- math(EXPR _CMAKE_BITS "${CMAKE_SIZEOF_VOID_P} * 8")
- message(FATAL_ERROR
- "Python config failure: Python is ${_PYTHON_BITS}-bit, "
- "chosen compiler is ${_CMAKE_BITS}-bit")
- endif()
- set(PYTHONLIBS_FOUND FALSE)
- set(PythonLibsNew_FOUND FALSE)
- return()
+ if(PythonLibsNew_FIND_REQUIRED)
+ math(EXPR _PYTHON_BITS "${PYTHON_SIZEOF_VOID_P} * 8")
+ math(EXPR _CMAKE_BITS "${CMAKE_SIZEOF_VOID_P} * 8")
+ message(FATAL_ERROR "Python config failure: Python is ${_PYTHON_BITS}-bit, "
+ "chosen compiler is ${_CMAKE_BITS}-bit")
+ endif()
+ set(PYTHONLIBS_FOUND FALSE)
+ set(PythonLibsNew_FOUND FALSE)
+ return()
endif()
# The built-in FindPython didn't always give the version numbers
list(GET _PYTHON_VERSION_LIST 0 PYTHON_VERSION_MAJOR)
list(GET _PYTHON_VERSION_LIST 1 PYTHON_VERSION_MINOR)
list(GET _PYTHON_VERSION_LIST 2 PYTHON_VERSION_PATCH)
+set(PYTHON_VERSION "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}.${PYTHON_VERSION_PATCH}")
# Make sure all directory separators are '/'
-string(REGEX REPLACE "\\\\" "/" PYTHON_PREFIX ${PYTHON_PREFIX})
-string(REGEX REPLACE "\\\\" "/" PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE_DIR})
-string(REGEX REPLACE "\\\\" "/" PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES})
-
-if(CMAKE_HOST_WIN32 AND NOT (MSYS OR MINGW))
- set(PYTHON_LIBRARY
- "${PYTHON_PREFIX}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib")
-
- # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the
- # original python installation. They may be found relative to PYTHON_INCLUDE_DIR.
- if(NOT EXISTS "${PYTHON_LIBRARY}")
- get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY)
- set(PYTHON_LIBRARY
- "${_PYTHON_ROOT}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib")
- endif()
+string(REGEX REPLACE "\\\\" "/" PYTHON_PREFIX "${PYTHON_PREFIX}")
+string(REGEX REPLACE "\\\\" "/" PYTHON_INCLUDE_DIR "${PYTHON_INCLUDE_DIR}")
+string(REGEX REPLACE "\\\\" "/" PYTHON_SITE_PACKAGES "${PYTHON_SITE_PACKAGES}")
- # raise an error if the python libs are still not found.
- if(NOT EXISTS "${PYTHON_LIBRARY}")
- message(FATAL_ERROR "Python libraries not found")
- endif()
+if(CMAKE_HOST_WIN32)
+ set(PYTHON_LIBRARY "${PYTHON_PREFIX}/libs/python${PYTHON_LIBRARY_SUFFIX}.lib")
-else()
+ # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the
+ # original python installation. They may be found relative to PYTHON_INCLUDE_DIR.
+ if(NOT EXISTS "${PYTHON_LIBRARY}")
+ get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY)
+ set(PYTHON_LIBRARY "${_PYTHON_ROOT}/libs/python${PYTHON_LIBRARY_SUFFIX}.lib")
+ endif()
+
+ # if we are in MSYS & MINGW, and we didn't find windows python lib, look for system python lib
+ if(DEFINED ENV{MSYSTEM}
+ AND MINGW
+ AND NOT EXISTS "${PYTHON_LIBRARY}")
if(PYTHON_MULTIARCH)
- set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}")
+ set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}")
else()
- set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}")
- endif()
- #message(STATUS "Searching for Python libs in ${_PYTHON_LIBS_SEARCH}")
- # Probably this needs to be more involved. It would be nice if the config
- # information the python interpreter itself gave us were more complete.
- find_library(PYTHON_LIBRARY
- NAMES "python${PYTHON_LIBRARY_SUFFIX}"
- PATHS ${_PYTHON_LIBS_SEARCH}
- NO_DEFAULT_PATH)
-
- # If all else fails, just set the name/version and let the linker figure out the path.
- if(NOT PYTHON_LIBRARY)
- set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX})
+ set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}")
endif()
+ unset(PYTHON_LIBRARY)
+ find_library(
+ PYTHON_LIBRARY
+ NAMES "python${PYTHON_LIBRARY_SUFFIX}"
+ PATHS ${_PYTHON_LIBS_SEARCH}
+ NO_DEFAULT_PATH)
+ endif()
+
+ # raise an error if the python libs are still not found.
+ if(NOT EXISTS "${PYTHON_LIBRARY}")
+ message(FATAL_ERROR "Python libraries not found")
+ endif()
+
+else()
+ if(PYTHON_MULTIARCH)
+ set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}")
+ else()
+ set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}")
+ endif()
+ #message(STATUS "Searching for Python libs in ${_PYTHON_LIBS_SEARCH}")
+ # Probably this needs to be more involved. It would be nice if the config
+ # information the python interpreter itself gave us were more complete.
+ find_library(
+ PYTHON_LIBRARY
+ NAMES "python${PYTHON_LIBRARY_SUFFIX}"
+ PATHS ${_PYTHON_LIBS_SEARCH}
+ NO_DEFAULT_PATH)
+
+ # If all else fails, just set the name/version and let the linker figure out the path.
+ if(NOT PYTHON_LIBRARY)
+ set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX})
+ endif()
endif()
-MARK_AS_ADVANCED(
- PYTHON_LIBRARY
- PYTHON_INCLUDE_DIR
-)
+mark_as_advanced(PYTHON_LIBRARY PYTHON_INCLUDE_DIR)
# We use PYTHON_INCLUDE_DIR, PYTHON_LIBRARY and PYTHON_DEBUG_LIBRARY for the
# cache entries because they are meant to specify the location of a single
# library. We now set the variables listed by the documentation for this
# module.
-SET(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
-SET(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
-SET(PYTHON_DEBUG_LIBRARIES "${PYTHON_DEBUG_LIBRARY}")
+set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
+set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
+if(NOT PYTHON_DEBUG_LIBRARY)
+ set(PYTHON_DEBUG_LIBRARY "")
+endif()
+set(PYTHON_DEBUG_LIBRARIES "${PYTHON_DEBUG_LIBRARY}")
-find_package_message(PYTHON
- "Found PythonLibs: ${PYTHON_LIBRARY}"
- "${PYTHON_EXECUTABLE}${PYTHON_VERSION}")
+find_package_message(PYTHON "Found PythonLibs: ${PYTHON_LIBRARY}"
+ "${PYTHON_EXECUTABLE}${PYTHON_VERSION_STRING}")
set(PYTHONLIBS_FOUND TRUE)
set(PythonLibsNew_FOUND TRUE)
+
+if(NOT PYTHON_MODULE_PREFIX)
+ set(PYTHON_MODULE_PREFIX "")
+endif()
#
# This script currently checks for
#
-# 1. use of tabs instead of spaces
-# 2. MSDOS-style CRLF endings
-# 3. trailing spaces
-# 4. missing space between keyword and parenthesis, e.g.: for(, if(, while(
-# 5. Missing space between right parenthesis and brace, e.g. 'for (...){'
-# 6. opening brace on its own line. It should always be on the same line as the
+# 1. missing space between keyword and parenthesis, e.g.: for(, if(, while(
+# 2. missing space between right parenthesis and brace, e.g. 'for (...){'
+# 3. opening brace on its own line. It should always be on the same line as the
# if/while/for/do statement.
#
-# Invoke as: tools/check-style.sh
+# Invoke as: tools/check-style.sh <filenames>
#
check_style_errors=0
IFS=$'\n'
-found="$( GREP_COLORS='mt=41' GREP_COLOR='41' grep $'\t' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )"
-if [ -n "$found" ]; then
- # The mt=41 sets a red background for matched tabs:
- echo -e '\033[31;01mError: found tab characters in the following files:\033[0m'
- check_style_errors=1
- echo "$found" | sed -e 's/^/ /'
-fi
-
-
-found="$( grep -IUlr $'\r' include tests/*.{cpp,py,h} docs/*.rst --color=always )"
-if [ -n "$found" ]; then
- echo -e '\033[31;01mError: found CRLF characters in the following files:\033[0m'
- check_style_errors=1
- echo "$found" | sed -e 's/^/ /'
-fi
-
-found="$(GREP_COLORS='mt=41' GREP_COLOR='41' grep '[[:blank:]]\+$' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )"
-if [ -n "$found" ]; then
- # The mt=41 sets a red background for matched trailing spaces
- echo -e '\033[31;01mError: found trailing spaces in the following files:\033[0m'
- check_style_errors=1
- echo "$found" | sed -e 's/^/ /'
-fi
-found="$(grep '\<\(if\|for\|while\|catch\)(\|){' include tests/*.{cpp,h} -rn --color=always)"
+found="$(grep '\<\(if\|for\|while\|catch\)(\|){' "$@" -rn --color=always)"
if [ -n "$found" ]; then
echo -e '\033[31;01mError: found the following coding style problems:\033[0m'
check_style_errors=1
last=""
}
{ last = /(if|for|while|catch|switch)\s*\(.*\)\s*$/ ? $0 : "" }
-' $(find include -type f) tests/*.{cpp,h} docs/*.rst)"
+' $(find include -type f) "$@")"
if [ -n "$found" ]; then
check_style_errors=1
echo -e '\033[31;01mError: braces should occur on the same line as the if/while/.. statement. Found issues in the following files:\033[0m'
--- /dev/null
+# Source: https://gitlab.kitware.com/cmake/community/-/wikis/FAQ#can-i-do-make-uninstall-with-cmake
+
+if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+ message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt")
+endif()
+
+file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+foreach(file ${files})
+ message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
+ if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ exec_program(
+ "@CMAKE_COMMAND@" ARGS
+ "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval)
+ if(NOT "${rm_retval}" STREQUAL 0)
+ message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}")
+ endif()
+ else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ message(STATUS "File $ENV{DESTDIR}${file} does not exist.")
+ endif()
+endforeach()
+# -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import sys
libsize = os.path.getsize(lib)
-print("------", os.path.basename(lib), "file size:", libsize, end='')
+print("------", os.path.basename(lib), "file size:", libsize, end="")
if os.path.exists(save):
with open(save) as sf:
else:
print()
-with open(save, 'w') as sf:
+with open(save, "w") as sf:
sf.write(str(libsize))
-
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import re
+
+import ghapi.all
+
+from rich import print
+from rich.syntax import Syntax
+
+
+ENTRY = re.compile(
+ r"""
+ Suggested \s changelog \s entry:
+ .*
+ ```rst
+ \s*
+ (.*?)
+ \s*
+ ```
+""",
+ re.DOTALL | re.VERBOSE,
+)
+
+print()
+
+
+api = ghapi.all.GhApi(owner="pybind", repo="pybind11")
+
+issues = api.issues.list_for_repo(labels="needs changelog", state="closed")
+missing = []
+
+for issue in issues:
+ changelog = ENTRY.findall(issue.body)
+ if changelog:
+ (msg,) = changelog
+ if not msg.startswith("* "):
+ msg = "* " + msg
+ if not msg.endswith("."):
+ msg += "."
+
+ msg += f"\n `#{issue.number} <{issue.html_url}>`_"
+
+ print(Syntax(msg, "rst", theme="ansi_light"))
+ print()
+
+ else:
+ missing.append(issue)
+
+if missing:
+ print()
+ print("[blue]" + "-" * 30)
+ print()
+
+ for issue in missing:
+ print(f"[red bold]Missing:[/red bold][red] {issue.title}")
+ print(f"[red] {issue.html_url}\n")
+
+ print("[bold]Template:\n")
+ msg = "## Suggested changelog entry:\n\n```rst\n\n```"
+ print(Syntax(msg, "md", theme="ansi_light"))
+
+print()
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]
-#
-# Extract documentation from C++ header files to use it in Python bindings
-#
-
-import os
-import sys
-import platform
-import re
-import textwrap
-
-from clang import cindex
-from clang.cindex import CursorKind
-from collections import OrderedDict
-from glob import glob
-from threading import Thread, Semaphore
-from multiprocessing import cpu_count
-
-RECURSE_LIST = [
- CursorKind.TRANSLATION_UNIT,
- CursorKind.NAMESPACE,
- CursorKind.CLASS_DECL,
- CursorKind.STRUCT_DECL,
- CursorKind.ENUM_DECL,
- CursorKind.CLASS_TEMPLATE
-]
-
-PRINT_LIST = [
- CursorKind.CLASS_DECL,
- CursorKind.STRUCT_DECL,
- CursorKind.ENUM_DECL,
- CursorKind.ENUM_CONSTANT_DECL,
- CursorKind.CLASS_TEMPLATE,
- CursorKind.FUNCTION_DECL,
- CursorKind.FUNCTION_TEMPLATE,
- CursorKind.CONVERSION_FUNCTION,
- CursorKind.CXX_METHOD,
- CursorKind.CONSTRUCTOR,
- CursorKind.FIELD_DECL
-]
-
-PREFIX_BLACKLIST = [
- CursorKind.TRANSLATION_UNIT
-]
-
-CPP_OPERATORS = {
- '<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',
- '+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':
- 'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',
- '>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':
- 'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',
- '&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':
- 'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'
-}
-
-CPP_OPERATORS = OrderedDict(
- sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))
-
-job_count = cpu_count()
-job_semaphore = Semaphore(job_count)
-
-
-class NoFilenamesError(ValueError):
- pass
-
-
-def d(s):
- return s if isinstance(s, str) else s.decode('utf8')
-
-
-def sanitize_name(name):
- name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
- for k, v in CPP_OPERATORS.items():
- name = name.replace('operator%s' % k, 'operator_%s' % v)
- name = re.sub('<.*>', '', name)
- name = ''.join([ch if ch.isalnum() else '_' for ch in name])
- name = re.sub('_$', '', re.sub('_+', '_', name))
- return '__doc_' + name
-
-
-def process_comment(comment):
- result = ''
-
- # Remove C++ comment syntax
- leading_spaces = float('inf')
- for s in comment.expandtabs(tabsize=4).splitlines():
- s = s.strip()
- if s.startswith('/*'):
- s = s[2:].lstrip('*')
- elif s.endswith('*/'):
- s = s[:-2].rstrip('*')
- elif s.startswith('///'):
- s = s[3:]
- if s.startswith('*'):
- s = s[1:]
- if len(s) > 0:
- leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
- result += s + '\n'
-
- if leading_spaces != float('inf'):
- result2 = ""
- for s in result.splitlines():
- result2 += s[leading_spaces:] + '\n'
- result = result2
-
- # Doxygen tags
- cpp_group = '([\w:]+)'
- param_group = '([\[\w:\]]+)'
-
- s = result
- s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
- s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
- s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
- s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
- s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
- s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
- s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
- r'\n\n$Parameter ``\2``:\n\n', s)
- s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
- r'\n\n$Template parameter ``\2``:\n\n', s)
-
- for in_, out_ in {
- 'return': 'Returns',
- 'author': 'Author',
- 'authors': 'Authors',
- 'copyright': 'Copyright',
- 'date': 'Date',
- 'remark': 'Remark',
- 'sa': 'See also',
- 'see': 'See also',
- 'extends': 'Extends',
- 'throw': 'Throws',
- 'throws': 'Throws'
- }.items():
- s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)
-
- s = re.sub(r'\\details\s*', r'\n\n', s)
- s = re.sub(r'\\brief\s*', r'', s)
- s = re.sub(r'\\short\s*', r'', s)
- s = re.sub(r'\\ref\s*', r'', s)
-
- s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
- r"```\n\1\n```\n", s, flags=re.DOTALL)
-
- # HTML/TeX tags
- s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
- s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
- s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
- s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
- s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
- s = re.sub(r'<li>', r'\n\n* ', s)
- s = re.sub(r'</?ul>', r'', s)
- s = re.sub(r'</li>', r'\n\n', s)
-
- s = s.replace('``true``', '``True``')
- s = s.replace('``false``', '``False``')
-
- # Re-flow text
- wrapper = textwrap.TextWrapper()
- wrapper.expand_tabs = True
- wrapper.replace_whitespace = True
- wrapper.drop_whitespace = True
- wrapper.width = 70
- wrapper.initial_indent = wrapper.subsequent_indent = ''
-
- result = ''
- in_code_segment = False
- for x in re.split(r'(```)', s):
- if x == '```':
- if not in_code_segment:
- result += '```\n'
- else:
- result += '\n```\n\n'
- in_code_segment = not in_code_segment
- elif in_code_segment:
- result += x.strip()
- else:
- for y in re.split(r'(?: *\n *){2,}', x):
- wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
- if len(wrapped) > 0 and wrapped[0] == '$':
- result += wrapped[1:] + '\n'
- wrapper.initial_indent = \
- wrapper.subsequent_indent = ' ' * 4
- else:
- if len(wrapped) > 0:
- result += wrapped + '\n\n'
- wrapper.initial_indent = wrapper.subsequent_indent = ''
- return result.rstrip().lstrip('\n')
-
-
-def extract(filename, node, prefix, output):
- if not (node.location.file is None or
- os.path.samefile(d(node.location.file.name), filename)):
- return 0
- if node.kind in RECURSE_LIST:
- sub_prefix = prefix
- if node.kind not in PREFIX_BLACKLIST:
- if len(sub_prefix) > 0:
- sub_prefix += '_'
- sub_prefix += d(node.spelling)
- for i in node.get_children():
- extract(filename, i, sub_prefix, output)
- if node.kind in PRINT_LIST:
- comment = d(node.raw_comment) if node.raw_comment is not None else ''
- comment = process_comment(comment)
- sub_prefix = prefix
- if len(sub_prefix) > 0:
- sub_prefix += '_'
- if len(node.spelling) > 0:
- name = sanitize_name(sub_prefix + d(node.spelling))
- output.append((name, filename, comment))
-
-
-class ExtractionThread(Thread):
- def __init__(self, filename, parameters, output):
- Thread.__init__(self)
- self.filename = filename
- self.parameters = parameters
- self.output = output
- job_semaphore.acquire()
-
- def run(self):
- print('Processing "%s" ..' % self.filename, file=sys.stderr)
- try:
- index = cindex.Index(
- cindex.conf.lib.clang_createIndex(False, True))
- tu = index.parse(self.filename, self.parameters)
- extract(self.filename, tu.cursor, '', self.output)
- finally:
- job_semaphore.release()
-
-
-def read_args(args):
- parameters = []
- filenames = []
- if "-x" not in args:
- parameters.extend(['-x', 'c++'])
- if not any(it.startswith("-std=") for it in args):
- parameters.append('-std=c++11')
-
- if platform.system() == 'Darwin':
- dev_path = '/Applications/Xcode.app/Contents/Developer/'
- lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'
- sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'
- libclang = lib_dir + 'libclang.dylib'
-
- if os.path.exists(libclang):
- cindex.Config.set_library_path(os.path.dirname(libclang))
-
- if os.path.exists(sdk_dir):
- sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])
- parameters.append('-isysroot')
- parameters.append(sysroot_dir)
- elif platform.system() == 'Linux':
- # clang doesn't find its own base includes by default on Linux,
- # but different distros install them in different paths.
- # Try to autodetect, preferring the highest numbered version.
- def clang_folder_version(d):
- return [int(ver) for ver in re.findall(r'(?<!lib)(?<!\d)\d+', d)]
- clang_include_dir = max((
- path
- for libdir in ['lib64', 'lib', 'lib32']
- for path in glob('/usr/%s/clang/*/include' % libdir)
- if os.path.isdir(path)
- ), default=None, key=clang_folder_version)
- if clang_include_dir:
- parameters.extend(['-isystem', clang_include_dir])
-
- for item in args:
- if item.startswith('-'):
- parameters.append(item)
- else:
- filenames.append(item)
-
- if len(filenames) == 0:
- raise NoFilenamesError("args parameter did not contain any filenames")
-
- return parameters, filenames
-
-
-def extract_all(args):
- parameters, filenames = read_args(args)
- output = []
- for filename in filenames:
- thr = ExtractionThread(filename, parameters, output)
- thr.start()
-
- print('Waiting for jobs to finish ..', file=sys.stderr)
- for i in range(job_count):
- job_semaphore.acquire()
-
- return output
-
-
-def write_header(comments, out_file=sys.stdout):
- print('''/*
- This file contains docstrings for the Python bindings.
- Do not edit! These were automatically extracted by mkdoc.py
- */
-
-#define __EXPAND(x) x
-#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
-#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
-#define __CAT1(a, b) a ## b
-#define __CAT2(a, b) __CAT1(a, b)
-#define __DOC1(n1) __doc_##n1
-#define __DOC2(n1, n2) __doc_##n1##_##n2
-#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
-#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
-#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
-#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
-#define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
-#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
-
-#if defined(__GNUG__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#endif
-''', file=out_file)
-
-
- name_ctr = 1
- name_prev = None
- for name, _, comment in list(sorted(comments, key=lambda x: (x[0], x[1]))):
- if name == name_prev:
- name_ctr += 1
- name = name + "_%i" % name_ctr
- else:
- name_prev = name
- name_ctr = 1
- print('\nstatic const char *%s =%sR"doc(%s)doc";' %
- (name, '\n' if '\n' in comment else ' ', comment), file=out_file)
-
- print('''
-#if defined(__GNUG__)
-#pragma GCC diagnostic pop
-#endif
-''', file=out_file)
-
-
-def mkdoc(args):
- args = list(args)
- out_path = None
- for idx, arg in enumerate(args):
- if arg.startswith("-o"):
- args.remove(arg)
- try:
- out_path = arg[2:] or args.pop(idx)
- except IndexError:
- print("-o flag requires an argument")
- exit(-1)
- break
-
- comments = extract_all(args)
-
- if out_path:
- try:
- with open(out_path, 'w') as out_file:
- write_header(comments, out_file)
- except:
- # In the event of an error, don't leave a partially-written
- # output file.
- try:
- os.unlink(out_path)
- except:
- pass
- raise
- else:
- write_header(comments)
-
-
-if __name__ == '__main__':
- try:
- mkdoc(sys.argv[1:])
- except NoFilenamesError:
- print('Syntax: %s [.. a list of header files ..]' % sys.argv[0])
- exit(-1)
--- /dev/null
+#[======================================================[.rst
+
+Adds the following targets::
+
+ pybind11::pybind11 - link to headers and pybind11
+ pybind11::module - Adds module links
+ pybind11::embed - Adds embed links
+ pybind11::lto - Link time optimizations (manual selection)
+ pybind11::thin_lto - Link time optimizations (manual selection)
+ pybind11::python_link_helper - Adds link to Python libraries
+ pybind11::python2_no_register - Avoid warning/error with Python 2 + C++14/17
+ pybind11::windows_extras - MSVC bigobj and mp for building multithreaded
+ pybind11::opt_size - avoid optimizations that increase code size
+
+Adds the following functions::
+
+ pybind11_strip(target) - strip target after building on linux/macOS
+ pybind11_find_import(module) - See if a module is installed.
+
+#]======================================================]
+
+# CMake 3.10 has an include_guard command, but we can't use that yet
+if(TARGET pybind11::lto)
+ return()
+endif()
+
+# If we are in subdirectory mode, all IMPORTED targets must be GLOBAL. If we
+# are in CONFIG mode, they should be "normal" targets instead.
+# In CMake 3.11+ you can promote a target to global after you create it,
+# which might be simpler than this check.
+get_property(
+ is_config
+ TARGET pybind11::headers
+ PROPERTY IMPORTED)
+if(NOT is_config)
+ set(optional_global GLOBAL)
+endif()
+
+# If not run in Python mode, we still would like this to at least
+# include pybind11's include directory:
+set(pybind11_INCLUDE_DIRS
+ "${pybind11_INCLUDE_DIR}"
+ CACHE INTERNAL "Include directory for pybind11 (Python not requested)")
+
+# --------------------- Shared targets ----------------------------
+
+# Build an interface library target:
+add_library(pybind11::pybind11 IMPORTED INTERFACE ${optional_global})
+set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::headers)
+
+# Build a module target:
+add_library(pybind11::module IMPORTED INTERFACE ${optional_global})
+set_property(
+ TARGET pybind11::module
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11)
+
+# Build an embed library target:
+add_library(pybind11::embed IMPORTED INTERFACE ${optional_global})
+set_property(
+ TARGET pybind11::embed
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11)
+
+# ----------------------- no register ----------------------
+
+# Workaround for Python 2.7 and C++17 (C++14 as a warning) incompatibility
+# This adds the flags -Wno-register and -Wno-deprecated-register if the compiler
+# is Clang 3.9+ or AppleClang and the compile language is CXX, or /wd5033 for MSVC (all languages,
+# since MSVC didn't recognize COMPILE_LANGUAGE until CMake 3.11+).
+
+add_library(pybind11::python2_no_register INTERFACE IMPORTED ${optional_global})
+set(clang_4plus
+ "$<AND:$<CXX_COMPILER_ID:Clang>,$<NOT:$<VERSION_LESS:$<CXX_COMPILER_VERSION>,3.9>>>")
+set(no_register "$<OR:${clang_4plus},$<CXX_COMPILER_ID:AppleClang>>")
+
+if(MSVC AND CMAKE_VERSION VERSION_LESS 3.11)
+ set(cxx_no_register "${no_register}")
+else()
+ set(cxx_no_register "$<AND:$<COMPILE_LANGUAGE:CXX>,${no_register}>")
+endif()
+
+set(msvc "$<CXX_COMPILER_ID:MSVC>")
+
+set_property(
+ TARGET pybind11::python2_no_register
+ PROPERTY INTERFACE_COMPILE_OPTIONS
+ "$<${cxx_no_register}:-Wno-register;-Wno-deprecated-register>" "$<${msvc}:/wd5033>")
+
+# --------------------------- link helper ---------------------------
+
+add_library(pybind11::python_link_helper IMPORTED INTERFACE ${optional_global})
+
+if(CMAKE_VERSION VERSION_LESS 3.13)
+ # In CMake 3.11+, you can set INTERFACE properties via the normal methods, and
+ # this would be simpler.
+ set_property(
+ TARGET pybind11::python_link_helper
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES "$<$<PLATFORM_ID:Darwin>:-undefined dynamic_lookup>")
+else()
+ # link_options was added in 3.13+
+ # This is safer, because you are ensured the deduplication pass in CMake will not consider
+ # these separate and remove one but not the other.
+ set_property(
+ TARGET pybind11::python_link_helper
+ APPEND
+ PROPERTY INTERFACE_LINK_OPTIONS "$<$<PLATFORM_ID:Darwin>:LINKER:-undefined,dynamic_lookup>")
+endif()
+
+# ------------------------ Windows extras -------------------------
+
+add_library(pybind11::windows_extras IMPORTED INTERFACE ${optional_global})
+
+if(MSVC)
+ # /MP enables multithreaded builds (relevant when there are many files), /bigobj is
+ # needed for bigger binding projects due to the limit to 64k addressable sections
+ set_property(
+ TARGET pybind11::windows_extras
+ APPEND
+ PROPERTY INTERFACE_COMPILE_OPTIONS /bigobj)
+
+ if(CMAKE_VERSION VERSION_LESS 3.11)
+ set_property(
+ TARGET pybind11::windows_extras
+ APPEND
+ PROPERTY INTERFACE_COMPILE_OPTIONS $<$<NOT:$<CONFIG:Debug>>:/MP>)
+ else()
+ # Only set these options for C++ files. This is important so that, for
+ # instance, projects that include other types of source files like CUDA
+ # .cu files don't get these options propagated to nvcc since that would
+ # cause the build to fail.
+ set_property(
+ TARGET pybind11::windows_extras
+ APPEND
+ PROPERTY INTERFACE_COMPILE_OPTIONS $<$<NOT:$<CONFIG:Debug>>:$<$<COMPILE_LANGUAGE:CXX>:/MP>>)
+ endif()
+endif()
+
+# ----------------------- Optimize binary size --------------------------
+
+add_library(pybind11::opt_size IMPORTED INTERFACE ${optional_global})
+
+if(MSVC)
+ set(PYBIND11_OPT_SIZE /Os)
+else()
+ set(PYBIND11_OPT_SIZE -Os)
+endif()
+
+set_property(
+ TARGET pybind11::opt_size
+ APPEND
+ PROPERTY INTERFACE_COMPILE_OPTIONS $<$<CONFIG:Release>:${PYBIND11_OPT_SIZE}>
+ $<$<CONFIG:MinSizeRel>:${PYBIND11_OPT_SIZE}>
+ $<$<CONFIG:RelWithDebInfo>:${PYBIND11_OPT_SIZE}>)
+
+# ----------------------- Legacy option --------------------------
+
+# Warn or error if old variable name used
+if(PYBIND11_CPP_STANDARD)
+ string(REGEX MATCH [[..$]] VAL "${PYBIND11_CPP_STANDARD}")
+ if(CMAKE_CXX_STANDARD)
+ if(NOT CMAKE_CXX_STANDARD STREQUAL VAL)
+ message(WARNING "CMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} does not match "
+ "PYBIND11_CPP_STANDARD=${PYBIND11_CPP_STANDARD}, "
+ "please remove PYBIND11_CPP_STANDARD from your cache")
+ endif()
+ else()
+ set(supported_standards 11 14 17 20)
+ if("${VAL}" IN_LIST supported_standards)
+ message(WARNING "USE -DCMAKE_CXX_STANDARD=${VAL} instead of PYBIND11_CPP_STANDARD")
+ set(CMAKE_CXX_STANDARD
+ ${VAL}
+ CACHE STRING "From PYBIND11_CPP_STANDARD")
+ else()
+ message(FATAL_ERROR "PYBIND11_CPP_STANDARD should be replaced with CMAKE_CXX_STANDARD "
+ "(last two chars: ${VAL} not understood as a valid CXX std)")
+ endif()
+ endif()
+endif()
+
+# --------------------- Python specifics -------------------------
+
+# Check to see which Python mode we are in, new, old, or no python
+if(PYBIND11_NOPYTHON)
+ set(_pybind11_nopython ON)
+elseif(
+ PYBIND11_FINDPYTHON
+ OR Python_FOUND
+ OR Python2_FOUND
+ OR Python3_FOUND)
+ # New mode
+ include("${CMAKE_CURRENT_LIST_DIR}/pybind11NewTools.cmake")
+
+else()
+
+ # Classic mode
+ include("${CMAKE_CURRENT_LIST_DIR}/pybind11Tools.cmake")
+
+endif()
+
+# --------------------- pybind11_find_import -------------------------------
+
+if(NOT _pybind11_nopython)
+ # Check to see if modules are importable. Use REQUIRED to force an error if
+ # one of the modules is not found. <package_name>_FOUND will be set if the
+ # package was found (underscores replace dashes if present). QUIET will hide
+ # the found message, and VERSION will require a minimum version. A successful
+ # find will cache the result.
+ function(pybind11_find_import PYPI_NAME)
+ # CMake variables need underscores (PyPI doesn't care)
+ string(REPLACE "-" "_" NORM_PYPI_NAME "${PYPI_NAME}")
+
+ # Return if found previously
+ if(${NORM_PYPI_NAME}_FOUND)
+ return()
+ endif()
+
+ set(options "REQUIRED;QUIET")
+ set(oneValueArgs "VERSION")
+ cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "" ${ARGN})
+
+ if(ARG_REQUIRED)
+ set(status_level FATAL_ERROR)
+ else()
+ set(status_level WARNING)
+ endif()
+
+ execute_process(
+ COMMAND
+ ${${_Python}_EXECUTABLE} -c
+ "from pkg_resources import get_distribution; print(get_distribution('${PYPI_NAME}').version)"
+ RESULT_VARIABLE RESULT_PRESENT
+ OUTPUT_VARIABLE PKG_VERSION
+ ERROR_QUIET)
+
+ string(STRIP "${PKG_VERSION}" PKG_VERSION)
+
+ # If a result is present, this failed
+ if(RESULT_PRESENT)
+ set(${NORM_PYPI_NAME}_FOUND
+ ${NORM_PYPI_NAME}-NOTFOUND
+ CACHE INTERNAL "")
+ # Always warn or error
+ message(
+ ${status_level}
+ "Missing: ${PYPI_NAME} ${ARG_VERSION}\nTry: ${${_Python}_EXECUTABLE} -m pip install ${PYPI_NAME}"
+ )
+ else()
+ if(ARG_VERSION AND PKG_VERSION VERSION_LESS ARG_VERSION)
+ message(
+ ${status_level}
+ "Version incorrect: ${PYPI_NAME} ${PKG_VERSION} found, ${ARG_VERSION} required - try upgrading"
+ )
+ else()
+ set(${NORM_PYPI_NAME}_FOUND
+ YES
+ CACHE INTERNAL "")
+ set(${NORM_PYPI_NAME}_VERSION
+ ${PKG_VERSION}
+ CACHE INTERNAL "")
+ endif()
+ if(NOT ARG_QUIET)
+ message(STATUS "Found ${PYPI_NAME} ${PKG_VERSION}")
+ endif()
+ endif()
+ if(NOT ARG_VERSION OR (NOT PKG_VERSION VERSION_LESS ARG_VERSION))
+ # We have successfully found a good version, cache to avoid calling again.
+ endif()
+ endfunction()
+endif()
+
+# --------------------- LTO -------------------------------
+
+include(CheckCXXCompilerFlag)
+
+# Checks whether the given CXX/linker flags can compile and link a cxx file.
+# cxxflags and linkerflags are lists of flags to use. The result variable is a
+# unique variable name for each set of flags: the compilation result will be
+# cached base on the result variable. If the flags work, sets them in
+# cxxflags_out/linkerflags_out internal cache variables (in addition to
+# ${result}).
+function(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out
+ linkerflags_out)
+ set(CMAKE_REQUIRED_LIBRARIES ${linkerflags})
+ check_cxx_compiler_flag("${cxxflags}" ${result})
+ if(${result})
+ set(${cxxflags_out}
+ "${cxxflags}"
+ PARENT_SCOPE)
+ set(${linkerflags_out}
+ "${linkerflags}"
+ PARENT_SCOPE)
+ endif()
+endfunction()
+
+function(_pybind11_generate_lto target prefer_thin_lto)
+ if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
+ set(cxx_append "")
+ set(linker_append "")
+ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT APPLE)
+ # Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it
+ set(linker_append ";$<$<CONFIG:MinSizeRel>:-O3>")
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ set(cxx_append ";-fno-fat-lto-objects")
+ endif()
+
+ if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND prefer_thin_lto)
+ _pybind11_return_if_cxx_and_linker_flags_work(
+ HAS_FLTO_THIN "-flto=thin${cxx_append}" "-flto=thin${linker_append}"
+ PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
+ endif()
+
+ if(NOT HAS_FLTO_THIN)
+ _pybind11_return_if_cxx_and_linker_flags_work(
+ HAS_FLTO "-flto${cxx_append}" "-flto${linker_append}" PYBIND11_LTO_CXX_FLAGS
+ PYBIND11_LTO_LINKER_FLAGS)
+ endif()
+ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+ # Intel equivalent to LTO is called IPO
+ _pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO "-ipo" "-ipo"
+ PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
+ elseif(MSVC)
+ # cmake only interprets libraries as linker flags when they start with a - (otherwise it
+ # converts /LTCG to \LTCG as if it was a Windows path). Luckily MSVC supports passing flags
+ # with - instead of /, even if it is a bit non-standard:
+ _pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG "/GL" "-LTCG"
+ PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
+ endif()
+
+ # Enable LTO flags if found, except for Debug builds
+ if(PYBIND11_LTO_CXX_FLAGS)
+ # CONFIG takes multiple values in CMake 3.19+, until then we have to use OR
+ set(is_debug "$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>")
+ set(not_debug "$<NOT:${is_debug}>")
+ set(cxx_lang "$<COMPILE_LANGUAGE:CXX>")
+ if(MSVC AND CMAKE_VERSION VERSION_LESS 3.11)
+ set(genex "${not_debug}")
+ else()
+ set(genex "$<AND:${not_debug},${cxx_lang}>")
+ endif()
+ set_property(
+ TARGET ${target}
+ APPEND
+ PROPERTY INTERFACE_COMPILE_OPTIONS "$<${genex}:${PYBIND11_LTO_CXX_FLAGS}>")
+ if(CMAKE_PROJECT_NAME STREQUAL "pybind11")
+ message(STATUS "${target} enabled")
+ endif()
+ else()
+ if(CMAKE_PROJECT_NAME STREQUAL "pybind11")
+ message(STATUS "${target} disabled (not supported by the compiler and/or linker)")
+ endif()
+ endif()
+
+ if(PYBIND11_LTO_LINKER_FLAGS)
+ if(CMAKE_VERSION VERSION_LESS 3.11)
+ set_property(
+ TARGET ${target}
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES "$<${not_debug}:${PYBIND11_LTO_LINKER_FLAGS}>")
+ else()
+ set_property(
+ TARGET ${target}
+ APPEND
+ PROPERTY INTERFACE_LINK_OPTIONS "$<${not_debug}:${PYBIND11_LTO_LINKER_FLAGS}>")
+ endif()
+ endif()
+endfunction()
+
+add_library(pybind11::lto IMPORTED INTERFACE ${optional_global})
+_pybind11_generate_lto(pybind11::lto FALSE)
+
+add_library(pybind11::thin_lto IMPORTED INTERFACE ${optional_global})
+_pybind11_generate_lto(pybind11::thin_lto TRUE)
+
+# ---------------------- pybind11_strip -----------------------------
+
+function(pybind11_strip target_name)
+ # Strip unnecessary sections of the binary on Linux/macOS
+ if(CMAKE_STRIP)
+ if(APPLE)
+ set(x_opt -x)
+ endif()
+
+ add_custom_command(
+ TARGET ${target_name}
+ POST_BUILD
+ COMMAND ${CMAKE_STRIP} ${x_opt} $<TARGET_FILE:${target_name}>)
+ endif()
+endfunction()
-# pybind11Config.cmake
-# --------------------
-#
-# PYBIND11 cmake module.
-# This module sets the following variables in your project::
-#
-# pybind11_FOUND - true if pybind11 and all required components found on the system
-# pybind11_VERSION - pybind11 version in format Major.Minor.Release
-# pybind11_INCLUDE_DIRS - Directories where pybind11 and python headers are located.
-# pybind11_INCLUDE_DIR - Directory where pybind11 headers are located.
-# pybind11_DEFINITIONS - Definitions necessary to use pybind11, namely USING_pybind11.
-# pybind11_LIBRARIES - compile flags and python libraries (as needed) to link against.
-# pybind11_LIBRARY - empty.
-# CMAKE_MODULE_PATH - appends location of accompanying FindPythonLibsNew.cmake and
-# pybind11Tools.cmake modules.
-#
-#
-# Available components: None
-#
-#
-# Exported targets::
-#
-# If pybind11 is found, this module defines the following :prop_tgt:`IMPORTED`
-# interface library targets::
-#
-# pybind11::module - for extension modules
-# pybind11::embed - for embedding the Python interpreter
-#
-# Python headers, libraries (as needed by platform), and the C++ standard
-# are attached to the target. Set PythonLibsNew variables to influence
-# python detection and PYBIND11_CPP_STANDARD (-std=c++11 or -std=c++14) to
-# influence standard setting. ::
-#
-# find_package(pybind11 CONFIG REQUIRED)
-# message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}")
-#
-# # Create an extension module
-# add_library(mylib MODULE main.cpp)
-# target_link_libraries(mylib pybind11::module)
-#
-# # Or embed the Python interpreter into an executable
-# add_executable(myexe main.cpp)
-# target_link_libraries(myexe pybind11::embed)
-#
-# Suggested usage::
-#
-# find_package with version info is not recommended except for release versions. ::
-#
-# find_package(pybind11 CONFIG)
-# find_package(pybind11 2.0 EXACT CONFIG REQUIRED)
-#
-#
-# The following variables can be set to guide the search for this package::
-#
-# pybind11_DIR - CMake variable, set to directory containing this Config file
-# CMAKE_PREFIX_PATH - CMake variable, set to root directory of this package
-# PATH - environment variable, set to bin directory of this package
-# CMAKE_DISABLE_FIND_PACKAGE_pybind11 - CMake variable, disables
-# find_package(pybind11) when not REQUIRED, perhaps to force internal build
+#[=============================================================================[.rst:
-@PACKAGE_INIT@
+pybind11Config.cmake
+####################
+
+Exported variables
+==================
+
+This module sets the following variables in your project:
+
+``pybind11_FOUND``
+ true if pybind11 and all required components found on the system
+``pybind11_VERSION``
+ pybind11 version in format Major.Minor.Release
+``pybind11_VERSION_TYPE``
+ pybind11 version type (dev, release)
+``pybind11_INCLUDE_DIRS``
+ Directories where pybind11 and python headers are located.
+``pybind11_INCLUDE_DIR``
+ Directory where pybind11 headers are located.
+``pybind11_DEFINITIONS``
+ Definitions necessary to use pybind11, namely USING_pybind11.
+``pybind11_LIBRARIES``
+ Compile flags and python libraries (as needed) to link against.
+``pybind11_LIBRARY``
+ Empty.
+
+Available components: None
+
+
+Exported targets
+================
+
+If pybind11 is found, this module defines the following ``IMPORTED``
+interface library targets:
+
+``pybind11::module``
+ for extension modules.
+``pybind11::embed``
+ for embedding the Python interpreter.
+
+Python headers, libraries (as needed by platform), and the C++ standard
+are attached to the target.
+
+Advanced targets are also supplied - these are primarily for users building
+complex applications, and they are available in all modes:
+
+``pybind11::headers``
+ Just the pybind11 headers and minimum compile requirements.
+``pybind11::pybind11``
+ Python headers too.
+``pybind11::python_link_helper``
+  Just the "linking" part of ``pybind11::module``, for CMake < 3.15.
+``pybind11::python2_no_register``
+ Quiets the warning/error when mixing C++14+ and Python 2, also included in ``pybind11::module``.
+``pybind11::thin_lto``
+ An alternative to ``INTERPROCEDURAL_OPTIMIZATION``.
+``pybind11::lto``
+ An alternative to ``INTERPROCEDURAL_OPTIMIZATION`` (also avoids thin LTO on clang).
+``pybind11::windows_extras``
+ Adds bigobj and mp for MSVC.
+
+Modes
+=====
+
+There are two modes provided; classic, which is built on the old Python
+discovery packages in CMake, or the new FindPython mode, which uses FindPython
+from 3.12+ forward (3.15+ _highly_ recommended).
+
+New FindPython mode
+^^^^^^^^^^^^^^^^^^^
+
+To activate this mode, either call ``find_package(Python COMPONENTS Interpreter Development)``
+before finding this package, or set the ``PYBIND11_FINDPYTHON`` variable to ON. In this mode,
+you can either use the basic targets, or use the FindPython tools:
+
+.. code-block:: cmake
+
+ find_package(Python COMPONENTS Interpreter Development)
+ find_package(pybind11 CONFIG)
+
+ # pybind11 method:
+ pybind11_add_module(MyModule1 src1.cpp)
+
+ # Python method:
+ Python_add_library(MyModule2 src2.cpp)
+ target_link_libraries(MyModule2 pybind11::headers)
+ set_target_properties(MyModule2 PROPERTIES
+ INTERPROCEDURAL_OPTIMIZATION ON
+    CXX_VISIBILITY_PRESET "hidden"
+    VISIBILITY_INLINES_HIDDEN ON)
+
+If you build targets yourself, you may be interested in stripping the output
+for reduced size; this is the one other feature that the helper function gives you.
+
+Classic mode
+^^^^^^^^^^^^
+
+Set PythonLibsNew variables to influence python detection and
+CMAKE_CXX_STANDARD to influence standard setting.
+
+.. code-block:: cmake
+
+ find_package(pybind11 CONFIG REQUIRED)
+
+ # Create an extension module
+ add_library(mylib MODULE main.cpp)
+ target_link_libraries(mylib PUBLIC pybind11::module)
+
+ # Or embed the Python interpreter into an executable
+ add_executable(myexe main.cpp)
+ target_link_libraries(myexe PUBLIC pybind11::embed)
-set(PN pybind11)
-# location of pybind11/pybind11.h
-set(${PN}_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/@CMAKE_INSTALL_INCLUDEDIR@")
+Hints
+=====
-set(${PN}_LIBRARY "")
-set(${PN}_DEFINITIONS USING_${PN})
+The following variables can be set to guide the search for this package:
-check_required_components(${PN})
+``pybind11_DIR``
+ CMake variable, set to directory containing this Config file.
+``CMAKE_PREFIX_PATH``
+ CMake variable, set to root directory of this package.
+``PATH``
+ Environment variable, set to bin directory of this package.
+``CMAKE_DISABLE_FIND_PACKAGE_pybind11``
+ CMake variable, disables ``find_package(pybind11)`` when not ``REQUIRED``,
+ perhaps to force internal build.
-# make detectable the FindPythonLibsNew.cmake module
-list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR})
+Commands
+========
-include(pybind11Tools)
+pybind11_add_module
+^^^^^^^^^^^^^^^^^^^
-if(NOT (CMAKE_VERSION VERSION_LESS 3.0))
-#-----------------------------------------------------------------------------
-# Don't include targets if this file is being picked up by another
-# project which has already built this as a subproject
-#-----------------------------------------------------------------------------
-if(NOT TARGET ${PN}::pybind11)
- include("${CMAKE_CURRENT_LIST_DIR}/${PN}Targets.cmake")
+This module defines the following commands to assist with creating Python modules:
- find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} MODULE REQUIRED)
- set_property(TARGET ${PN}::pybind11 APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${PYTHON_INCLUDE_DIRS})
- set_property(TARGET ${PN}::embed APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${PYTHON_LIBRARIES})
- if(WIN32 OR CYGWIN)
- set_property(TARGET ${PN}::module APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${PYTHON_LIBRARIES})
- endif()
+.. code-block:: cmake
- if(CMAKE_VERSION VERSION_LESS 3.3)
- set_property(TARGET ${PN}::pybind11 APPEND PROPERTY INTERFACE_COMPILE_OPTIONS "${PYBIND11_CPP_STANDARD}")
- else()
- set_property(TARGET ${PN}::pybind11 APPEND PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CXX>:${PYBIND11_CPP_STANDARD}>)
- endif()
+ pybind11_add_module(<target>
+ [STATIC|SHARED|MODULE]
+    [THIN_LTO] [OPT_SIZE] [NO_EXTRAS] [WITHOUT_SOABI]
+ <files>...
+ )
- get_property(_iid TARGET ${PN}::pybind11 PROPERTY INTERFACE_INCLUDE_DIRECTORIES)
- get_property(_ill TARGET ${PN}::module PROPERTY INTERFACE_LINK_LIBRARIES)
- set(${PN}_INCLUDE_DIRS ${_iid})
- set(${PN}_LIBRARIES ${_ico} ${_ill})
+Add a module and setup all helpers. You can select the type of the library; the
+default is ``MODULE``. There are several options:
+
+``OPT_SIZE``
+  Optimize for size, even if the ``CMAKE_BUILD_TYPE`` is not ``MinSizeRel``.
+``THIN_LTO``
+  Use thin LTO instead of regular LTO if there's a choice (pybind11's selection
+  is disabled if ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set).
+``WITHOUT_SOABI``
+ Disable the SOABI component (``PYBIND11_NEWPYTHON`` mode only).
+``NO_EXTRAS``
+ Disable all extras, exit immediately after making the module.
+
+pybind11_strip
+^^^^^^^^^^^^^^
+
+.. code-block:: cmake
+
+ pybind11_strip(<target>)
+
+Strip a target after building it (linux/macOS), called by ``pybind11_add_module``.
+
+pybind11_extension
+^^^^^^^^^^^^^^^^^^
+
+.. code-block:: cmake
+
+ pybind11_extension(<target>)
+
+Sets the Python extension name correctly for Python on your platform, called by
+``pybind11_add_module``.
+
+pybind11_find_import(module)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: cmake
+
+ pybind11_find_import(<module> [VERSION <number>] [REQUIRED] [QUIET])
+
+See if a module is installed. Use the registered name (the one on PyPI). You
+can specify a ``VERSION``, and you can specify ``REQUIRED`` or ``QUIET``. Only available if
+``NOPYTHON`` mode is not active. Sets ``module_VERSION`` and ``module_FOUND``. Caches the
+result once a valid install is found.
+
+Suggested usage
+===============
+
+Using ``find_package`` with version info is not recommended except for release versions.
+
+.. code-block:: cmake
+
+ find_package(pybind11 CONFIG)
+ find_package(pybind11 2.0 EXACT CONFIG REQUIRED)
+
+#]=============================================================================]
+@PACKAGE_INIT@
+
+# Location of pybind11/pybind11.h
+set(pybind11_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/@CMAKE_INSTALL_INCLUDEDIR@")
+
+set(pybind11_LIBRARY "")
+set(pybind11_DEFINITIONS USING_pybind11)
+set(pybind11_VERSION_TYPE "@pybind11_VERSION_TYPE@")
+
+check_required_components(pybind11)
+
+if(TARGET pybind11::python_link_helper)
+ # This has already been setup elsewhere, such as with a previous call or
+ # add_subdirectory
+ return()
endif()
+
+include("${CMAKE_CURRENT_LIST_DIR}/pybind11Targets.cmake")
+
+# Easier to use / remember
+add_library(pybind11::headers IMPORTED INTERFACE)
+set_target_properties(pybind11::headers PROPERTIES INTERFACE_LINK_LIBRARIES
+ pybind11::pybind11_headers)
+
+include("${CMAKE_CURRENT_LIST_DIR}/pybind11Common.cmake")
+
+if(NOT pybind11_FIND_QUIETLY)
+ message(
+ STATUS
+ "Found pybind11: ${pybind11_INCLUDE_DIR} (found version \"${pybind11_VERSION}\" ${pybind11_VERSION_TYPE})"
+ )
endif()
--- /dev/null
+# tools/pybind11NewTools.cmake -- Build system for the pybind11 modules
+#
+# Copyright (c) 2020 Wenzel Jakob <wenzel@inf.ethz.ch> and Henry Schreiner
+#
+# All rights reserved. Use of this source code is governed by a
+# BSD-style license that can be found in the LICENSE file.
+
+get_property(
+ is_config
+ TARGET pybind11::headers
+ PROPERTY IMPORTED)
+
+if(pybind11_FIND_QUIETLY)
+ set(_pybind11_quiet QUIET)
+else()
+ set(_pybind11_quiet "")
+endif()
+
+if(CMAKE_VERSION VERSION_LESS 3.12)
+ message(FATAL_ERROR "You cannot use the new FindPython module with CMake < 3.12")
+endif()
+
+if(NOT Python_FOUND
+ AND NOT Python3_FOUND
+ AND NOT Python2_FOUND)
+ if(NOT DEFINED Python_FIND_IMPLEMENTATIONS)
+ set(Python_FIND_IMPLEMENTATIONS CPython PyPy)
+ endif()
+
+ # GitHub Actions like activation
+ if(NOT DEFINED Python_ROOT_DIR AND DEFINED ENV{pythonLocation})
+ set(Python_ROOT_DIR "$ENV{pythonLocation}")
+ endif()
+
+ find_package(Python REQUIRED COMPONENTS Interpreter Development ${_pybind11_quiet})
+
+ # If we are in submodule mode, export the Python targets to global targets.
+  # If this behavior is not desired, run FindPython _before_ pybind11.
+ if(NOT is_config)
+ set_property(TARGET Python::Python PROPERTY IMPORTED_GLOBAL TRUE)
+ set_property(TARGET Python::Interpreter PROPERTY IMPORTED_GLOBAL TRUE)
+ if(TARGET Python::Module)
+ set_property(TARGET Python::Module PROPERTY IMPORTED_GLOBAL TRUE)
+ endif()
+ endif()
+endif()
+
+if(Python_FOUND)
+ set(_Python
+ Python
+ CACHE INTERNAL "" FORCE)
+elseif(Python3_FOUND AND NOT Python2_FOUND)
+ set(_Python
+ Python3
+ CACHE INTERNAL "" FORCE)
+elseif(Python2_FOUND AND NOT Python3_FOUND)
+ set(_Python
+ Python2
+ CACHE INTERNAL "" FORCE)
+else()
+ message(AUTHOR_WARNING "Python2 and Python3 both present, pybind11 in "
+ "PYBIND11_NOPYTHON mode (manually activate to silence warning)")
+ set(_pybind11_nopython ON)
+ return()
+endif()
+
+if(PYBIND11_MASTER_PROJECT)
+ if(${_Python}_INTERPRETER_ID MATCHES "PyPy")
+ message(STATUS "PyPy ${${_Python}_PyPy_VERSION} (Py ${${_Python}_VERSION})")
+ else()
+ message(STATUS "${_Python} ${${_Python}_VERSION}")
+ endif()
+endif()
+
+# If a user finds Python, they may forget to include the Interpreter component
+# and the following two steps require it. It is highly recommended by CMake
+# when finding development libraries anyway, so we will require it.
+if(NOT DEFINED ${_Python}_EXECUTABLE)
+ message(
+ FATAL_ERROR
+ "${_Python} was found without the Interpreter component. Pybind11 requires this component.")
+
+endif()
+
+if(NOT DEFINED PYTHON_IS_DEBUG)
+ # Debug check - see https://stackoverflow.com/questions/646518/python-how-to-detect-debug-Interpreter
+ execute_process(
+ COMMAND "${${_Python}_EXECUTABLE}" "-c"
+ "import sys; sys.exit(hasattr(sys, 'gettotalrefcount'))"
+ RESULT_VARIABLE _PYTHON_IS_DEBUG)
+ set(PYTHON_IS_DEBUG
+ "${_PYTHON_IS_DEBUG}"
+ CACHE INTERNAL "Python debug status")
+endif()
+
+# Get the suffix - SO is deprecated, should use EXT_SUFFIX, but this is
+# required for PyPy3 (as of 7.3.1)
+if(NOT DEFINED PYTHON_MODULE_EXTENSION)
+ execute_process(
+ COMMAND
+ "${${_Python}_EXECUTABLE}" "-c"
+ "from distutils import sysconfig as s;print(s.get_config_var('EXT_SUFFIX') or s.get_config_var('SO'))"
+ OUTPUT_VARIABLE _PYTHON_MODULE_EXTENSION
+ ERROR_VARIABLE _PYTHON_MODULE_EXTENSION_ERR
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+ if(_PYTHON_MODULE_EXTENSION STREQUAL "")
+ message(
+ FATAL_ERROR "pybind11 could not query the module file extension, likely the 'distutils'"
+ "package is not installed. Full error message:\n${_PYTHON_MODULE_EXTENSION_ERR}")
+ endif()
+
+ # This needs to be available for the pybind11_extension function
+ set(PYTHON_MODULE_EXTENSION
+ "${_PYTHON_MODULE_EXTENSION}"
+ CACHE INTERNAL "")
+endif()
+
+# Python debug libraries expose slightly different objects before 3.8
+# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
+# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
+if(PYTHON_IS_DEBUG)
+ set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
+endif()
+
+# Check on every access - since Python2 and Python3 could have been used - do nothing in that case.
+
+if(DEFINED ${_Python}_INCLUDE_DIRS)
+ # Only add Python for build - must be added during the import for config
+ # since it has to be re-discovered.
+ #
+ # This needs to be a target to be included after the local pybind11
+  # directory, just in case there is an installed pybind11 sitting
+ # next to Python's includes. It also ensures Python is a SYSTEM library.
+ add_library(pybind11::python_headers INTERFACE IMPORTED)
+ set_property(
+ TARGET pybind11::python_headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+ "$<BUILD_INTERFACE:${${_Python}_INCLUDE_DIRS}>")
+ set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_headers)
+ set(pybind11_INCLUDE_DIRS
+ "${pybind11_INCLUDE_DIR}" "${${_Python}_INCLUDE_DIRS}"
+ CACHE INTERNAL "Directories where pybind11 and possibly Python headers are located")
+endif()
+
+if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
+ set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python2_no_register)
+endif()
+
+# In CMake 3.18+, you can find these separately, so include an if
+if(TARGET ${_Python}::Python)
+ set_property(
+ TARGET pybind11::embed
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Python)
+endif()
+
+# CMake 3.15+ has this
+if(TARGET ${_Python}::Module)
+ set_property(
+ TARGET pybind11::module
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Module)
+else()
+ set_property(
+ TARGET pybind11::module
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_link_helper)
+endif()
+
+# WITHOUT_SOABI and WITH_SOABI will disable the custom extension handling used by pybind11.
+# WITH_SOABI is passed on to python_add_library.
+function(pybind11_add_module target_name)
+ cmake_parse_arguments(PARSE_ARGV 1 ARG
+ "STATIC;SHARED;MODULE;THIN_LTO;OPT_SIZE;NO_EXTRAS;WITHOUT_SOABI" "" "")
+
+ if(ARG_STATIC)
+ set(lib_type STATIC)
+ elseif(ARG_SHARED)
+ set(lib_type SHARED)
+ else()
+ set(lib_type MODULE)
+ endif()
+
+ if("${_Python}" STREQUAL "Python")
+ python_add_library(${target_name} ${lib_type} ${ARG_UNPARSED_ARGUMENTS})
+ elseif("${_Python}" STREQUAL "Python3")
+ python3_add_library(${target_name} ${lib_type} ${ARG_UNPARSED_ARGUMENTS})
+ elseif("${_Python}" STREQUAL "Python2")
+ python2_add_library(${target_name} ${lib_type} ${ARG_UNPARSED_ARGUMENTS})
+ else()
+ message(FATAL_ERROR "Cannot detect FindPython version: ${_Python}")
+ endif()
+
+ target_link_libraries(${target_name} PRIVATE pybind11::headers)
+
+ if(lib_type STREQUAL "MODULE")
+ target_link_libraries(${target_name} PRIVATE pybind11::module)
+ else()
+ target_link_libraries(${target_name} PRIVATE pybind11::embed)
+ endif()
+
+ if(MSVC)
+ target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
+ endif()
+
+ if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
+ target_link_libraries(${target_name} PRIVATE pybind11::python2_no_register)
+ endif()
+
+ # -fvisibility=hidden is required to allow multiple modules compiled against
+ # different pybind versions to work properly, and for some features (e.g.
+ # py::module_local). We force it on everything inside the `pybind11`
+ # namespace; also turning it on for a pybind module compilation here avoids
+ # potential warnings or issues from having mixed hidden/non-hidden types.
+ if(NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET)
+ set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
+ endif()
+
+ if(NOT DEFINED CMAKE_CUDA_VISIBILITY_PRESET)
+ set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET "hidden")
+ endif()
+
+ # If we don't pass a WITH_SOABI or WITHOUT_SOABI, use our own default handling of extensions
+ if(NOT ARG_WITHOUT_SOABI OR NOT "WITH_SOABI" IN_LIST ARG_UNPARSED_ARGUMENTS)
+ pybind11_extension(${target_name})
+ endif()
+
+ if(ARG_NO_EXTRAS)
+ return()
+ endif()
+
+ if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
+ if(ARG_THIN_LTO)
+ target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
+ else()
+ target_link_libraries(${target_name} PRIVATE pybind11::lto)
+ endif()
+ endif()
+
+ if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
+ # Strip unnecessary sections of the binary on Linux/macOS
+ pybind11_strip(${target_name})
+ endif()
+
+ if(MSVC)
+ target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
+ endif()
+
+ if(ARG_OPT_SIZE)
+ target_link_libraries(${target_name} PRIVATE pybind11::opt_size)
+ endif()
+endfunction()
+
+function(pybind11_extension name)
+ # The extension is precomputed
+ set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${PYTHON_MODULE_EXTENSION}")
+
+endfunction()
# tools/pybind11Tools.cmake -- Build system for the pybind11 modules
#
-# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>
+# Copyright (c) 2020 Wenzel Jakob <wenzel.jakob@epfl.ch>
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-cmake_minimum_required(VERSION 2.8.12)
+# Built-in in CMake 3.5+
+include(CMakeParseArguments)
-# Add a CMake parameter for choosing a desired Python version
-if(NOT PYBIND11_PYTHON_VERSION)
- set(PYBIND11_PYTHON_VERSION "" CACHE STRING "Python version to use for compiling modules")
+if(pybind11_FIND_QUIETLY)
+ set(_pybind11_quiet QUIET)
+else()
+ set(_pybind11_quiet "")
endif()
-set(Python_ADDITIONAL_VERSIONS 3.7 3.6 3.5 3.4)
-find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} REQUIRED)
-
-include(CheckCXXCompilerFlag)
-include(CMakeParseArguments)
-
-if(NOT PYBIND11_CPP_STANDARD AND NOT CMAKE_CXX_STANDARD)
- if(NOT MSVC)
- check_cxx_compiler_flag("-std=c++14" HAS_CPP14_FLAG)
+# If this is the first run, PYTHON_VERSION can stand in for PYBIND11_PYTHON_VERSION
+if(NOT DEFINED PYBIND11_PYTHON_VERSION AND DEFINED PYTHON_VERSION)
+ message(WARNING "Set PYBIND11_PYTHON_VERSION to search for a specific version, not "
+ "PYTHON_VERSION (which is an output). Assuming that is what you "
+ "meant to do and continuing anyway.")
+ set(PYBIND11_PYTHON_VERSION
+ "${PYTHON_VERSION}"
+ CACHE STRING "Python version to use for compiling modules")
+ unset(PYTHON_VERSION)
+ unset(PYTHON_VERSION CACHE)
+elseif(DEFINED PYBIND11_PYTHON_VERSION)
+ # If this is set as a normal variable, promote it
+ set(PYBIND11_PYTHON_VERSION
+ "${PYBIND11_PYTHON_VERSION}"
+ CACHE STRING "Python version to use for compiling modules")
+else()
+ # Make an empty cache variable.
+ set(PYBIND11_PYTHON_VERSION
+ ""
+ CACHE STRING "Python version to use for compiling modules")
+endif()
- if (HAS_CPP14_FLAG)
- set(PYBIND11_CPP_STANDARD -std=c++14)
- else()
- check_cxx_compiler_flag("-std=c++11" HAS_CPP11_FLAG)
- if (HAS_CPP11_FLAG)
- set(PYBIND11_CPP_STANDARD -std=c++11)
- else()
- message(FATAL_ERROR "Unsupported compiler -- pybind11 requires C++11 support!")
- endif()
+# A user can set versions manually too
+set(Python_ADDITIONAL_VERSIONS
+ "3.10;3.9;3.8;3.7;3.6;3.5;3.4"
+ CACHE INTERNAL "")
+
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
+find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} MODULE REQUIRED ${_pybind11_quiet})
+list(REMOVE_AT CMAKE_MODULE_PATH -1)
+
+# Cache variables so pybind11_add_module can be used in parent projects
+set(PYTHON_INCLUDE_DIRS
+ ${PYTHON_INCLUDE_DIRS}
+ CACHE INTERNAL "")
+set(PYTHON_LIBRARIES
+ ${PYTHON_LIBRARIES}
+ CACHE INTERNAL "")
+set(PYTHON_MODULE_PREFIX
+ ${PYTHON_MODULE_PREFIX}
+ CACHE INTERNAL "")
+set(PYTHON_MODULE_EXTENSION
+ ${PYTHON_MODULE_EXTENSION}
+ CACHE INTERNAL "")
+set(PYTHON_VERSION_MAJOR
+ ${PYTHON_VERSION_MAJOR}
+ CACHE INTERNAL "")
+set(PYTHON_VERSION_MINOR
+ ${PYTHON_VERSION_MINOR}
+ CACHE INTERNAL "")
+set(PYTHON_VERSION
+ ${PYTHON_VERSION}
+ CACHE INTERNAL "")
+set(PYTHON_IS_DEBUG
+ "${PYTHON_IS_DEBUG}"
+ CACHE INTERNAL "")
+
+if(PYBIND11_MASTER_PROJECT)
+ if(PYTHON_MODULE_EXTENSION MATCHES "pypy")
+ if(NOT DEFINED PYPY_VERSION)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c
+ [=[import sys; sys.stdout.write(".".join(map(str, sys.pypy_version_info[:3])))]=]
+ OUTPUT_VARIABLE pypy_version)
+ set(PYPY_VERSION
+ ${pypy_version}
+ CACHE INTERNAL "")
endif()
- elseif(MSVC)
- set(PYBIND11_CPP_STANDARD /std:c++14)
+ message(STATUS "PYPY ${PYPY_VERSION} (Py ${PYTHON_VERSION})")
+ else()
+ message(STATUS "PYTHON ${PYTHON_VERSION}")
endif()
-
- set(PYBIND11_CPP_STANDARD ${PYBIND11_CPP_STANDARD} CACHE STRING
- "C++ standard flag, e.g. -std=c++11, -std=c++14, /std:c++14. Defaults to C++14 mode." FORCE)
endif()
-# Checks whether the given CXX/linker flags can compile and link a cxx file. cxxflags and
-# linkerflags are lists of flags to use. The result variable is a unique variable name for each set
-# of flags: the compilation result will be cached base on the result variable. If the flags work,
-# sets them in cxxflags_out/linkerflags_out internal cache variables (in addition to ${result}).
-function(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out linkerflags_out)
- set(CMAKE_REQUIRED_LIBRARIES ${linkerflags})
- check_cxx_compiler_flag("${cxxflags}" ${result})
- if (${result})
- set(${cxxflags_out} "${cxxflags}" CACHE INTERNAL "" FORCE)
- set(${linkerflags_out} "${linkerflags}" CACHE INTERNAL "" FORCE)
- endif()
-endfunction()
+# Only add Python for build - must be added during the import for config since
+# it has to be re-discovered.
+#
+# This needs to be a target so that it is included after the local pybind11
+# directory; in case there are multiple versions of pybind11 present, we want
+# the one we expect.
+add_library(pybind11::python_headers INTERFACE IMPORTED)
+set_property(TARGET pybind11::python_headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+ "$<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>")
+set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_headers)
+
+set(pybind11_INCLUDE_DIRS
+ "${pybind11_INCLUDE_DIR}" "${PYTHON_INCLUDE_DIRS}"
+ CACHE INTERNAL "Directories where pybind11 and possibly Python headers are located")
+
+# Python debug libraries expose slightly different objects before 3.8
+# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
+# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
+if(PYTHON_IS_DEBUG)
+ set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
+endif()
-# Internal: find the appropriate link time optimization flags for this compiler
-function(_pybind11_add_lto_flags target_name prefer_thin_lto)
- if (NOT DEFINED PYBIND11_LTO_CXX_FLAGS)
- set(PYBIND11_LTO_CXX_FLAGS "" CACHE INTERNAL "")
- set(PYBIND11_LTO_LINKER_FLAGS "" CACHE INTERNAL "")
-
- if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
- set(cxx_append "")
- set(linker_append "")
- if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT APPLE)
- # Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it
- set(linker_append ";$<$<CONFIG:MinSizeRel>:-O3>")
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
- set(cxx_append ";-fno-fat-lto-objects")
- endif()
-
- if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND prefer_thin_lto)
- _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO_THIN
- "-flto=thin${cxx_append}" "-flto=thin${linker_append}"
- PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
- endif()
-
- if (NOT HAS_FLTO_THIN)
- _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO
- "-flto${cxx_append}" "-flto${linker_append}"
- PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
- endif()
- elseif (CMAKE_CXX_COMPILER_ID MATCHES "Intel")
- # Intel equivalent to LTO is called IPO
- _pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO
- "-ipo" "-ipo" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
- elseif(MSVC)
- # cmake only interprets libraries as linker flags when they start with a - (otherwise it
- # converts /LTCG to \LTCG as if it was a Windows path). Luckily MSVC supports passing flags
- # with - instead of /, even if it is a bit non-standard:
- _pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG
- "/GL" "-LTCG" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
- endif()
+set_property(
+ TARGET pybind11::module
+ APPEND
+ PROPERTY
+ INTERFACE_LINK_LIBRARIES pybind11::python_link_helper
+ "$<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:Cygwin>>:$<BUILD_INTERFACE:${PYTHON_LIBRARIES}>>")
+
+if(PYTHON_VERSION VERSION_LESS 3)
+ set_property(
+ TARGET pybind11::pybind11
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python2_no_register)
+endif()
- if (PYBIND11_LTO_CXX_FLAGS)
- message(STATUS "LTO enabled")
- else()
- message(STATUS "LTO disabled (not supported by the compiler and/or linker)")
- endif()
- endif()
+set_property(
+ TARGET pybind11::embed
+ APPEND
+ PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11 $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)
- # Enable LTO flags if found, except for Debug builds
- if (PYBIND11_LTO_CXX_FLAGS)
- target_compile_options(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_CXX_FLAGS}>")
- endif()
- if (PYBIND11_LTO_LINKER_FLAGS)
- target_link_libraries(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_LINKER_FLAGS}>")
- endif()
+function(pybind11_extension name)
+ # The prefix and extension are provided by FindPythonLibsNew.cmake
+ set_target_properties(${name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
+ SUFFIX "${PYTHON_MODULE_EXTENSION}")
endfunction()
# Build a Python extension module:
# pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
-# [NO_EXTRAS] [SYSTEM] [THIN_LTO] source1 [source2 ...])
+# [NO_EXTRAS] [THIN_LTO] [OPT_SIZE] source1 [source2 ...])
#
function(pybind11_add_module target_name)
- set(options MODULE SHARED EXCLUDE_FROM_ALL NO_EXTRAS SYSTEM THIN_LTO)
+ set(options "MODULE;SHARED;EXCLUDE_FROM_ALL;NO_EXTRAS;SYSTEM;THIN_LTO;OPT_SIZE")
cmake_parse_arguments(ARG "${options}" "" "" ${ARGN})
if(ARG_MODULE AND ARG_SHARED)
if(ARG_EXCLUDE_FROM_ALL)
set(exclude_from_all EXCLUDE_FROM_ALL)
+ else()
+ set(exclude_from_all "")
endif()
add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS})
- if(ARG_SYSTEM)
- set(inc_isystem SYSTEM)
- endif()
+ target_link_libraries(${target_name} PRIVATE pybind11::module)
- target_include_directories(${target_name} ${inc_isystem}
- PRIVATE ${PYBIND11_INCLUDE_DIR} # from project CMakeLists.txt
- PRIVATE ${pybind11_INCLUDE_DIR} # from pybind11Config
- PRIVATE ${PYTHON_INCLUDE_DIRS})
-
- # Python debug libraries expose slightly different objects
- # https://docs.python.org/3.6/c-api/intro.html#debugging-builds
- # https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
- if(PYTHON_IS_DEBUG)
- target_compile_definitions(${target_name} PRIVATE Py_DEBUG)
+ if(ARG_SYSTEM)
+ message(
+ STATUS
+ "Warning: this does not have an effect - use NO_SYSTEM_FROM_IMPORTED if using imported targets"
+ )
endif()
- # The prefix and extension are provided by FindPythonLibsNew.cmake
- set_target_properties(${target_name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}")
- set_target_properties(${target_name} PROPERTIES SUFFIX "${PYTHON_MODULE_EXTENSION}")
+ pybind11_extension(${target_name})
# -fvisibility=hidden is required to allow multiple modules compiled against
# different pybind versions to work properly, and for some features (e.g.
# py::module_local). We force it on everything inside the `pybind11`
# namespace; also turning it on for a pybind module compilation here avoids
# potential warnings or issues from having mixed hidden/non-hidden types.
- set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
- set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET "hidden")
-
- if(WIN32 OR CYGWIN)
- # Link against the Python shared library on Windows
- target_link_libraries(${target_name} PRIVATE ${PYTHON_LIBRARIES})
- elseif(APPLE)
- # It's quite common to have multiple copies of the same Python version
- # installed on one's system. E.g.: one copy from the OS and another copy
- # that's statically linked into an application like Blender or Maya.
- # If we link our plugin library against the OS Python here and import it
- # into Blender or Maya later on, this will cause segfaults when multiple
- # conflicting Python instances are active at the same time (even when they
- # are of the same version).
-
- # Windows is not affected by this issue since it handles DLL imports
- # differently. The solution for Linux and Mac OS is simple: we just don't
- # link against the Python library. The resulting shared library will have
- # missing symbols, but that's perfectly fine -- they will be resolved at
- # import time.
-
- target_link_libraries(${target_name} PRIVATE "-undefined dynamic_lookup")
-
- if(ARG_SHARED)
- # Suppress CMake >= 3.0 warning for shared libraries
- set_target_properties(${target_name} PROPERTIES MACOSX_RPATH ON)
- endif()
+ if(NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET)
+ set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
endif()
- # Make sure C++11/14 are enabled
- if(CMAKE_VERSION VERSION_LESS 3.3)
- target_compile_options(${target_name} PUBLIC ${PYBIND11_CPP_STANDARD})
- else()
- target_compile_options(${target_name} PUBLIC $<$<COMPILE_LANGUAGE:CXX>:${PYBIND11_CPP_STANDARD}>)
+ if(NOT DEFINED CMAKE_CUDA_VISIBILITY_PRESET)
+ set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET "hidden")
endif()
if(ARG_NO_EXTRAS)
return()
endif()
- _pybind11_add_lto_flags(${target_name} ${ARG_THIN_LTO})
-
- if (NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
- # Strip unnecessary sections of the binary on Linux/Mac OS
- if(CMAKE_STRIP)
- if(APPLE)
- add_custom_command(TARGET ${target_name} POST_BUILD
- COMMAND ${CMAKE_STRIP} -x $<TARGET_FILE:${target_name}>)
- else()
- add_custom_command(TARGET ${target_name} POST_BUILD
- COMMAND ${CMAKE_STRIP} $<TARGET_FILE:${target_name}>)
- endif()
+ if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
+ if(ARG_THIN_LTO)
+ target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
+ else()
+ target_link_libraries(${target_name} PRIVATE pybind11::lto)
endif()
endif()
+ if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
+ pybind11_strip(${target_name})
+ endif()
+
if(MSVC)
- # /MP enables multithreaded builds (relevant when there are many files), /bigobj is
- # needed for bigger binding projects due to the limit to 64k addressable sections
- target_compile_options(${target_name} PRIVATE /bigobj)
- if(CMAKE_VERSION VERSION_LESS 3.11)
- target_compile_options(${target_name} PRIVATE $<$<NOT:$<CONFIG:Debug>>:/MP>)
- else()
- # Only set these options for C++ files. This is important so that, for
- # instance, projects that include other types of source files like CUDA
- # .cu files don't get these options propagated to nvcc since that would
- # cause the build to fail.
- target_compile_options(${target_name} PRIVATE $<$<NOT:$<CONFIG:Debug>>:$<$<COMPILE_LANGUAGE:CXX>:/MP>>)
- endif()
+ target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
+ endif()
+
+ if(ARG_OPT_SIZE)
+ target_link_libraries(${target_name} PRIVATE pybind11::opt_size)
endif()
endfunction()
+
+# Provide general way to call common Python commands in "common" file.
+set(_Python
+ PYTHON
+ CACHE INTERNAL "" FORCE)
--- /dev/null
+[build-system]
+requires = ["setuptools>=42", "wheel"]
+build-backend = "setuptools.build_meta"
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Setup script for pybind11-global (in the sdist or in tools/setup_global.py in the repository)
+# This package is targeted for easy use from CMake.
+
+import contextlib
+import glob
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+# Setuptools has to be before distutils
+from setuptools import setup
+
+from distutils.command.install_headers import install_headers
+
+class InstallHeadersNested(install_headers):
+ def run(self):
+ headers = self.distribution.headers or []
+ for header in headers:
+ # Remove pybind11/include/
+ short_header = header.split("/", 2)[-1]
+
+ dst = os.path.join(self.install_dir, os.path.dirname(short_header))
+ self.mkpath(dst)
+ (out, _) = self.copy_file(header, dst)
+ self.outfiles.append(out)
+
+
+main_headers = glob.glob("pybind11/include/pybind11/*.h")
+detail_headers = glob.glob("pybind11/include/pybind11/detail/*.h")
+cmake_files = glob.glob("pybind11/share/cmake/pybind11/*.cmake")
+headers = main_headers + detail_headers
+
+cmdclass = {"install_headers": InstallHeadersNested}
+$extra_cmd
+
+# This will _not_ affect installing from wheels,
+# only building wheels or installing from SDist.
+# Primarily intended on Windows, where this is sometimes
+# customized (for example, conda-forge uses Library/)
+base = os.environ.get("PYBIND11_GLOBAL_PREFIX", "")
+
+# Must have a separator
+if base and not base.endswith("/"):
+ base += "/"
+
+setup(
+ name="pybind11_global",
+ version="$version",
+ packages=[],
+ headers=headers,
+ data_files=[
+ (base + "share/cmake/pybind11", cmake_files),
+ (base + "include/pybind11", main_headers),
+ (base + "include/pybind11/detail", detail_headers),
+ ],
+ cmdclass=cmdclass,
+)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Setup script (in the sdist or in tools/setup_main.py in the repository)
+
+from setuptools import setup
+
+cmdclass = {}
+$extra_cmd
+
+setup(
+ name="pybind11",
+ version="$version",
+ download_url='https://github.com/pybind/pybind11/tarball/v$version',
+ packages=[
+ "pybind11",
+ "pybind11.include.pybind11",
+ "pybind11.include.pybind11.detail",
+ "pybind11.share.cmake.pybind11",
+ ],
+ package_data={
+ "pybind11": ["py.typed", "*.pyi"],
+ "pybind11.include.pybind11": ["*.h"],
+ "pybind11.include.pybind11.detail": ["*.h"],
+ "pybind11.share.cmake.pybind11": ["*.cmake"],
+ },
+ extras_require={
+ "global": ["pybind11_global==$version"]
+ },
+ entry_points={
+ "console_scripts": [
+ "pybind11-config = pybind11.__main__:main",
+ ]
+ },
+ cmdclass=cmdclass
+)
{
void
-arm_fast_model_pybind(pybind11::module &m_internal)
+arm_fast_model_pybind(pybind11::module_ &m_internal)
{
auto arm_fast_model = m_internal.def_submodule("arm_fast_model");
arm_fast_model
}
void
-pybind_init_tracers(py::module &m_native)
+pybind_init_tracers(py::module_ &m_native)
{
using namespace pybind11::literals;
- py::module m = m_native.def_submodule("trace");
+ py::module_ m = m_native.def_submodule("trace");
py::class_<BaseGen, std::shared_ptr<BaseGen>> c_base(m, "BaseGen");
}
code('''namespace py = pybind11;
static void
-module_init(py::module &m_internal)
+module_init(py::module_ &m_internal)
{
- py::module m = m_internal.def_submodule("param_${cls}");
+ py::module_ m = m_internal.def_submodule("param_${cls}");
''')
code.indent()
if cls._base:
namespace py = pybind11;
static void
-module_init(py::module &m_internal)
+module_init(py::module_ &m_internal)
{
- py::module m = m_internal.def_submodule("enum_${name}");
+ py::module_ m = m_internal.def_submodule("enum_${name}");
''')
if cls.is_class:
PybindSimObjectResolver::resolveSimObject(const std::string &name)
{
// TODO
- py::module m = py::module::import("m5.SimObject");
+ py::module_ m = py::module_::import("m5.SimObject");
auto f = m.attr("resolveSimObject");
return f(name).cast<SimObject *>();
const bool flag_TRACING_ON = TRACING_ON;
static void
-init_drain(py::module &m_native)
+init_drain(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("drain");
+ py::module_ m = m_native.def_submodule("drain");
py::enum_<DrainState>(m, "DrainState")
.value("Running", DrainState::Running)
}
static void
-init_serialize(py::module &m_native)
+init_serialize(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("serialize");
+ py::module_ m = m_native.def_submodule("serialize");
py::class_<Serializable, std::unique_ptr<Serializable, py::nodelete>>(
m, "Serializable")
}
static void
-init_range(py::module &m_native)
+init_range(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("range");
+ py::module_ m = m_native.def_submodule("range");
py::class_<AddrRange>(m, "AddrRange")
.def(py::init<>())
}
static void
-init_net(py::module &m_native)
+init_net(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("net");
+ py::module_ m = m_native.def_submodule("net");
py::class_<Net::EthAddr>(m, "EthAddr")
.def(py::init<>())
}
static void
-init_loader(py::module &m_native)
+init_loader(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("loader");
+ py::module_ m = m_native.def_submodule("loader");
m.def("setInterpDir", &Loader::setInterpDir);
}
void
-pybind_init_core(py::module &m_native)
+pybind_init_core(py::module_ &m_native)
{
- py::module m_core = m_native.def_submodule("core");
+ py::module_ m_core = m_native.def_submodule("core");
py::class_<Cycles>(m_core, "Cycles")
.def(py::init<>())
}
void
-pybind_init_debug(py::module &m_native)
+pybind_init_debug(py::module_ &m_native)
{
- py::module m_debug = m_native.def_submodule("debug");
+ py::module_ m_debug = m_native.def_submodule("debug");
m_debug
.def("getAllFlagsVersion", []() { return Debug::allFlagsVersion; })
;
- py::module m_trace = m_native.def_submodule("trace");
+ py::module_ m_trace = m_native.def_submodule("trace");
m_trace
.def("output", &output)
.def("ignore", &ignore)
};
void
-pybind_init_event(py::module &m_native)
+pybind_init_event(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("event");
+ py::module_ m = m_native.def_submodule("event");
m.def("simulate", &simulate,
py::arg("ticks") = MaxTick);
{
void
-objectfile_pybind(py::module &m_internal)
+objectfile_pybind(py::module_ &m_internal)
{
- py::module m = m_internal.def_submodule("object_file");
+ py::module_ m = m_internal.def_submodule("object_file");
py::class_<Loader::ObjectFile>(m, "ObjectFile")
.def("get_arch", [](const Loader::ObjectFile &obj) {
#include "pybind11/pybind11.h"
-void pybind_init_core(pybind11::module &m_native);
-void pybind_init_debug(pybind11::module &m_native);
+void pybind_init_core(pybind11::module_ &m_native);
+void pybind_init_debug(pybind11::module_ &m_native);
-void pybind_init_event(pybind11::module &m_native);
-void pybind_init_stats(pybind11::module &m_native);
+void pybind_init_event(pybind11::module_ &m_native);
+void pybind_init_stats(pybind11::module_ &m_native);
#endif
void
pythonDump()
{
- py::module m = py::module::import("m5.stats");
+ py::module_ m = py::module_::import("m5.stats");
m.attr("dump")();
}
void
pythonReset()
{
- py::module m = py::module::import("m5.stats");
+ py::module_ m = py::module_::import("m5.stats");
m.attr("reset")();
}
}
void
-pybind_init_stats(py::module &m_native)
+pybind_init_stats(py::module_ &m_native)
{
- py::module m = m_native.def_submodule("stats");
+ py::module_ m = m_native.def_submodule("stats");
m
.def("initSimStats", &Stats::initSimStats)
}
EmbeddedPyBind::EmbeddedPyBind(const char *_name,
- void (*init_func)(py::module &),
+ void (*init_func)(py::module_ &),
const char *_base)
: initFunc(init_func), registered(false), name(_name), base(_base)
{
}
EmbeddedPyBind::EmbeddedPyBind(const char *_name,
- void (*init_func)(py::module &))
+ void (*init_func)(py::module_ &))
: initFunc(init_func), registered(false), name(_name), base("")
{
getMap()[_name] = this;
}
void
-EmbeddedPyBind::init(py::module &m)
+EmbeddedPyBind::init(py::module_ &m)
{
if (!registered) {
initFunc(m);
{
std::list<EmbeddedPyBind *> pending;
- py::module m_m5 = py::module("_m5");
+ py::module_ m_m5 = py::module_("_m5");
m_m5.attr("__package__") = py::cast("_m5");
pybind_init_core(m_m5);
{
public:
EmbeddedPyBind(const char *_name,
- void (*init_func)(pybind11::module &),
+ void (*init_func)(pybind11::module_ &),
const char *_base);
EmbeddedPyBind(const char *_name,
- void (*init_func)(pybind11::module &));
+ void (*init_func)(pybind11::module_ &));
#if PY_MAJOR_VERSION >= 3
static PyObject *initAll();
#endif
private:
- void (*initFunc)(pybind11::module &);
+ void (*initFunc)(pybind11::module_ &);
bool depsReady() const;
- void init(pybind11::module &m);
+ void init(pybind11::module_ &m);
bool registered;
const std::string name;
{
void
-sim_pybind(pybind11::module &m_internal)
+sim_pybind(pybind11::module_ &m_internal)
{
- pybind11::module m = m_internal.def_submodule("sim");
+ pybind11::module_ m = m_internal.def_submodule("sim");
pybind11::class_<
Port, std::unique_ptr<Port, pybind11::nodelete>>(m, "Port")
.def("bind", &Port::bind)
}
void
-systemc_pybind(pybind11::module &m_internal)
+systemc_pybind(pybind11::module_ &m_internal)
{
- pybind11::module m = m_internal.def_submodule("systemc");
+ pybind11::module_ m = m_internal.def_submodule("systemc");
m.def("python_ready", &python_ready);
for (auto ptr = firstInitFunc(); ptr; ptr = ptr->next)
ptr->run(m);
PythonInitFunc();
~PythonInitFunc() {}
- virtual void run(pybind11::module &systemc) = 0;
+ virtual void run(pybind11::module_ &systemc) = 0;
};
} // namespace sc_gem5
struct InstallScMain : public ::sc_gem5::PythonInitFunc
{
void
- run(pybind11::module &systemc) override
+ run(pybind11::module_ &systemc) override
{
systemc.def("sc_main", &sc_main);
systemc.def("sc_main_result_code", &sc_main_result_code);
struct InstallScTime : public ::sc_gem5::PythonInitFunc
{
void
- run(pybind11::module &systemc) override
+ run(pybind11::module_ &systemc) override
{
pybind11::class_<sc_core::sc_time> sc_time(systemc, "sc_time");
sc_time
struct InstallTlmGlobalQuantum : public ::sc_gem5::PythonInitFunc
{
void
- run(pybind11::module &systemc) override
+ run(pybind11::module_ &systemc) override
{
pybind11::class_<tlm::tlm_global_quantum>(
systemc, "tlm_global_quantum")
}
static void
-stattest_init_pybind(py::module &m_internal)
+stattest_init_pybind(py::module_ &m_internal)
{
- py::module m = m_internal.def_submodule("stattest");
+ py::module_ m = m_internal.def_submodule("stattest");
m
.def("stattest_init", []() { __stattest().init(); })