mirror of
https://github.com/aykhans/AzSuicideDataVisualization.git
synced 2025-07-02 06:22:25 +00:00
first commit
This commit is contained in:
119
.venv/Lib/site-packages/numpy/distutils/__config__.py
Normal file
119
.venv/Lib/site-packages/numpy/distutils/__config__.py
Normal file
@ -0,0 +1,119 @@
|
||||
# This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info", "show"]


import os
import sys

# Directory holding DLLs (e.g. OpenBLAS) bundled into the wheel.
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')

if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
    if sys.version_info >= (3, 8):
        # Python 3.8+ on Windows no longer searches PATH for DLL
        # dependencies; register the directory explicitly.
        os.add_dll_directory(extra_dll_dir)
    else:
        # Older Pythons: fall back to extending PATH.
        os.environ.setdefault('PATH', '')
        os.environ['PATH'] += os.pathsep + extra_dll_dir

# BLAS/LAPACK configuration recorded at build time (generated data).
openblas64__info = {'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
blas_ilp64_opt_info = {'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
openblas64__lapack_info = {'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
lapack_ilp64_opt_info = {'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
def get_info(name):
    """Return the build-time system_info dict recorded for *name*.

    Looks up ``name`` itself first, then ``name + "_info"``, among the
    module-level configuration entries; returns ``{}`` when neither was
    recorded at build time.
    """
    module_vars = globals()
    if name in module_vars:
        return module_vars[name]
    return module_vars.get(name + "_info", {})
def show():
    """
    Show libraries in the system on which NumPy was built.

    Print information about various resources (libraries, library
    directories, include directories, etc.) in the system on which
    NumPy was built.

    See Also
    --------
    get_include : Returns the directory containing NumPy C
                  header files.

    Notes
    -----
    1. Classes specifying the information to be printed are defined
       in the `numpy.distutils.system_info` module.

       Information may include:

       * ``language``: language used to write the libraries (mostly
         C or f77)
       * ``libraries``: names of libraries found in the system
       * ``library_dirs``: directories containing the libraries
       * ``include_dirs``: directories containing library header files
       * ``src_dirs``: directories containing library source files
       * ``define_macros``: preprocessor macros used by
         ``distutils.setup``
       * ``baseline``: minimum CPU features required
       * ``found``: dispatched features supported in the system
       * ``not found``: dispatched features that are not supported
         in the system

    2. NumPy BLAS/LAPACK Installation Notes

       Installing a numpy wheel (``pip install numpy`` or force it
       via ``pip install numpy --only-binary :numpy: numpy``) includes
       an OpenBLAS implementation of the BLAS and LAPACK linear algebra
       APIs. In this case, ``library_dirs`` reports the original build
       time configuration as compiled with gcc/gfortran; at run time
       the OpenBLAS library is in
       ``site-packages/numpy.libs/`` (linux), or
       ``site-packages/numpy/.dylibs/`` (macOS), or
       ``site-packages/numpy/.libs/`` (windows).

       Installing numpy from source
       (``pip install numpy --no-binary numpy``) searches for BLAS and
       LAPACK dynamic link libraries at build time as influenced by
       environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
       NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
       or the optional file ``~/.numpy-site.cfg``.
       NumPy remembers those locations and expects to load the same
       libraries at run-time.
       In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
       library) is in the default build-time search order after
       'openblas'.

    Examples
    --------
    >>> import numpy as np
    >>> np.show_config()
    blas_opt_info:
        language = c
        define_macros = [('HAVE_CBLAS', None)]
        libraries = ['openblas', 'openblas']
        library_dirs = ['/usr/local/lib']
    """
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    # Dump every module-level *_info dict (skip private names; the exact
    # `type(...) is dict` check is intentional -- only plain dicts count).
    for name, info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}):
            continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for key, value in info_dict.items():
            text = str(value)
            if key == "sources" and len(text) > 200:
                # very long source lists are elided in the middle
                text = text[:60] + " ...\n... " + text[-60:]
            print("    %s = %s" % (key, text))

    features_found = [feat for feat in __cpu_dispatch__
                      if __cpu_features__[feat]]
    features_not_found = [feat for feat in __cpu_dispatch__
                          if not __cpu_features__[feat]]

    print("Supported SIMD extensions in this NumPy install:")
    print("    baseline = %s" % (','.join(__cpu_baseline__)))
    print("    found = %s" % (','.join(features_found)))
    print("    not found = %s" % (','.join(features_not_found)))
51
.venv/Lib/site-packages/numpy/distutils/__init__.py
Normal file
51
.venv/Lib/site-packages/numpy/distutils/__init__.py
Normal file
@ -0,0 +1,51 @@
|
||||
"""
|
||||
An enhanced distutils, providing support for Fortran compilers, for BLAS,
|
||||
LAPACK and other common libraries for numerical computing, and more.
|
||||
|
||||
Public submodules are::
|
||||
|
||||
misc_util
|
||||
system_info
|
||||
cpu_info
|
||||
log
|
||||
exec_command
|
||||
|
||||
For details, please see the *Packaging* and *NumPy Distutils User Guide*
|
||||
sections of the NumPy Reference Guide.
|
||||
|
||||
For configuring the preference for and location of libraries like BLAS and
|
||||
LAPACK, and for setting include paths and similar build options, please see
|
||||
``site.cfg.example`` in the root of the NumPy repository or sdist.
|
||||
|
||||
"""
|
||||
|
||||
# Must import local ccompiler ASAP in order to get
|
||||
# customized CCompiler.spawn effective.
|
||||
from . import ccompiler
|
||||
from . import unixccompiler
|
||||
|
||||
from .npy_pkg_config import *
|
||||
|
||||
# If numpy is installed, add distutils.test()
|
||||
try:
|
||||
from . import __config__
|
||||
# Normally numpy is installed if the above import works, but an interrupted
|
||||
# in-place build could also have left a __config__.py. In that case the
|
||||
# next import may still fail, so keep it inside the try block.
|
||||
from numpy._pytesttester import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def customized_fcompiler(plat=None, compiler=None):
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
c = new_fcompiler(plat=plat, compiler=compiler)
|
||||
c.customize()
|
||||
return c
|
||||
|
||||
def customized_ccompiler(plat=None, compiler=None, verbose=1):
    """Create and return a fully customized C compiler instance.

    Uses the package-local ``ccompiler`` module (imported above) so the
    customized ``CCompiler.spawn`` is in effect.
    """
    ccomp = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
    ccomp.customize('')
    return ccomp
4
.venv/Lib/site-packages/numpy/distutils/__init__.pyi
Normal file
4
.venv/Lib/site-packages/numpy/distutils/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from typing import Any
|
||||
|
||||
# TODO: remove when the full numpy namespace is defined
|
||||
def __getattr__(name: str) -> Any: ...
|
91
.venv/Lib/site-packages/numpy/distutils/_shell_utils.py
Normal file
91
.venv/Lib/site-packages/numpy/distutils/_shell_utils.py
Normal file
@ -0,0 +1,91 @@
|
||||
"""
|
||||
Helper functions for interacting with the shell, and consuming shell-style
|
||||
parameters provided in config files.
|
||||
"""
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
|
||||
|
||||
|
||||
class CommandLineParser:
    """Abstract splitter/joiner for command-line arguments.

    Implementations guarantee ``argv == split(join(argv))`` for every
    ``argv``; the converse need not hold, since ``join(split(cmd))`` may
    add or drop redundant escaping.
    """

    @staticmethod
    def join(argv):
        """Join a list of arguments into a single command-line string."""
        raise NotImplementedError

    @staticmethod
    def split(cmd):
        """Split a command-line string into a list of arguments."""
        raise NotImplementedError
class WindowsParser:
    """Split/join following the Microsoft C/C++ runtime rules.

    This is the behavior of ``subprocess.call("string")`` on Windows --
    note that it is _not_ the behavior of cmd.
    """

    @staticmethod
    def join(argv):
        # list2cmdline implements exactly the Windows (MSVCRT) syntax
        return subprocess.list2cmdline(argv)

    @staticmethod
    def split(cmd):
        import ctypes  # guarded import for systems without ctypes
        try:
            ctypes.windll
        except AttributeError:
            raise NotImplementedError

        if not cmd:
            return []
        # CommandLineToArgvW parses the executable name specially (no
        # quoting rules), which we do not want -- prepend a dummy token.
        cmd = 'dummy ' + cmd

        CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
        CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
        CommandLineToArgvW.argtypes = (ctypes.c_wchar_p,
                                       ctypes.POINTER(ctypes.c_int))

        argc = ctypes.c_int()
        argv_ptr = CommandLineToArgvW(cmd, ctypes.byref(argc))
        parsed = [argv_ptr[i] for i in range(argc.value)]
        assert not ctypes.windll.kernel32.LocalFree(argv_ptr)

        # drop the dummy token inserted above
        assert parsed[0] == "dummy"
        return parsed[1:]
class PosixParser:
    """Split/join with the semantics of
    ``subprocess.call("string", shell=True)`` on Posix systems.
    """

    @staticmethod
    def join(argv):
        return ' '.join(quote(token) for token in argv)

    @staticmethod
    def split(cmd):
        return shlex.split(cmd, posix=True)
# Select the parser matching the semantics of the running platform.
if os.name == 'nt':
    NativeParser = WindowsParser
elif os.name == 'posix':
    NativeParser = PosixParser
28
.venv/Lib/site-packages/numpy/distutils/armccompiler.py
Normal file
28
.venv/Lib/site-packages/numpy/distutils/armccompiler.py
Normal file
@ -0,0 +1,28 @@
|
||||
from __future__ import division, absolute_import, print_function
|
||||
|
||||
from distutils.unixccompiler import UnixCCompiler
|
||||
|
||||
class ArmCCompiler(UnixCCompiler):
|
||||
|
||||
"""
|
||||
Arm compiler.
|
||||
"""
|
||||
|
||||
compiler_type = 'arm'
|
||||
cc_exe = 'armclang'
|
||||
cxx_exe = 'armclang++'
|
||||
|
||||
def __init__(self, verbose=0, dry_run=0, force=0):
|
||||
UnixCCompiler.__init__(self, verbose, dry_run, force)
|
||||
cc_compiler = self.cc_exe
|
||||
cxx_compiler = self.cxx_exe
|
||||
self.set_executables(compiler=cc_compiler +
|
||||
' -O3 -fPIC',
|
||||
compiler_so=cc_compiler +
|
||||
' -O3 -fPIC',
|
||||
compiler_cxx=cxx_compiler +
|
||||
' -O3 -fPIC',
|
||||
linker_exe=cc_compiler +
|
||||
' -lamath',
|
||||
linker_so=cc_compiler +
|
||||
' -lamath -shared')
|
807
.venv/Lib/site-packages/numpy/distutils/ccompiler.py
Normal file
807
.venv/Lib/site-packages/numpy/distutils/ccompiler.py
Normal file
@ -0,0 +1,807 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shlex
|
||||
import time
|
||||
import subprocess
|
||||
from copy import copy
|
||||
from distutils import ccompiler
|
||||
from distutils.ccompiler import (
|
||||
compiler_class, gen_lib_options, get_default_compiler, new_compiler,
|
||||
CCompiler
|
||||
)
|
||||
from distutils.errors import (
|
||||
DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
|
||||
CompileError, UnknownFileError
|
||||
)
|
||||
from distutils.sysconfig import customize_compiler
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.exec_command import (
|
||||
filepath_from_subprocess_output, forward_bytes_to_stdout
|
||||
)
|
||||
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
|
||||
get_num_build_jobs, \
|
||||
_commandline_dep_string, \
|
||||
sanitize_cxx_flags
|
||||
|
||||
# globals for parallel build management
|
||||
import threading
|
||||
|
||||
_job_semaphore = None
|
||||
_global_lock = threading.Lock()
|
||||
_processing_files = set()
|
||||
|
||||
|
||||
def _needs_build(obj, cc_args, extra_postargs, pp_opts):
    """
    Check if an object file needs to be rebuilt, based on its recorded
    dependencies and the compiler command line used to produce it.

    Parameters
    ----------
    obj : str
        object file

    Returns
    -------
    bool
    """
    # The .d dependency file is written in unixccompiler.py.
    dep_file = obj + '.d'
    if not os.path.exists(dep_file):
        return True

    # dep_file is a makefile containing 'object: dependencies' formatted
    # like posix shell (spaces escaped, \ line continuations).  Its last
    # line records the compiler command line, because a project may
    # compile the same extension several times with different arguments.
    with open(dep_file, "r") as f:
        lines = f.readlines()

    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
    if lines[-1] != cmdline:
        return True

    body = ''.join(lines[:-1])
    deps = [token for token in shlex.split(body, posix=True)
            if token != "\n" and not token.endswith(":")]

    try:
        obj_mtime = os.stat(obj).st_mtime

        # Rebuild when any dependency (including the source file itself)
        # is newer than the object.
        for dep in deps:
            if os.stat(dep).st_mtime > obj_mtime:
                return True
    except OSError:
        # missing object counts as newer (shouldn't happen if dep_file exists)
        return True

    return False
|
||||
def replace_method(klass, method_name, func):
    """Install *func* as method *method_name* on *klass*.

    In Python 3 plain functions are descriptors, so assigning ``func``
    directly makes it behave as a bound method on instances.  The former
    ``lambda self, *args, **kw: func(self, *args, **kw)`` wrapper was a
    Python-2 leftover that only obscured ``__name__``/``__doc__`` in
    tracebacks and introspection; drop it.
    """
    setattr(klass, method_name, func)
|
||||
######################################################################
## Methods that subclasses may redefine.  Do not call them directly:
## they are private to the CCompiler class and may return unexpected
## results if used elsewhere.  You have been warned.

def CCompiler_find_executables(self):
    """
    No-op hook invoked by the ``get_version`` method.

    Subclasses may override it; in particular it is redefined in the
    `FCompiler` class, where more documentation can be found.
    """
    pass
||||
|
||||
replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
|
||||
|
||||
|
||||
# Using customized CCompiler.spawn.
|
||||
def CCompiler_spawn(self, cmd, display=None, env=None):
    """
    Execute a command in a sub-process.

    Parameters
    ----------
    cmd : str
        The command to execute.
    display : str or sequence of str, optional
        The text to add to the log file kept by `numpy.distutils`.
        If not given, `display` is equal to `cmd`.
    env : a dictionary for environment variables, optional

    Returns
    -------
    None

    Raises
    ------
    DistutilsExecError
        If the command failed, i.e. the exit status was not 0.
    """
    if env is None:
        env = dict(os.environ)
    if display is None:
        display = cmd
    if is_sequence(display):
        display = ' '.join(list(display))
    log.info(display)
    try:
        if self.verbose:
            subprocess.check_output(cmd, env=env)
        else:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
    except subprocess.CalledProcessError as exc:
        output = exc.output
        status = exc.returncode
    except OSError as e:
        # OSError carries no child output; exec_command() historically
        # used an empty string here, but that leaves the end-user with no
        # clue -- surface the exception text instead.
        output = f"\n\n{e}\n\n\n"
        try:
            output = output.encode(sys.stdout.encoding)
        except AttributeError:
            output = output.encode('utf8')
        # exit status exec_command() used for the parent of OSError
        status = 127
    else:
        # success path: skip the shared error reporting below
        return None

    if is_sequence(cmd):
        cmd = ' '.join(list(cmd))

    if self.verbose:
        forward_bytes_to_stdout(output)

    if re.search(b'Too many open files', output):
        msg = '\nTry rerunning setup command until build succeeds.'
    else:
        msg = ''
    raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
                             (cmd, status, msg))
|
||||
replace_method(CCompiler, 'spawn', CCompiler_spawn)
|
||||
|
||||
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
    """
    Return the name of the object files for the given source files.

    Parameters
    ----------
    source_filenames : list of str
        Paths to source files; relative and absolute paths are both
        handled transparently.
    strip_dir : bool, optional
        If True, only the bare file name (prepended by `output_dir`) is
        returned.  Default is False.
    output_dir : str, optional
        Path prepended to every returned object-file path.

    Returns
    -------
    obj_names : list of str
        Paths of the object files corresponding to the entries of
        `source_filenames`.
    """
    if output_dir is None:
        output_dir = ''
    obj_names = []
    for src_name in source_filenames:
        base, ext = os.path.splitext(os.path.normpath(src_name))
        base = os.path.splitdrive(base)[1]  # chop off any drive letter
        base = base[os.path.isabs(base):]   # if absolute, chop leading /
        if base.startswith('..'):
            # Resolve leading '..' components; interior ones were already
            # collapsed by os.path.normpath above.
            cut = base.rfind('..') + 2
            resolved = os.path.basename(os.path.abspath(base[:cut]))
            base = resolved + base[cut:]
        if ext not in self.src_extensions:
            raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
        if strip_dir:
            base = os.path.basename(base)
        obj_names.append(os.path.join(output_dir, base + self.obj_extension))
    return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
|
||||
|
||||
def CCompiler_compile(self, sources, output_dir=None, macros=None,
                      include_dirs=None, debug=0, extra_preargs=None,
                      extra_postargs=None, depends=None):
    """
    Compile one or more source files.

    Please refer to the Python distutils API reference for more details.

    Parameters
    ----------
    sources : list of str
        A list of filenames
    output_dir : str, optional
        Path to the output directory.
    macros : list of tuples
        A list of macro definitions.
    include_dirs : list of str, optional
        The directories to add to the default include file search path for
        this compilation only.
    debug : bool, optional
        Whether or not to output debug symbols in or alongside the object
        file(s).
    extra_preargs, extra_postargs : ?
        Extra pre- and post-arguments.
    depends : list of str, optional
        A list of file names that all targets depend on.

    Returns
    -------
    objects : list of str
        A list of object file names, one per source file `sources`.

    Raises
    ------
    CompileError
        If compilation fails.
    """
    global _job_semaphore

    jobs = get_num_build_jobs()

    # Create the semaphore lazily, now that the job count is known; it
    # caps concurrent compile jobs when parallelized at extension level
    # (python >= 3.5).
    with _global_lock:
        if _job_semaphore is None:
            _job_semaphore = threading.Semaphore(jobs)

    if not sources:
        return []
    from numpy.distutils.fcompiler import (FCompiler, is_f_file,
                                           has_f90_header)
    if isinstance(self, FCompiler):
        display = []
        for fc in ['f77', 'f90', 'fix']:
            fcomp = getattr(self, 'compiler_' + fc)
            if fcomp is None:
                continue
            display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
        display = '\n'.join(display)
    else:
        ccomp = self.compiler_so
        display = "C compiler: %s\n" % (' '.join(ccomp),)
    log.info(display)

    macros, objects, extra_postargs, pp_opts, build = \
        self._setup_compile(output_dir, macros, include_dirs, sources,
                            depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    display = "compile options: '%s'" % (' '.join(cc_args))
    if extra_postargs:
        display += "\nextra options: '%s'" % (' '.join(extra_postargs))
    log.info(display)

    def single_compile(args):
        obj, (src, ext) = args
        if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
            return

        # The same object may appear in several extensions; wait until no
        # other thread is building it before starting.
        while True:
            # explicit lock needed: check-and-add is not atomic under the GIL
            with _global_lock:
                if obj not in _processing_files:
                    # not being worked on -- claim it and go
                    _processing_files.add(obj)
                    break
            # someone else is building it; wait for them to finish
            time.sleep(0.1)

        try:
            # take a slot from the job semaphore and build
            with _job_semaphore:
                self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
        finally:
            # release the claim so other threads may proceed
            with _global_lock:
                _processing_files.remove(obj)

    if isinstance(self, FCompiler):
        objects_to_build = list(build.keys())
        f77_objects, other_objects = [], []
        for obj in objects:
            if obj in objects_to_build:
                src, ext = build[obj]
                if self.compiler_type == 'absoft':
                    obj = cyg2win32(obj)
                    src = cyg2win32(src)
                if is_f_file(src) and not has_f90_header(src):
                    f77_objects.append((obj, (src, ext)))
                else:
                    other_objects.append((obj, (src, ext)))

        # f77 objects can be built in parallel
        build_items = f77_objects
        # f90 modules must be built serially: compiling emits module files
        # that later sources in the list may depend on, so order matters.
        for item in other_objects:
            single_compile(item)
    else:
        build_items = build.items()

    if len(build) > 1 and jobs > 1:
        # parallel build
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(jobs) as pool:
            res = pool.map(single_compile, build_items)
            list(res)  # consume results so worker exceptions propagate
    else:
        # serial build
        for item in build_items:
            single_compile(item)

    # Return *all* object filenames, not just the ones we just built.
    return objects
||||
replace_method(CCompiler, 'compile', CCompiler_compile)
|
||||
|
||||
def CCompiler_customize_cmd(self, cmd, ignore=()):
    """
    Customize compiler using distutils command.

    Parameters
    ----------
    cmd : class instance
        An instance inheriting from `distutils.cmd.Command`.
    ignore : sequence of str, optional
        Names of `CCompiler` settings (without ``'set_'``) that must not
        be altered.  Recognized names are ``('include_dirs', 'define',
        'undef', 'libraries', 'library_dirs', 'rpath', 'link_objects')``.

    Returns
    -------
    None
    """
    log.info('customize %s using %s' % (self.__class__.__name__,
                                        cmd.__class__.__name__))

    if hasattr(self, 'compiler') and 'clang' in self.compiler[0]:
        # clang defaults to a non-strict floating error point model.
        # Since NumPy and most Python libs give warnings for these, override:
        self.compiler.append('-ftrapping-math')
        self.compiler_so.append('-ftrapping-math')

    def allow(attr):
        # apply only when the command provides a value and it isn't vetoed
        return getattr(cmd, attr, None) is not None and attr not in ignore

    if allow('include_dirs'):
        self.set_include_dirs(cmd.include_dirs)
    if allow('define'):
        for (name, value) in cmd.define:
            self.define_macro(name, value)
    if allow('undef'):
        for macro in cmd.undef:
            self.undefine_macro(macro)
    if allow('libraries'):
        self.set_libraries(self.libraries + cmd.libraries)
    if allow('library_dirs'):
        self.set_library_dirs(self.library_dirs + cmd.library_dirs)
    if allow('rpath'):
        self.set_runtime_library_dirs(cmd.rpath)
    if allow('link_objects'):
        self.set_link_objects(cmd.link_objects)
||||
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
|
||||
|
||||
def _compiler_to_string(compiler):
|
||||
props = []
|
||||
mx = 0
|
||||
keys = list(compiler.executables.keys())
|
||||
for key in ['version', 'libraries', 'library_dirs',
|
||||
'object_switch', 'compile_switch',
|
||||
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
|
||||
if key not in keys:
|
||||
keys.append(key)
|
||||
for key in keys:
|
||||
if hasattr(compiler, key):
|
||||
v = getattr(compiler, key)
|
||||
mx = max(mx, len(key))
|
||||
props.append((key, repr(v)))
|
||||
fmt = '%-' + repr(mx+1) + 's = %s'
|
||||
lines = [fmt % prop for prop in props]
|
||||
return '\n'.join(lines)
|
||||
|
||||
def CCompiler_show_customization(self):
    """
    Print the compiler customizations to stdout.

    Notes
    -----
    Printing only happens when the distutils log threshold is < 2.
    """
    try:
        # best effort: populate self.version before dumping
        self.get_version()
    except Exception:
        pass
    if log._global_log.threshold < 2:
        banner = '*' * 80
        print(banner)
        print(self.__class__)
        print(_compiler_to_string(self))
        print(banner)
|
||||
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
|
||||
|
||||
def CCompiler_customize(self, dist, need_cxx=0):
    """
    Do any platform-specific customization of a compiler instance.

    Calls `distutils.sysconfig.customize_compiler` for platform-specific
    customization and optionally removes a flag that produces spurious
    warnings when C++ code is being compiled.

    Parameters
    ----------
    dist : object
        This parameter is not used for anything.
    need_cxx : bool, optional
        Whether or not C++ has to be compiled. If so (True), the
        ``"-Wstrict-prototypes"`` option is removed to prevent spurious
        warnings. Default is False.

    Returns
    -------
    None

    Notes
    -----
    All the default options used by distutils can be extracted with::

      from distutils import sysconfig
      sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
                                'CCSHARED', 'LDSHARED', 'SO')

    """
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # distutils passes -Wstrict-prototypes by default, but the flag is
        # only valid for C, not C++ -- drop it to avoid a spurious warning
        # on every compilation.
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass

        if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
            if not self.compiler_cxx:
                # derive the C++ driver name from the C one
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a, b)] \
                                    + self.compiler[1:]
        else:
            if hasattr(self, 'compiler'):
                log.warn("#### %s #######" % (self.compiler,))
            if not hasattr(self, 'compiler_cxx'):
                log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)

    # Determine whether the compiler supports gcc-style automatic
    # dependency generation.  This runs for every extension, so skip the
    # probe compile for compilers known to support it.
    if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
                                      'g++' in self.compiler[0] or
                                      'clang' in self.compiler[0]):
        self._auto_depends = True
    elif os.name == 'posix':
        import tempfile
        import shutil
        tmpdir = tempfile.mkdtemp()
        try:
            fn = os.path.join(tmpdir, "file.c")
            with open(fn, "w") as f:
                f.write("int a;\n")
            self.compile([fn], output_dir=tmpdir,
                         extra_preargs=['-MMD', '-MF', fn + '.d'])
            self._auto_depends = True
        except CompileError:
            self._auto_depends = False
        finally:
            shutil.rmtree(tmpdir)

    return
||||
replace_method(CCompiler, 'customize', CCompiler_customize)
|
||||
|
||||
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
    """
    Simple matching of version numbers, for use in CCompiler and FCompiler.

    Parameters
    ----------
    pat : str, optional
        A regular expression matching version numbers.
        Default is ``r'[-.\\d]+'``.
    ignore : str, optional
        A regular expression matching patterns to skip.
        Default is ``''``, in which case nothing is skipped.
    start : str, optional
        A regular expression matching the start of where to start looking
        for version numbers.
        Default is ``''``, in which case searching starts at the beginning
        of the version string given to `matcher`.

    Returns
    -------
    matcher : callable
        A function suitable as the ``.version_match`` attribute of a
        `CCompiler` subclass; it takes a single version-string parameter.

    """
    def matcher(self, version_string):
        # the version may sit on a later line, so flatten newlines first
        version_string = version_string.replace('\n', ' ')
        pos = 0
        if start:
            anchor = re.match(start, version_string)
            if not anchor:
                return None
            pos = anchor.end()
        while True:
            found = re.search(pat, version_string[pos:])
            if not found:
                return None
            if ignore and re.match(ignore, found.group(0)):
                pos = found.end()
                continue
            return found.group(0)
    return matcher
|
||||
def CCompiler_get_version(self, force=False, ok_status=[0]):
    """
    Return compiler version, or None if compiler is not available.

    Parameters
    ----------
    force : bool, optional
        If True, force a new determination of the version, even if the
        compiler already has a version attribute. Default is False.
    ok_status : list of int, optional
        The list of status values returned by the version look-up process
        for which a version string is returned. If the status value is not
        in `ok_status`, None is returned. Default is ``[0]``.
        (Mutable default is safe here: the list is only read, never mutated.)

    Returns
    -------
    version : str or None
        Version string, in the format of `distutils.version.LooseVersion`.

    """
    # Cached result: reuse ``self.version`` unless re-detection is forced.
    if not force and hasattr(self, 'version'):
        return self.version
    self.find_executables()
    # Without a version command there is nothing to run.
    try:
        version_cmd = self.version_cmd
    except AttributeError:
        return None
    if not version_cmd or not version_cmd[0]:
        return None
    # Prefer an explicit ``version_match`` callable; otherwise fall back
    # to a ``version_pattern`` regex that must expose a named group
    # ``version``.
    try:
        matcher = self.version_match
    except AttributeError:
        try:
            pat = self.version_pattern
        except AttributeError:
            return None
        def matcher(version_string):
            m = re.match(pat, version_string)
            if not m:
                return None
            version = m.group('version')
            return version

    try:
        output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        # A non-zero exit can still carry usable output (some compilers
        # print their version and exit non-zero); keep both pieces.
        output = exc.output
        status = exc.returncode
    except OSError:
        # match the historical returns for a parent
        # exception class caught by exec_command()
        status = 127
        output = b''
    else:
        # output isn't actually a filepath but we do this
        # for now to match previous distutils behavior
        output = filepath_from_subprocess_output(output)
        status = 0

    # Only attempt to parse a version when the exit status is acceptable.
    version = None
    if status in ok_status:
        version = matcher(output)
        if version:
            version = LooseVersion(version)
    self.version = version
    return version
|
# Install the numpy-customized implementation as ``CCompiler.get_version``.
replace_method(CCompiler, 'get_version', CCompiler_get_version)
||||
def CCompiler_cxx_compiler(self):
    """
    Return the C++ compiler.

    Parameters
    ----------
    None

    Returns
    -------
    cxx : class instance
        The C++ compiler, as a `CCompiler` instance.

    """
    # MSVC-style drivers compile both C and C++ with the same executable,
    # so the C compiler object can be reused as-is.
    if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
        return self

    cxx = copy(self)
    # NOTE: the original code also contained the no-op self-assignment
    # ``cxx.compiler_cxx = cxx.compiler_cxx``; removed as dead code.
    # Rebuild the compile command around the C++ driver, keeping the
    # (sanitized) flags from the C configuration.
    cxx.compiler_so = [cxx.compiler_cxx[0]] + \
                      sanitize_cxx_flags(cxx.compiler_so[1:])
    if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
        # AIX needs the ld_so_aix script included with Python
        cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
                        + cxx.linker_so[2:]
    else:
        cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
    return cxx
|
||||
|
||||
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
|
||||
|
||||
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
|
||||
"Intel C Compiler for 32-bit applications")
|
||||
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
|
||||
"Intel C Itanium Compiler for Itanium-based applications")
|
||||
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
|
||||
"Intel C Compiler for 64-bit applications")
|
||||
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
|
||||
"Intel C Compiler for 32-bit applications on Windows")
|
||||
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
|
||||
"Intel C Compiler for 64-bit applications on Windows")
|
||||
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
|
||||
"PathScale Compiler for SiCortex-based applications")
|
||||
compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
|
||||
"Arm C Compiler")
|
||||
|
||||
ccompiler._default_compilers += (('linux.*', 'intel'),
|
||||
('linux.*', 'intele'),
|
||||
('linux.*', 'intelem'),
|
||||
('linux.*', 'pathcc'),
|
||||
('nt', 'intelw'),
|
||||
('nt', 'intelemw'))
|
||||
|
||||
if sys.platform == 'win32':
|
||||
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
|
||||
"Mingw32 port of GNU C Compiler for Win32"\
|
||||
"(for MSC built Python)")
|
||||
if mingw32():
|
||||
# On windows platforms, we want to default to mingw32 (gcc)
|
||||
# because msvc can't build blitz stuff.
|
||||
log.info('Setting mingw32 as default compiler for nt.')
|
||||
ccompiler._default_compilers = (('nt', 'mingw32'),) \
|
||||
+ ccompiler._default_compilers
|
||||
|
||||
|
||||
_distutils_new_compiler = new_compiler
|
||||
def new_compiler (plat=None,
|
||||
compiler=None,
|
||||
verbose=None,
|
||||
dry_run=0,
|
||||
force=0):
|
||||
# Try first C compilers from numpy.distutils.
|
||||
if verbose is None:
|
||||
verbose = log.get_threshold() <= log.INFO
|
||||
if plat is None:
|
||||
plat = os.name
|
||||
try:
|
||||
if compiler is None:
|
||||
compiler = get_default_compiler(plat)
|
||||
(module_name, class_name, long_description) = compiler_class[compiler]
|
||||
except KeyError:
|
||||
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
|
||||
if compiler is not None:
|
||||
msg = msg + " with '%s' compiler" % compiler
|
||||
raise DistutilsPlatformError(msg)
|
||||
module_name = "numpy.distutils." + module_name
|
||||
try:
|
||||
__import__ (module_name)
|
||||
except ImportError as e:
|
||||
msg = str(e)
|
||||
log.info('%s in numpy.distutils; trying from distutils',
|
||||
str(msg))
|
||||
module_name = module_name[6:]
|
||||
try:
|
||||
__import__(module_name)
|
||||
except ImportError as e:
|
||||
msg = str(e)
|
||||
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
|
||||
module_name)
|
||||
try:
|
||||
module = sys.modules[module_name]
|
||||
klass = vars(module)[class_name]
|
||||
except KeyError:
|
||||
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
|
||||
"in module '%s'") % (class_name, module_name))
|
||||
compiler = klass(None, dry_run, force)
|
||||
compiler.verbose = verbose
|
||||
log.debug('new_compiler returns %s' % (klass))
|
||||
return compiler
|
# Replace distutils' factory with the numpy-aware one defined above.
ccompiler.new_compiler = new_compiler

# Keep a reference to distutils' gen_lib_options before it is shadowed by
# the extended version defined below.
_distutils_gen_lib_options = gen_lib_options
||||
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
|
||||
# the version of this function provided by CPython allows the following
|
||||
# to return lists, which are unpacked automatically:
|
||||
# - compiler.runtime_library_dir_option
|
||||
# our version extends the behavior to:
|
||||
# - compiler.library_dir_option
|
||||
# - compiler.library_option
|
||||
# - compiler.find_library_file
|
||||
r = _distutils_gen_lib_options(compiler, library_dirs,
|
||||
runtime_library_dirs, libraries)
|
||||
lib_opts = []
|
||||
for i in r:
|
||||
if is_sequence(i):
|
||||
lib_opts.extend(list(i))
|
||||
else:
|
||||
lib_opts.append(i)
|
||||
return lib_opts
|
||||
ccompiler.gen_lib_options = gen_lib_options
|
||||
|
||||
# Also fix up the various compiler modules, which do
|
||||
# from distutils.ccompiler import gen_lib_options
|
||||
# Don't bother with mwerks, as we don't support Classic Mac.
|
||||
for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
|
||||
_m = sys.modules.get('distutils.' + _cc + 'compiler')
|
||||
if _m is not None:
|
||||
setattr(_m, 'gen_lib_options', gen_lib_options)
|
||||
|
2588
.venv/Lib/site-packages/numpy/distutils/ccompiler_opt.py
Normal file
2588
.venv/Lib/site-packages/numpy/distutils/ccompiler_opt.py
Normal file
File diff suppressed because it is too large
Load Diff
25
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c
Normal file
25
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c
Normal file
@ -0,0 +1,25 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
|
||||
/* MAXMIN */
|
||||
int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
|
||||
ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
|
||||
/* ROUNDING */
|
||||
ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
|
||||
#ifdef __aarch64__
|
||||
{
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
|
||||
/* MAXMIN */
|
||||
ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
|
||||
ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
|
||||
/* ROUNDING */
|
||||
ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
}
|
15
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c
Normal file
15
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c
Normal file
@ -0,0 +1,15 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
uint8x16_t v1 = vdupq_n_u8((unsigned char)1), v2 = vdupq_n_u8((unsigned char)2);
|
||||
uint32x4_t va = vdupq_n_u32(3);
|
||||
int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
|
||||
#ifdef __aarch64__
|
||||
ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float16x8_t vhp = vdupq_n_f16((float16_t)1);
|
||||
float16x4_t vlhp = vdup_n_f16((float16_t)1);
|
||||
float32x4_t vf = vdupq_n_f32(1.0f);
|
||||
float32x2_t vlf = vdup_n_f32(1.0f);
|
||||
|
||||
int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0);
|
||||
ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0);
|
||||
|
||||
return ret;
|
||||
}
|
14
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c
Normal file
14
.venv/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c
Normal file
@ -0,0 +1,14 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float16x8_t vhp = vdupq_n_f16((float16_t)-1);
|
||||
float16x4_t vlhp = vdup_n_f16((float16_t)-1);
|
||||
|
||||
int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
|
||||
ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
|
||||
return ret;
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX__
|
||||
#error "HOST/ARCH doesn't support AVX"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX2__
|
||||
#error "HOST/ARCH doesn't support AVX2"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
|
||||
}
|
@ -0,0 +1,22 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512VNNI__
|
||||
#error "HOST/ARCH doesn't support CascadeLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
/* VNNI */
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
|
||||
#error "HOST/ARCH doesn't support CannonLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
/* IFMA */
|
||||
a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
|
||||
/* VMBI */
|
||||
a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@ -0,0 +1,26 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
|
||||
#error "HOST/ARCH doesn't support IceLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
/* VBMI2 */
|
||||
a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
|
||||
/* BITLAG */
|
||||
a = _mm512_popcnt_epi8(a);
|
||||
/* VPOPCNTDQ */
|
||||
a = _mm512_popcnt_epi64(a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512ER__) || !defined(__AVX512PF__)
|
||||
#error "HOST/ARCH doesn't support Knights Landing AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int base[128];
|
||||
__m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
|
||||
/* ER */
|
||||
__m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
|
||||
/* PF */
|
||||
_mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
|
||||
return base[0];
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
|
||||
#error "HOST/ARCH doesn't support Knights Mill AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
__m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
|
||||
|
||||
/* 4FMAPS */
|
||||
b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
|
||||
/* 4VNNIW */
|
||||
a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
|
||||
/* VPOPCNTDQ */
|
||||
a = _mm512_popcnt_epi64(a);
|
||||
|
||||
a = _mm512_add_epi32(a, _mm512_castps_si512(b));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@ -0,0 +1,26 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
|
||||
#error "HOST/ARCH doesn't support SkyLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
/* VL */
|
||||
__m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
|
||||
/* DQ */
|
||||
__m512i b = _mm512_broadcast_i32x8(a);
|
||||
/* BW */
|
||||
b = _mm512_abs_epi16(b);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
|
||||
}
|
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512CD__
|
||||
#error "HOST/ARCH doesn't support AVX512CD"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512F__
|
||||
#error "HOST/ARCH doesn't support AVX512F"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
22
.venv/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c
Normal file
22
.venv/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c
Normal file
@ -0,0 +1,22 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __F16C__
|
||||
#error "HOST/ARCH doesn't support F16C"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
|
||||
__m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
|
||||
return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
|
||||
}
|
22
.venv/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c
Normal file
22
.venv/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c
Normal file
@ -0,0 +1,22 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__FMA__) && !defined(__AVX2__)
|
||||
#error "HOST/ARCH doesn't support FMA3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <xmmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
|
||||
a = _mm256_fmadd_ps(a, a, a);
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c
Normal file
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c
Normal file
@ -0,0 +1,13 @@
|
||||
#include <immintrin.h>
|
||||
#ifdef _MSC_VER
|
||||
#include <ammintrin.h>
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
|
||||
a = _mm256_macc_ps(a, a, a);
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
15
.venv/Lib/site-packages/numpy/distutils/checks/cpu_neon.c
Normal file
15
.venv/Lib/site-packages/numpy/distutils/checks/cpu_neon.c
Normal file
@ -0,0 +1,15 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
|
||||
int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
|
||||
#ifdef __aarch64__
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
|
||||
ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
short z4[] = {0, 0, 0, 0, 0, 0, 0, 0};
|
||||
float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16((const short*)z4));
|
||||
return (int)vgetq_lane_f32(v_z4, 0);
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f);
|
||||
float32x4_t v2 = vdupq_n_f32(2.0f);
|
||||
float32x4_t v3 = vdupq_n_f32(3.0f);
|
||||
int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
|
||||
#ifdef __aarch64__
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0);
|
||||
float64x2_t vd2 = vdupq_n_f64(2.0);
|
||||
float64x2_t vd3 = vdupq_n_f64(3.0);
|
||||
ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
32
.venv/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c
Normal file
32
.venv/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c
Normal file
@ -0,0 +1,32 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env vr `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__SSE4_2__) && !defined(__POPCNT__)
|
||||
#error "HOST/ARCH doesn't support POPCNT"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <nmmintrin.h>
|
||||
#else
|
||||
#include <popcntintrin.h>
|
||||
#endif
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// To make sure popcnt instructions are generated
|
||||
// and been tested against the assembler
|
||||
unsigned long long a = *((unsigned long long*)argv[argc-1]);
|
||||
unsigned int b = *((unsigned int*)argv[argc-2]);
|
||||
|
||||
#if defined(_M_X64) || defined(__x86_64__)
|
||||
a = _mm_popcnt_u64(a);
|
||||
#endif
|
||||
b = _mm_popcnt_u32(b);
|
||||
return (int)a + b;
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE__
|
||||
#error "HOST/ARCH doesn't support SSE"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <xmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE2__
|
||||
#error "HOST/ARCH doesn't support SSE2"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <emmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return _mm_cvtsi128_si32(a);
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE3__
|
||||
#error "HOST/ARCH doesn't support SSE3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <pmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE4_1__
|
||||
#error "HOST/ARCH doesn't support SSE41"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <smmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_floor_ps(_mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE4_2__
|
||||
#error "HOST/ARCH doesn't support SSE42"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <smmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c
Normal file
20
.venv/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c
Normal file
@ -0,0 +1,20 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSSE3__
|
||||
#error "HOST/ARCH doesn't support SSSE3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <tmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return (int)_mm_cvtsi128_si32(a);
|
||||
}
|
21
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c
Normal file
21
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c
Normal file
@ -0,0 +1,21 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
|
||||
#define vsx_ld vec_vsx_ld
|
||||
#define vsx_st vec_vsx_st
|
||||
#else
|
||||
#define vsx_ld vec_xl
|
||||
#define vsx_st vec_xst
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
unsigned int zout[4];
|
||||
unsigned int z4[] = {0, 0, 0, 0};
|
||||
__vector unsigned int v_z4 = vsx_ld(0, z4);
|
||||
vsx_st(v_z4, 0, zout);
|
||||
return zout[0];
|
||||
}
|
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c
Normal file
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c
Normal file
@ -0,0 +1,13 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
typedef __vector unsigned long long v_uint64x2;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
v_uint64x2 z2 = (v_uint64x2){0, 0};
|
||||
z2 = (v_uint64x2)vec_cmpeq(z2, z2);
|
||||
return (int)vec_extract(z2, 0);
|
||||
}
|
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c
Normal file
13
.venv/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c
Normal file
@ -0,0 +1,13 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
typedef __vector unsigned int v_uint32x4;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
|
||||
z4 = vec_absd(z4, z4);
|
||||
return (int)vec_extract(z4, 0);
|
||||
}
|
12
.venv/Lib/site-packages/numpy/distutils/checks/cpu_xop.c
Normal file
12
.venv/Lib/site-packages/numpy/distutils/checks/cpu_xop.c
Normal file
@ -0,0 +1,12 @@
|
||||
#include <immintrin.h>
|
||||
#ifdef _MSC_VER
|
||||
#include <ammintrin.h>
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return _mm_cvtsi128_si32(a);
|
||||
}
|
@ -0,0 +1,18 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* Test BW mask operations due to:
|
||||
* - MSVC has supported it since vs2019 see,
|
||||
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
|
||||
* - Clang >= v8.0
|
||||
* - GCC >= v7.1
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
|
||||
m64 = _kor_mask64(m64, m64);
|
||||
m64 = _kxor_mask64(m64, m64);
|
||||
m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
|
||||
m64 = _mm512_kunpackd(m64, m64);
|
||||
m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
|
||||
return (int)_cvtmask64_u64(m64);
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* Test DQ mask operations due to:
|
||||
* - MSVC has supported it since vs2019 see,
|
||||
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
|
||||
* - Clang >= v8.0
|
||||
* - GCC >= v7.1
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
|
||||
m8 = _kor_mask8(m8, m8);
|
||||
m8 = _kxor_mask8(m8, m8);
|
||||
m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
|
||||
return (int)_cvtmask8_u32(m8);
|
||||
}
|
@ -0,0 +1,41 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* The following intrinsics don't have direct native support but compilers
|
||||
* tend to emulate them.
|
||||
* They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__m512 one_ps = _mm512_set1_ps(1.0f);
|
||||
__m512d one_pd = _mm512_set1_pd(1.0);
|
||||
__m512i one_i64 = _mm512_set1_epi64(1);
|
||||
// add
|
||||
float sum_ps = _mm512_reduce_add_ps(one_ps);
|
||||
double sum_pd = _mm512_reduce_add_pd(one_pd);
|
||||
int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
|
||||
sum_int += (int)_mm512_reduce_add_epi32(one_i64);
|
||||
// mul
|
||||
sum_ps += _mm512_reduce_mul_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_mul_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
|
||||
sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
|
||||
// min
|
||||
sum_ps += _mm512_reduce_min_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_min_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_min_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_min_epu32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_min_epi64(one_i64);
|
||||
// max
|
||||
sum_ps += _mm512_reduce_max_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_max_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_max_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_max_epu32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_max_epi64(one_i64);
|
||||
// and
|
||||
sum_int += (int)_mm512_reduce_and_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_and_epi64(one_i64);
|
||||
// or
|
||||
sum_int += (int)_mm512_reduce_or_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_or_epi64(one_i64);
|
||||
return (int)sum_ps + (int)sum_pd + sum_int;
|
||||
}
|
@ -0,0 +1,36 @@
|
||||
/**
|
||||
* Testing ASM VSX register number fixer '%x<n>'
|
||||
*
|
||||
* old versions of CLANG doesn't support %x<n> in the inline asm template
|
||||
* which fixes register number when using any of the register constraints wa, wd, wf.
|
||||
*
|
||||
* xref:
|
||||
* - https://bugs.llvm.org/show_bug.cgi?id=31837
|
||||
* - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
|
||||
*/
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
|
||||
#define vsx_ld vec_vsx_ld
|
||||
#define vsx_st vec_vsx_st
|
||||
#else
|
||||
#define vsx_ld vec_xl
|
||||
#define vsx_st vec_xst
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float z4[] = {0, 0, 0, 0};
|
||||
signed int zout[] = {0, 0, 0, 0};
|
||||
|
||||
__vector float vz4 = vsx_ld(0, z4);
|
||||
__vector signed int asm_ret = vsx_ld(0, zout);
|
||||
|
||||
__asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
|
||||
|
||||
vsx_st(asm_ret, 0, zout);
|
||||
return zout[0];
|
||||
}
|
@ -0,0 +1 @@
|
||||
int test_flags;
|
41
.venv/Lib/site-packages/numpy/distutils/command/__init__.py
Normal file
41
.venv/Lib/site-packages/numpy/distutils/command/__init__.py
Normal file
@ -0,0 +1,41 @@
|
||||
"""distutils.command
|
||||
|
||||
Package containing implementation of all the standard Distutils
|
||||
commands.
|
||||
|
||||
"""
|
||||
def test_na_writable_attributes_deletion():
|
||||
a = np.NA(2)
|
||||
attr = ['payload', 'dtype']
|
||||
for s in attr:
|
||||
assert_raises(AttributeError, delattr, a, s)
|
||||
|
||||
|
||||
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
|
||||
|
||||
distutils_all = [ #'build_py',
|
||||
'clean',
|
||||
'install_clib',
|
||||
'install_scripts',
|
||||
'bdist',
|
||||
'bdist_dumb',
|
||||
'bdist_wininst',
|
||||
]
|
||||
|
||||
__import__('distutils.command', globals(), locals(), distutils_all)
|
||||
|
||||
__all__ = ['build',
|
||||
'config_compiler',
|
||||
'config',
|
||||
'build_src',
|
||||
'build_py',
|
||||
'build_ext',
|
||||
'build_clib',
|
||||
'build_scripts',
|
||||
'install',
|
||||
'install_data',
|
||||
'install_headers',
|
||||
'install_lib',
|
||||
'bdist_rpm',
|
||||
'sdist',
|
||||
] + distutils_all
|
148
.venv/Lib/site-packages/numpy/distutils/command/autodist.py
Normal file
148
.venv/Lib/site-packages/numpy/distutils/command/autodist.py
Normal file
@ -0,0 +1,148 @@
|
||||
"""This module implements additional tests ala autoconf which can be useful.
|
||||
|
||||
"""
|
||||
import textwrap
|
||||
|
||||
# We put them here since they could be easily reused outside numpy.distutils
|
||||
|
||||
def check_inline(cmd):
|
||||
"""Return the inline identifier (may be empty)."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#ifndef __cplusplus
|
||||
static %(inline)s int static_func (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
%(inline)s int nostatic_func (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif""")
|
||||
|
||||
for kw in ['inline', '__inline__', '__inline']:
|
||||
st = cmd.try_compile(body % {'inline': kw}, None, None)
|
||||
if st:
|
||||
return kw
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
def check_restrict(cmd):
|
||||
"""Return the restrict identifier (may be empty)."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
static int static_func (char * %(restrict)s a)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
|
||||
for kw in ['restrict', '__restrict__', '__restrict']:
|
||||
st = cmd.try_compile(body % {'restrict': kw}, None, None)
|
||||
if st:
|
||||
return kw
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
def check_compiler_gcc(cmd):
|
||||
"""Check if the compiler is GCC."""
|
||||
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
int
|
||||
main()
|
||||
{
|
||||
#if (! defined __GNUC__)
|
||||
#error gcc required
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
return cmd.try_compile(body, None, None)
|
||||
|
||||
|
||||
def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
|
||||
"""
|
||||
Check that the gcc version is at least the specified version."""
|
||||
|
||||
cmd._check_compiler()
|
||||
version = '.'.join([str(major), str(minor), str(patchlevel)])
|
||||
body = textwrap.dedent("""
|
||||
int
|
||||
main()
|
||||
{
|
||||
#if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
|
||||
(__GNUC_MINOR__ < %(minor)d) || \\
|
||||
(__GNUC_PATCHLEVEL__ < %(patchlevel)d)
|
||||
#error gcc >= %(version)s required
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
kw = {'version': version, 'major': major, 'minor': minor,
|
||||
'patchlevel': patchlevel}
|
||||
|
||||
return cmd.try_compile(body % kw, None, None)
|
||||
|
||||
|
||||
def check_gcc_function_attribute(cmd, attribute, name):
|
||||
"""Return True if the given function attribute is supported."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#pragma GCC diagnostic error "-Wattributes"
|
||||
#pragma clang diagnostic error "-Wattributes"
|
||||
|
||||
int %s %s(void* unused)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (attribute, name)
|
||||
return cmd.try_compile(body, None, None) != 0
|
||||
|
||||
|
||||
def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
|
||||
include):
|
||||
"""Return True if the given function attribute is supported with
|
||||
intrinsics."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#include<%s>
|
||||
int %s %s(void)
|
||||
{
|
||||
%s;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (include, attribute, name, code)
|
||||
return cmd.try_compile(body, None, None) != 0
|
||||
|
||||
|
||||
def check_gcc_variable_attribute(cmd, attribute):
|
||||
"""Return True if the given variable attribute is supported."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#pragma GCC diagnostic error "-Wattributes"
|
||||
#pragma clang diagnostic error "-Wattributes"
|
||||
|
||||
int %s foo;
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (attribute, )
|
||||
return cmd.try_compile(body, None, None) != 0
|
22
.venv/Lib/site-packages/numpy/distutils/command/bdist_rpm.py
Normal file
22
.venv/Lib/site-packages/numpy/distutils/command/bdist_rpm.py
Normal file
@ -0,0 +1,22 @@
|
||||
import os
|
||||
import sys
|
||||
if 'setuptools' in sys.modules:
|
||||
from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
|
||||
else:
|
||||
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
|
||||
|
||||
class bdist_rpm(old_bdist_rpm):
|
||||
|
||||
def _make_spec_file(self):
|
||||
spec_file = old_bdist_rpm._make_spec_file(self)
|
||||
|
||||
# Replace hardcoded setup.py script name
|
||||
# with the real setup script name.
|
||||
setup_py = os.path.basename(sys.argv[0])
|
||||
if setup_py == 'setup.py':
|
||||
return spec_file
|
||||
new_spec_file = []
|
||||
for line in spec_file:
|
||||
line = line.replace('setup.py', setup_py)
|
||||
new_spec_file.append(line)
|
||||
return new_spec_file
|
61
.venv/Lib/site-packages/numpy/distutils/command/build.py
Normal file
61
.venv/Lib/site-packages/numpy/distutils/command/build.py
Normal file
@ -0,0 +1,61 @@
|
||||
import os
|
||||
import sys
|
||||
from distutils.command.build import build as old_build
|
||||
from distutils.util import get_platform
|
||||
from numpy.distutils.command.config_compiler import show_fortran_compilers
|
||||
|
||||
class build(old_build):
|
||||
|
||||
sub_commands = [('config_cc', lambda *args: True),
|
||||
('config_fc', lambda *args: True),
|
||||
('build_src', old_build.has_ext_modules),
|
||||
] + old_build.sub_commands
|
||||
|
||||
user_options = old_build.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
('simd-test=', None,
|
||||
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
|
||||
]
|
||||
|
||||
help_options = old_build.help_options + [
|
||||
('help-fcompiler', None, "list available Fortran compilers",
|
||||
show_fortran_compilers),
|
||||
]
|
||||
|
||||
def initialize_options(self):
|
||||
old_build.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.warn_error = False
|
||||
self.cpu_baseline = "min"
|
||||
self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
|
||||
self.disable_optimization = False
|
||||
"""
|
||||
the '_simd' module is a very large. Adding more dispatched features
|
||||
will increase binary size and compile time. By default we minimize
|
||||
the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
|
||||
NOTE: any specified features will be ignored if they're:
|
||||
- part of the baseline(--cpu-baseline)
|
||||
- not part of dispatch-able features(--cpu-dispatch)
|
||||
- not supported by compiler or platform
|
||||
"""
|
||||
self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD"
|
||||
|
||||
def finalize_options(self):
|
||||
build_scripts = self.build_scripts
|
||||
old_build.finalize_options(self)
|
||||
plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
|
||||
if build_scripts is None:
|
||||
self.build_scripts = os.path.join(self.build_base,
|
||||
'scripts' + plat_specifier)
|
||||
|
||||
def run(self):
|
||||
old_build.run(self)
|
468
.venv/Lib/site-packages/numpy/distutils/command/build_clib.py
Normal file
468
.venv/Lib/site-packages/numpy/distutils/command/build_clib.py
Normal file
@ -0,0 +1,468 @@
|
||||
""" Modified version of build_clib that handles fortran source files.
|
||||
"""
|
||||
import os
|
||||
from glob import glob
|
||||
import shutil
|
||||
from distutils.command.build_clib import build_clib as old_build_clib
|
||||
from distutils.errors import DistutilsSetupError, DistutilsError, \
|
||||
DistutilsFileError
|
||||
|
||||
from numpy.distutils import log
|
||||
from distutils.dep_util import newer_group
|
||||
from numpy.distutils.misc_util import (
|
||||
filter_sources, get_lib_source_files, get_numpy_include_dirs,
|
||||
has_cxx_sources, has_f_sources, is_sequence
|
||||
)
|
||||
from numpy.distutils.ccompiler_opt import new_ccompiler_opt
|
||||
|
||||
# Fix Python distutils bug sf #1718574:
|
||||
_l = old_build_clib.user_options
|
||||
for _i in range(len(_l)):
|
||||
if _l[_i][0] in ['build-clib', 'build-temp']:
|
||||
_l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
|
||||
#
|
||||
|
||||
|
||||
class build_clib(old_build_clib):
|
||||
|
||||
description = "build C/C++/F libraries used by Python extensions"
|
||||
|
||||
user_options = old_build_clib.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('inplace', 'i', 'Build in-place'),
|
||||
('parallel=', 'j',
|
||||
"number of parallel jobs"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
]
|
||||
|
||||
boolean_options = old_build_clib.boolean_options + \
|
||||
['inplace', 'warn-error', 'disable-optimization']
|
||||
|
||||
def initialize_options(self):
|
||||
old_build_clib.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.inplace = 0
|
||||
self.parallel = None
|
||||
self.warn_error = None
|
||||
self.cpu_baseline = None
|
||||
self.cpu_dispatch = None
|
||||
self.disable_optimization = None
|
||||
|
||||
|
||||
def finalize_options(self):
|
||||
if self.parallel:
|
||||
try:
|
||||
self.parallel = int(self.parallel)
|
||||
except ValueError as e:
|
||||
raise ValueError("--parallel/-j argument must be an integer") from e
|
||||
old_build_clib.finalize_options(self)
|
||||
self.set_undefined_options('build',
|
||||
('parallel', 'parallel'),
|
||||
('warn_error', 'warn_error'),
|
||||
('cpu_baseline', 'cpu_baseline'),
|
||||
('cpu_dispatch', 'cpu_dispatch'),
|
||||
('disable_optimization', 'disable_optimization')
|
||||
)
|
||||
|
||||
def have_f_sources(self):
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
if has_f_sources(build_info.get('sources', [])):
|
||||
return True
|
||||
return False
|
||||
|
||||
def have_cxx_sources(self):
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
if has_cxx_sources(build_info.get('sources', [])):
|
||||
return True
|
||||
return False
|
||||
|
||||
def run(self):
|
||||
if not self.libraries:
|
||||
return
|
||||
|
||||
# Make sure that library sources are complete.
|
||||
languages = []
|
||||
|
||||
# Make sure that extension sources are complete.
|
||||
self.run_command('build_src')
|
||||
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
l = build_info.get('language', None)
|
||||
if l and l not in languages:
|
||||
languages.append(l)
|
||||
|
||||
from distutils.ccompiler import new_compiler
|
||||
self.compiler = new_compiler(compiler=self.compiler,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force)
|
||||
self.compiler.customize(self.distribution,
|
||||
need_cxx=self.have_cxx_sources())
|
||||
|
||||
if self.warn_error:
|
||||
self.compiler.compiler.append('-Werror')
|
||||
self.compiler.compiler_so.append('-Werror')
|
||||
|
||||
libraries = self.libraries
|
||||
self.libraries = None
|
||||
self.compiler.customize_cmd(self)
|
||||
self.libraries = libraries
|
||||
|
||||
self.compiler.show_customization()
|
||||
|
||||
if not self.disable_optimization:
|
||||
dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
|
||||
dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
|
||||
opt_cache_path = os.path.abspath(
|
||||
os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
|
||||
)
|
||||
if hasattr(self, "compiler_opt"):
|
||||
# By default `CCompilerOpt` update the cache at the exit of
|
||||
# the process, which may lead to duplicate building
|
||||
# (see build_extension()/force_rebuild) if run() called
|
||||
# multiple times within the same os process/thread without
|
||||
# giving the chance the previous instances of `CCompilerOpt`
|
||||
# to update the cache.
|
||||
self.compiler_opt.cache_flush()
|
||||
|
||||
self.compiler_opt = new_ccompiler_opt(
|
||||
compiler=self.compiler, dispatch_hpath=dispatch_hpath,
|
||||
cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
|
||||
cache_path=opt_cache_path
|
||||
)
|
||||
def report(copt):
|
||||
log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
|
||||
log.info(copt.report(full=True))
|
||||
|
||||
import atexit
|
||||
atexit.register(report, self.compiler_opt)
|
||||
|
||||
if self.have_f_sources():
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90='f90' in languages,
|
||||
c_compiler=self.compiler)
|
||||
if self._f_compiler is not None:
|
||||
self._f_compiler.customize(self.distribution)
|
||||
|
||||
libraries = self.libraries
|
||||
self.libraries = None
|
||||
self._f_compiler.customize_cmd(self)
|
||||
self.libraries = libraries
|
||||
|
||||
self._f_compiler.show_customization()
|
||||
else:
|
||||
self._f_compiler = None
|
||||
|
||||
self.build_libraries(self.libraries)
|
||||
|
||||
if self.inplace:
|
||||
for l in self.distribution.installed_libraries:
|
||||
libname = self.compiler.library_filename(l.name)
|
||||
source = os.path.join(self.build_clib, libname)
|
||||
target = os.path.join(l.target_dir, libname)
|
||||
self.mkpath(l.target_dir)
|
||||
shutil.copy(source, target)
|
||||
|
||||
def get_source_files(self):
|
||||
self.check_library_list(self.libraries)
|
||||
filenames = []
|
||||
for lib in self.libraries:
|
||||
filenames.extend(get_lib_source_files(lib))
|
||||
return filenames
|
||||
|
||||
def build_libraries(self, libraries):
|
||||
for (lib_name, build_info) in libraries:
|
||||
self.build_a_library(build_info, lib_name, libraries)
|
||||
|
||||
def assemble_flags(self, in_flags):
|
||||
""" Assemble flags from flag list
|
||||
|
||||
Parameters
|
||||
----------
|
||||
in_flags : None or sequence
|
||||
None corresponds to empty list. Sequence elements can be strings
|
||||
or callables that return lists of strings. Callable takes `self` as
|
||||
single parameter.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out_flags : list
|
||||
"""
|
||||
if in_flags is None:
|
||||
return []
|
||||
out_flags = []
|
||||
for in_flag in in_flags:
|
||||
if callable(in_flag):
|
||||
out_flags += in_flag(self)
|
||||
else:
|
||||
out_flags.append(in_flag)
|
||||
return out_flags
|
||||
|
||||
def build_a_library(self, build_info, lib_name, libraries):
|
||||
# default compilers
|
||||
compiler = self.compiler
|
||||
fcompiler = self._f_compiler
|
||||
|
||||
sources = build_info.get('sources')
|
||||
if sources is None or not is_sequence(sources):
|
||||
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
|
||||
"'sources' must be present and must be " +
|
||||
"a list of source filenames") % lib_name)
|
||||
sources = list(sources)
|
||||
|
||||
c_sources, cxx_sources, f_sources, fmodule_sources \
|
||||
= filter_sources(sources)
|
||||
requiref90 = not not fmodule_sources or \
|
||||
build_info.get('language', 'c') == 'f90'
|
||||
|
||||
# save source type information so that build_ext can use it.
|
||||
source_languages = []
|
||||
if c_sources:
|
||||
source_languages.append('c')
|
||||
if cxx_sources:
|
||||
source_languages.append('c++')
|
||||
if requiref90:
|
||||
source_languages.append('f90')
|
||||
elif f_sources:
|
||||
source_languages.append('f77')
|
||||
build_info['source_languages'] = source_languages
|
||||
|
||||
lib_file = compiler.library_filename(lib_name,
|
||||
output_dir=self.build_clib)
|
||||
depends = sources + build_info.get('depends', [])
|
||||
|
||||
force_rebuild = self.force
|
||||
if not self.disable_optimization and not self.compiler_opt.is_cached():
|
||||
log.debug("Detected changes on compiler optimizations")
|
||||
force_rebuild = True
|
||||
if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
|
||||
log.debug("skipping '%s' library (up-to-date)", lib_name)
|
||||
return
|
||||
else:
|
||||
log.info("building '%s' library", lib_name)
|
||||
|
||||
config_fc = build_info.get('config_fc', {})
|
||||
if fcompiler is not None and config_fc:
|
||||
log.info('using additional config_fc from setup script '
|
||||
'for fortran compiler: %s'
|
||||
% (config_fc,))
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90=requiref90,
|
||||
c_compiler=self.compiler)
|
||||
if fcompiler is not None:
|
||||
dist = self.distribution
|
||||
base_config_fc = dist.get_option_dict('config_fc').copy()
|
||||
base_config_fc.update(config_fc)
|
||||
fcompiler.customize(base_config_fc)
|
||||
|
||||
# check availability of Fortran compilers
|
||||
if (f_sources or fmodule_sources) and fcompiler is None:
|
||||
raise DistutilsError("library %s has Fortran sources"
|
||||
" but no Fortran compiler found" % (lib_name))
|
||||
|
||||
if fcompiler is not None:
|
||||
fcompiler.extra_f77_compile_args = build_info.get(
|
||||
'extra_f77_compile_args') or []
|
||||
fcompiler.extra_f90_compile_args = build_info.get(
|
||||
'extra_f90_compile_args') or []
|
||||
|
||||
macros = build_info.get('macros')
|
||||
if macros is None:
|
||||
macros = []
|
||||
include_dirs = build_info.get('include_dirs')
|
||||
if include_dirs is None:
|
||||
include_dirs = []
|
||||
# Flags can be strings, or callables that return a list of strings.
|
||||
extra_postargs = self.assemble_flags(
|
||||
build_info.get('extra_compiler_args'))
|
||||
extra_cflags = self.assemble_flags(
|
||||
build_info.get('extra_cflags'))
|
||||
extra_cxxflags = self.assemble_flags(
|
||||
build_info.get('extra_cxxflags'))
|
||||
|
||||
include_dirs.extend(get_numpy_include_dirs())
|
||||
# where compiled F90 module files are:
|
||||
module_dirs = build_info.get('module_dirs') or []
|
||||
module_build_dir = os.path.dirname(lib_file)
|
||||
if requiref90:
|
||||
self.mkpath(module_build_dir)
|
||||
|
||||
if compiler.compiler_type == 'msvc':
|
||||
# this hack works around the msvc compiler attributes
|
||||
# problem, msvc uses its own convention :(
|
||||
c_sources += cxx_sources
|
||||
cxx_sources = []
|
||||
|
||||
# filtering C dispatch-table sources when optimization is not disabled,
|
||||
# otherwise treated as normal sources.
|
||||
copt_c_sources = []
|
||||
copt_cxx_sources = []
|
||||
copt_baseline_flags = []
|
||||
copt_macros = []
|
||||
if not self.disable_optimization:
|
||||
bsrc_dir = self.get_finalized_command("build_src").build_src
|
||||
dispatch_hpath = os.path.join("numpy", "distutils", "include")
|
||||
dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
|
||||
include_dirs.append(dispatch_hpath)
|
||||
|
||||
copt_build_src = None if self.inplace else bsrc_dir
|
||||
for _srcs, _dst, _ext in (
|
||||
((c_sources,), copt_c_sources, ('.dispatch.c',)),
|
||||
((c_sources, cxx_sources), copt_cxx_sources,
|
||||
('.dispatch.cpp', '.dispatch.cxx'))
|
||||
):
|
||||
for _src in _srcs:
|
||||
_dst += [
|
||||
_src.pop(_src.index(s))
|
||||
for s in _src[:] if s.endswith(_ext)
|
||||
]
|
||||
copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
|
||||
else:
|
||||
copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
|
||||
|
||||
objects = []
|
||||
if copt_cxx_sources:
|
||||
log.info("compiling C++ dispatch-able sources")
|
||||
objects += self.compiler_opt.try_dispatch(
|
||||
copt_c_sources,
|
||||
output_dir=self.build_temp,
|
||||
src_dir=copt_build_src,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs + extra_cxxflags,
|
||||
ccompiler=cxx_compiler
|
||||
)
|
||||
|
||||
if copt_c_sources:
|
||||
log.info("compiling C dispatch-able sources")
|
||||
objects += self.compiler_opt.try_dispatch(
|
||||
copt_c_sources,
|
||||
output_dir=self.build_temp,
|
||||
src_dir=copt_build_src,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs + extra_cflags)
|
||||
|
||||
if c_sources:
|
||||
log.info("compiling C sources")
|
||||
objects += compiler.compile(
|
||||
c_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=(extra_postargs +
|
||||
copt_baseline_flags +
|
||||
extra_cflags))
|
||||
|
||||
if cxx_sources:
|
||||
log.info("compiling C++ sources")
|
||||
cxx_compiler = compiler.cxx_compiler()
|
||||
cxx_objects = cxx_compiler.compile(
|
||||
cxx_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=(extra_postargs +
|
||||
copt_baseline_flags +
|
||||
extra_cxxflags))
|
||||
objects.extend(cxx_objects)
|
||||
|
||||
if f_sources or fmodule_sources:
|
||||
extra_postargs = []
|
||||
f_objects = []
|
||||
|
||||
if requiref90:
|
||||
if fcompiler.module_dir_switch is None:
|
||||
existing_modules = glob('*.mod')
|
||||
extra_postargs += fcompiler.module_options(
|
||||
module_dirs, module_build_dir)
|
||||
|
||||
if fmodule_sources:
|
||||
log.info("compiling Fortran 90 module sources")
|
||||
f_objects += fcompiler.compile(fmodule_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs)
|
||||
|
||||
if requiref90 and self._f_compiler.module_dir_switch is None:
|
||||
# move new compiled F90 module files to module_build_dir
|
||||
for f in glob('*.mod'):
|
||||
if f in existing_modules:
|
||||
continue
|
||||
t = os.path.join(module_build_dir, f)
|
||||
if os.path.abspath(f) == os.path.abspath(t):
|
||||
continue
|
||||
if os.path.isfile(t):
|
||||
os.remove(t)
|
||||
try:
|
||||
self.move_file(f, module_build_dir)
|
||||
except DistutilsFileError:
|
||||
log.warn('failed to move %r to %r'
|
||||
% (f, module_build_dir))
|
||||
|
||||
if f_sources:
|
||||
log.info("compiling Fortran sources")
|
||||
f_objects += fcompiler.compile(f_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs)
|
||||
else:
|
||||
f_objects = []
|
||||
|
||||
if f_objects and not fcompiler.can_ccompiler_link(compiler):
|
||||
# Default linker cannot link Fortran object files, and results
|
||||
# need to be wrapped later. Instead of creating a real static
|
||||
# library, just keep track of the object files.
|
||||
listfn = os.path.join(self.build_clib,
|
||||
lib_name + '.fobjects')
|
||||
with open(listfn, 'w') as f:
|
||||
f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
|
||||
|
||||
listfn = os.path.join(self.build_clib,
|
||||
lib_name + '.cobjects')
|
||||
with open(listfn, 'w') as f:
|
||||
f.write("\n".join(os.path.abspath(obj) for obj in objects))
|
||||
|
||||
# create empty "library" file for dependency tracking
|
||||
lib_fname = os.path.join(self.build_clib,
|
||||
lib_name + compiler.static_lib_extension)
|
||||
with open(lib_fname, 'wb') as f:
|
||||
pass
|
||||
else:
|
||||
# assume that default linker is suitable for
|
||||
# linking Fortran object files
|
||||
objects.extend(f_objects)
|
||||
compiler.create_static_lib(objects, lib_name,
|
||||
output_dir=self.build_clib,
|
||||
debug=self.debug)
|
||||
|
||||
# fix library dependencies
|
||||
clib_libraries = build_info.get('libraries', [])
|
||||
for lname, binfo in libraries:
|
||||
if lname in clib_libraries:
|
||||
clib_libraries.extend(binfo.get('libraries', []))
|
||||
if clib_libraries:
|
||||
build_info['libraries'] = clib_libraries
|
733
.venv/Lib/site-packages/numpy/distutils/command/build_ext.py
Normal file
733
.venv/Lib/site-packages/numpy/distutils/command/build_ext.py
Normal file
@ -0,0 +1,733 @@
|
||||
""" Modified version of build_ext that handles fortran source files.
|
||||
|
||||
"""
|
||||
import os
|
||||
import subprocess
|
||||
from glob import glob
|
||||
|
||||
from distutils.dep_util import newer_group
|
||||
from distutils.command.build_ext import build_ext as old_build_ext
|
||||
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
|
||||
DistutilsError
|
||||
from distutils.file_util import copy_file
|
||||
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.exec_command import filepath_from_subprocess_output
|
||||
from numpy.distutils.system_info import combine_paths
|
||||
from numpy.distutils.misc_util import (
|
||||
filter_sources, get_ext_source_files, get_numpy_include_dirs,
|
||||
has_cxx_sources, has_f_sources, is_sequence
|
||||
)
|
||||
from numpy.distutils.command.config_compiler import show_fortran_compilers
|
||||
from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
|
||||
|
||||
class build_ext (old_build_ext):
|
||||
|
||||
description = "build C/C++/F extensions (compile/link to build directory)"
|
||||
|
||||
user_options = old_build_ext.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('parallel=', 'j',
|
||||
"number of parallel jobs"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
('simd-test=', None,
|
||||
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
|
||||
]
|
||||
|
||||
help_options = old_build_ext.help_options + [
|
||||
('help-fcompiler', None, "list available Fortran compilers",
|
||||
show_fortran_compilers),
|
||||
]
|
||||
|
||||
boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
|
||||
|
||||
def initialize_options(self):
|
||||
old_build_ext.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.parallel = None
|
||||
self.warn_error = None
|
||||
self.cpu_baseline = None
|
||||
self.cpu_dispatch = None
|
||||
self.disable_optimization = None
|
||||
self.simd_test = None
|
||||
|
||||
def finalize_options(self):
|
||||
if self.parallel:
|
||||
try:
|
||||
self.parallel = int(self.parallel)
|
||||
except ValueError as e:
|
||||
raise ValueError("--parallel/-j argument must be an integer") from e
|
||||
|
||||
# Ensure that self.include_dirs and self.distribution.include_dirs
|
||||
# refer to the same list object. finalize_options will modify
|
||||
# self.include_dirs, but self.distribution.include_dirs is used
|
||||
# during the actual build.
|
||||
# self.include_dirs is None unless paths are specified with
|
||||
# --include-dirs.
|
||||
# The include paths will be passed to the compiler in the order:
|
||||
# numpy paths, --include-dirs paths, Python include path.
|
||||
if isinstance(self.include_dirs, str):
|
||||
self.include_dirs = self.include_dirs.split(os.pathsep)
|
||||
incl_dirs = self.include_dirs or []
|
||||
if self.distribution.include_dirs is None:
|
||||
self.distribution.include_dirs = []
|
||||
self.include_dirs = self.distribution.include_dirs
|
||||
self.include_dirs.extend(incl_dirs)
|
||||
|
||||
old_build_ext.finalize_options(self)
|
||||
self.set_undefined_options('build',
|
||||
('parallel', 'parallel'),
|
||||
('warn_error', 'warn_error'),
|
||||
('cpu_baseline', 'cpu_baseline'),
|
||||
('cpu_dispatch', 'cpu_dispatch'),
|
||||
('disable_optimization', 'disable_optimization'),
|
||||
('simd_test', 'simd_test')
|
||||
)
|
||||
CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
|
||||
|
||||
    def run(self):
        """Build all configured extensions.

        Orchestrates the whole extension build: runs build_src (and, if
        needed, build_clib), creates and customizes the C/C++/Fortran
        compilers, resolves each extension's link language, builds every
        extension, and finally copies generated extra DLLs next to the
        packages (Windows only in practice).
        """
        if not self.extensions:
            return

        # Make sure that extension sources are complete.
        self.run_command('build_src')

        if self.distribution.has_c_libraries():
            if self.inplace:
                if self.distribution.have_run.get('build_clib'):
                    log.warn('build_clib already run, it is too late to '
                             'ensure in-place build of build_clib')
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                else:
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                    build_clib.inplace = 1
                    build_clib.ensure_finalized()
                    build_clib.run()
                    self.distribution.have_run['build_clib'] = 1

            else:
                self.run_command('build_clib')
                build_clib = self.get_finalized_command('build_clib')
            self.library_dirs.append(build_clib.build_clib)
        else:
            build_clib = None

        # Not including C libraries to the list of
        # extension libraries automatically to prevent
        # bogus linking commands. Extensions must
        # explicitly specify the C libraries that they use.

        from distutils.ccompiler import new_compiler
        from numpy.distutils.fcompiler import new_fcompiler

        compiler_type = self.compiler
        # Initialize C compiler:
        self.compiler = new_compiler(compiler=compiler_type,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        self.compiler.customize(self.distribution)
        self.compiler.customize_cmd(self)

        if self.warn_error:
            self.compiler.compiler.append('-Werror')
            self.compiler.compiler_so.append('-Werror')

        self.compiler.show_customization()

        if not self.disable_optimization:
            dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
            dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
            opt_cache_path = os.path.abspath(
                os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
            )
            if hasattr(self, "compiler_opt"):
                # By default `CCompilerOpt` update the cache at the exit of
                # the process, which may lead to duplicate building
                # (see build_extension()/force_rebuild) if run() called
                # multiple times within the same os process/thread without
                # giving the chance the previous instances of `CCompilerOpt`
                # to update the cache.
                self.compiler_opt.cache_flush()

            self.compiler_opt = new_ccompiler_opt(
                compiler=self.compiler, dispatch_hpath=dispatch_hpath,
                cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
                cache_path=opt_cache_path
            )
            def report(copt):
                # Registered with atexit so the optimization summary is
                # printed after all compilation output.
                log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
                log.info(copt.report(full=True))

            import atexit
            atexit.register(report, self.compiler_opt)

        # Setup directory for storing generated extra DLL files on Windows
        self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
        if not os.path.isdir(self.extra_dll_dir):
            os.makedirs(self.extra_dll_dir)

        # Create mapping of libraries built by build_clib:
        clibs = {}
        if build_clib is not None:
            for libname, build_info in build_clib.libraries or []:
                if libname in clibs and clibs[libname] != build_info:
                    log.warn('library %r defined more than once,'
                             ' overwriting build_info\n%s... \nwith\n%s...'
                             % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
                clibs[libname] = build_info
        # .. and distribution libraries:
        for libname, build_info in self.distribution.libraries or []:
            if libname in clibs:
                # build_clib libraries have a precedence before distribution ones
                continue
            clibs[libname] = build_info

        # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
        # Update extension libraries, library_dirs, and macros.
        all_languages = set()
        for ext in self.extensions:
            ext_languages = set()
            c_libs = []
            c_lib_dirs = []
            macros = []
            for libname in ext.libraries:
                if libname in clibs:
                    binfo = clibs[libname]
                    c_libs += binfo.get('libraries', [])
                    c_lib_dirs += binfo.get('library_dirs', [])
                    for m in binfo.get('macros', []):
                        if m not in macros:
                            macros.append(m)

                for l in clibs.get(libname, {}).get('source_languages', []):
                    ext_languages.add(l)
            if c_libs:
                new_c_libs = ext.libraries + c_libs
                log.info('updating extension %r libraries from %r to %r'
                         % (ext.name, ext.libraries, new_c_libs))
                ext.libraries = new_c_libs
                ext.library_dirs = ext.library_dirs + c_lib_dirs
            if macros:
                log.info('extending extension %r defined_macros with %r'
                         % (ext.name, macros))
                ext.define_macros = ext.define_macros + macros

            # determine extension languages
            if has_f_sources(ext.sources):
                ext_languages.add('f77')
            if has_cxx_sources(ext.sources):
                ext_languages.add('c++')
            l = ext.language or self.compiler.detect_language(ext.sources)
            if l:
                ext_languages.add(l)

            # reset language attribute for choosing proper linker
            #
            # When we build extensions with multiple languages, we have to
            # choose a linker. The rules here are:
            # 1. if there is Fortran code, always prefer the Fortran linker,
            # 2. otherwise prefer C++ over C,
            # 3. Users can force a particular linker by using
            #        `language='c'` # or 'c++', 'f90', 'f77'
            #    in their config.add_extension() calls.
            if 'c++' in ext_languages:
                ext_language = 'c++'
            else:
                ext_language = 'c'  # default

            has_fortran = False
            if 'f90' in ext_languages:
                ext_language = 'f90'
                has_fortran = True
            elif 'f77' in ext_languages:
                ext_language = 'f77'
                has_fortran = True

            if not ext.language or has_fortran:
                if l and l != ext_language and ext.language:
                    log.warn('resetting extension %r language from %r to %r.' %
                             (ext.name, l, ext_language))

                ext.language = ext_language

            # global language
            all_languages.update(ext_languages)

        need_f90_compiler = 'f90' in all_languages
        need_f77_compiler = 'f77' in all_languages
        need_cxx_compiler = 'c++' in all_languages

        # Initialize C++ compiler:
        if need_cxx_compiler:
            self._cxx_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            compiler = self._cxx_compiler
            compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
            compiler.customize_cmd(self)
            compiler.show_customization()
            self._cxx_compiler = compiler.cxx_compiler()
        else:
            self._cxx_compiler = None

        # Initialize Fortran 77 compiler:
        if need_f77_compiler:
            ctype = self.fcompiler
            self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=False,
                                               c_compiler=self.compiler)
            fcompiler = self._f77_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f77_compiler=%s is not available.' %
                          (ctype))
                self._f77_compiler = None
        else:
            self._f77_compiler = None

        # Initialize Fortran 90 compiler:
        if need_f90_compiler:
            ctype = self.fcompiler
            self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=True,
                                               c_compiler=self.compiler)
            fcompiler = self._f90_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f90_compiler=%s is not available.' %
                          (ctype))
                self._f90_compiler = None
        else:
            self._f90_compiler = None

        # Build extensions
        self.build_extensions()

        # Copy over any extra DLL files
        # FIXME: In the case where there are more than two packages,
        # we blindly assume that both packages need all of the libraries,
        # resulting in a larger wheel than is required. This should be fixed,
        # but it's so rare that I won't bother to handle it.
        pkg_roots = {
            self.get_ext_fullname(ext.name).split('.')[0]
            for ext in self.extensions
        }
        for pkg_root in pkg_roots:
            shared_lib_dir = os.path.join(pkg_root, '.libs')
            if not self.inplace:
                shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
            for fn in os.listdir(self.extra_dll_dir):
                if not os.path.isdir(shared_lib_dir):
                    os.makedirs(shared_lib_dir)
                if not fn.lower().endswith('.dll'):
                    continue
                runtime_lib = os.path.join(self.extra_dll_dir, fn)
                copy_file(runtime_lib, shared_lib_dir)
def swig_sources(self, sources, extensions=None):
|
||||
# Do nothing. Swig sources have been handled in build_src command.
|
||||
return sources
|
||||
|
||||
    def build_extension(self, ext):
        """Compile and link a single extension *ext*.

        Validates the sources, decides whether a rebuild is needed,
        splits the sources by language (C / C++ / Fortran / Fortran 90
        modules), compiles each group with the appropriate compiler —
        including CPU-dispatched sources when optimization is enabled —
        and finally links everything with the linker matching the
        extension's resolved language.
        """
        sources = ext.sources
        if sources is None or not is_sequence(sources):
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), " +
                 "'sources' must be present and must be " +
                 "a list of source filenames") % ext.name)
        sources = list(sources)

        if not sources:
            return

        fullname = self.get_ext_fullname(ext.name)
        if self.inplace:
            modpath = fullname.split('.')
            package = '.'.join(modpath[0:-1])
            base = modpath[-1]
            build_py = self.get_finalized_command('build_py')
            package_dir = build_py.get_package_dir(package)
            ext_filename = os.path.join(package_dir,
                                        self.get_ext_filename(base))
        else:
            ext_filename = os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname))
        depends = sources + ext.depends

        force_rebuild = self.force
        if not self.disable_optimization and not self.compiler_opt.is_cached():
            log.debug("Detected changes on compiler optimizations")
            force_rebuild = True
        if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)

        extra_args = ext.extra_compile_args or []
        extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
        extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []

        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # (name,) 1-tuples mean "undefine" to distutils compilers.
            macros.append((undef,))

        c_sources, cxx_sources, f_sources, fmodule_sources = \
            filter_sources(ext.sources)

        if self.compiler.compiler_type == 'msvc':
            if cxx_sources:
                # Needed to compile kiva.agg._agg extension.
                extra_args.append('/Zm1000')
            # this hack works around the msvc compiler attributes
            # problem, msvc uses its own convention :(
            c_sources += cxx_sources
            cxx_sources = []

        # Set Fortran/C++ compilers for compilation and linking.
        if ext.language == 'f90':
            fcompiler = self._f90_compiler
        elif ext.language == 'f77':
            fcompiler = self._f77_compiler
        else:  # in case ext.language is c++, for instance
            fcompiler = self._f90_compiler or self._f77_compiler
        if fcompiler is not None:
            fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
                ext, 'extra_f77_compile_args') else []
            fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
                ext, 'extra_f90_compile_args') else []
        cxx_compiler = self._cxx_compiler

        # check for the availability of required compilers
        # NOTE(review): the next message is missing a space between
        # "sources" and "but" (adjacent string literals).
        if cxx_sources and cxx_compiler is None:
            raise DistutilsError("extension %r has C++ sources"
                                 "but no C++ compiler found" % (ext.name))
        if (f_sources or fmodule_sources) and fcompiler is None:
            raise DistutilsError("extension %r has Fortran sources "
                                 "but no Fortran compiler found" % (ext.name))
        if ext.language in ['f77', 'f90'] and fcompiler is None:
            self.warn("extension %r has Fortran libraries "
                      "but no Fortran linker found, using default linker" % (ext.name))
        if ext.language == 'c++' and cxx_compiler is None:
            self.warn("extension %r has C++ libraries "
                      "but no C++ linker found, using default linker" % (ext.name))

        kws = {'depends': ext.depends}
        output_dir = self.build_temp

        include_dirs = ext.include_dirs + get_numpy_include_dirs()

        # filtering C dispatch-table sources when optimization is not disabled,
        # otherwise treated as normal sources.
        copt_c_sources = []
        copt_cxx_sources = []
        copt_baseline_flags = []
        copt_macros = []
        if not self.disable_optimization:
            bsrc_dir = self.get_finalized_command("build_src").build_src
            dispatch_hpath = os.path.join("numpy", "distutils", "include")
            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
            include_dirs.append(dispatch_hpath)

            copt_build_src = None if self.inplace else bsrc_dir
            # Pull *.dispatch.c / *.dispatch.cpp / *.dispatch.cxx out of the
            # normal source lists; they are compiled via try_dispatch below.
            for _srcs, _dst, _ext in (
                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
                ((c_sources, cxx_sources), copt_cxx_sources,
                    ('.dispatch.cpp', '.dispatch.cxx'))
            ):
                for _src in _srcs:
                    _dst += [
                        _src.pop(_src.index(s))
                        for s in _src[:] if s.endswith(_ext)
                    ]
            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
        else:
            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))

        c_objects = []
        if copt_cxx_sources:
            log.info("compiling C++ dispatch-able sources")
            c_objects += self.compiler_opt.try_dispatch(
                copt_cxx_sources,
                output_dir=output_dir,
                src_dir=copt_build_src,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_args + extra_cxxflags,
                ccompiler=cxx_compiler,
                **kws
            )
        if copt_c_sources:
            log.info("compiling C dispatch-able sources")
            c_objects += self.compiler_opt.try_dispatch(
                copt_c_sources,
                output_dir=output_dir,
                src_dir=copt_build_src,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_args + extra_cflags,
                **kws)
        if c_sources:
            log.info("compiling C sources")
            c_objects += self.compiler.compile(
                c_sources,
                output_dir=output_dir,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=(extra_args + copt_baseline_flags +
                                extra_cflags),
                **kws)
        if cxx_sources:
            log.info("compiling C++ sources")
            c_objects += cxx_compiler.compile(
                cxx_sources,
                output_dir=output_dir,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=(extra_args + copt_baseline_flags +
                                extra_cxxflags),
                **kws)

        extra_postargs = []
        f_objects = []
        if fmodule_sources:
            log.info("compiling Fortran 90 module sources")
            module_dirs = ext.module_dirs[:]
            module_build_dir = os.path.join(
                self.build_temp, os.path.dirname(
                    self.get_ext_filename(fullname)))

            self.mkpath(module_build_dir)
            if fcompiler.module_dir_switch is None:
                # Compiler drops .mod files in the CWD; remember what was
                # there so only newly created ones get moved afterwards.
                existing_modules = glob('*.mod')
            extra_postargs += fcompiler.module_options(
                module_dirs, module_build_dir)
            f_objects += fcompiler.compile(fmodule_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)

            if fcompiler.module_dir_switch is None:
                for f in glob('*.mod'):
                    if f in existing_modules:
                        continue
                    t = os.path.join(module_build_dir, f)
                    if os.path.abspath(f) == os.path.abspath(t):
                        continue
                    if os.path.isfile(t):
                        os.remove(t)
                    try:
                        self.move_file(f, module_build_dir)
                    except DistutilsFileError:
                        log.warn('failed to move %r to %r' %
                                 (f, module_build_dir))
        if f_sources:
            log.info("compiling Fortran sources")
            f_objects += fcompiler.compile(f_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)

        if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
            unlinkable_fobjects = f_objects
            objects = c_objects
        else:
            unlinkable_fobjects = []
            objects = c_objects + f_objects

        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        libraries = self.get_libraries(ext)[:]
        library_dirs = ext.library_dirs[:]

        linker = self.compiler.link_shared_object
        # Always use system linker when using MSVC compiler.
        if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
            # expand libraries with fcompiler libraries as we are
            # not using fcompiler linker
            self._libs_with_msvc_and_fortran(
                fcompiler, libraries, library_dirs)

        elif ext.language in ['f77', 'f90'] and fcompiler is not None:
            linker = fcompiler.link_shared_object
        if ext.language == 'c++' and cxx_compiler is not None:
            linker = cxx_compiler.link_shared_object

        if fcompiler is not None:
            objects, libraries = self._process_unlinkable_fobjects(
                objects, libraries,
                fcompiler, library_dirs,
                unlinkable_fobjects)

        linker(objects, ext_filename,
               libraries=libraries,
               library_dirs=library_dirs,
               runtime_library_dirs=ext.runtime_library_dirs,
               extra_postargs=extra_args,
               export_symbols=self.get_export_symbols(ext),
               debug=self.debug,
               build_temp=self.build_temp,
               target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
|
||||
build_src = self.get_finalized_command("build_src").build_src
|
||||
build_clib = self.get_finalized_command("build_clib").build_clib
|
||||
objects = self.compiler.compile([os.path.join(build_src,
|
||||
"gfortran_vs2003_hack.c")],
|
||||
output_dir=self.build_temp)
|
||||
self.compiler.create_static_lib(
|
||||
objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
|
||||
|
||||
def _process_unlinkable_fobjects(self, objects, libraries,
|
||||
fcompiler, library_dirs,
|
||||
unlinkable_fobjects):
|
||||
libraries = list(libraries)
|
||||
objects = list(objects)
|
||||
unlinkable_fobjects = list(unlinkable_fobjects)
|
||||
|
||||
# Expand possible fake static libraries to objects;
|
||||
# make sure to iterate over a copy of the list as
|
||||
# "fake" libraries will be removed as they are
|
||||
# encountered
|
||||
for lib in libraries[:]:
|
||||
for libdir in library_dirs:
|
||||
fake_lib = os.path.join(libdir, lib + '.fobjects')
|
||||
if os.path.isfile(fake_lib):
|
||||
# Replace fake static library
|
||||
libraries.remove(lib)
|
||||
with open(fake_lib, 'r') as f:
|
||||
unlinkable_fobjects.extend(f.read().splitlines())
|
||||
|
||||
# Expand C objects
|
||||
c_lib = os.path.join(libdir, lib + '.cobjects')
|
||||
with open(c_lib, 'r') as f:
|
||||
objects.extend(f.read().splitlines())
|
||||
|
||||
# Wrap unlinkable objects to a linkable one
|
||||
if unlinkable_fobjects:
|
||||
fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
|
||||
wrapped = fcompiler.wrap_unlinkable_objects(
|
||||
fobjects, output_dir=self.build_temp,
|
||||
extra_dll_dir=self.extra_dll_dir)
|
||||
objects.extend(wrapped)
|
||||
|
||||
return objects, libraries
|
||||
|
||||
    def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
                                    c_library_dirs):
        """Make Fortran-compiler libraries usable from the MSVC linker.

        Mutates *c_libraries* and *c_library_dirs* in place: copies
        g77-style ``lib<name>.a`` archives to ``<name>.lib`` in
        ``build_temp`` so MSVC can find them, and appends the Fortran
        compiler's own library dirs/libraries.  No-op when *fcompiler*
        is None.
        """
        if fcompiler is None:
            return

        for libname in c_libraries:
            if libname.startswith('msvc'):
                continue
            fileexists = False
            for libdir in c_library_dirs or []:
                libfile = os.path.join(libdir, '%s.lib' % (libname))
                if os.path.isfile(libfile):
                    fileexists = True
                    break
            if fileexists:
                continue
            # make g77-compiled static libs available to MSVC
            fileexists = False
            for libdir in c_library_dirs:
                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                if os.path.isfile(libfile):
                    # copy libname.a file to name.lib so that MSVC linker
                    # can find it
                    libfile2 = os.path.join(self.build_temp, libname + '.lib')
                    copy_file(libfile, libfile2)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
                    fileexists = True
                    break
            if fileexists:
                continue
            log.warn('could not find library %r in directories %s'
                     % (libname, c_library_dirs))

        # Always use system linker when using MSVC compiler.
        f_lib_dirs = []
        for dir in fcompiler.library_dirs:
            # correct path when compiling in Cygwin but with normal Win
            # Python
            if dir.startswith('/usr/lib'):
                try:
                    dir = subprocess.check_output(['cygpath', '-w', dir])
                except (OSError, subprocess.CalledProcessError):
                    # cygpath unavailable/failed: keep the POSIX path as-is.
                    pass
                else:
                    dir = filepath_from_subprocess_output(dir)
            f_lib_dirs.append(dir)
        c_library_dirs.extend(f_lib_dirs)

        # make g77-compiled static libs available to MSVC
        for lib in fcompiler.libraries:
            if not lib.startswith('msvc'):
                c_libraries.append(lib)
                p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
                if p:
                    dst_name = os.path.join(self.build_temp, lib + '.lib')
                    if not os.path.isfile(dst_name):
                        copy_file(p[0], dst_name)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
def get_source_files(self):
|
||||
self.check_extensions_list(self.extensions)
|
||||
filenames = []
|
||||
for ext in self.extensions:
|
||||
filenames.extend(get_ext_source_files(ext))
|
||||
return filenames
|
||||
|
||||
def get_outputs(self):
|
||||
self.check_extensions_list(self.extensions)
|
||||
|
||||
outputs = []
|
||||
for ext in self.extensions:
|
||||
if not ext.sources:
|
||||
continue
|
||||
fullname = self.get_ext_fullname(ext.name)
|
||||
outputs.append(os.path.join(self.build_lib,
|
||||
self.get_ext_filename(fullname)))
|
||||
return outputs
|
31
.venv/Lib/site-packages/numpy/distutils/command/build_py.py
Normal file
31
.venv/Lib/site-packages/numpy/distutils/command/build_py.py
Normal file
@ -0,0 +1,31 @@
|
||||
from distutils.command.build_py import build_py as old_build_py
|
||||
from numpy.distutils.misc_util import is_string
|
||||
|
||||
class build_py(old_build_py):
    """build_py variant that also picks up modules generated by build_src."""

    def run(self):
        """Seed self.packages from build_src output, then run as usual."""
        build_src = self.get_finalized_command('build_src')
        if build_src.py_modules_dict and self.packages is None:
            self.packages = list(build_src.py_modules_dict)
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        """Extend the base result with build_src generated *.py files."""
        found = old_build_py.find_package_modules(self, package, package_dir)
        build_src = self.get_finalized_command('build_src')
        return found + build_src.py_modules_dict.get(package, [])

    def find_modules(self):
        """Run the base finder with non-string py_modules entries hidden."""
        saved_modules = self.py_modules[:]
        self.py_modules[:] = [m for m in self.py_modules if is_string(m)]
        modules = old_build_py.find_modules(self)
        self.py_modules[:] = saved_modules
        return modules
||||
|
||||
# XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
|
||||
# and item[2] is source file.
|
@ -0,0 +1,49 @@
|
||||
""" Modified version of build_scripts that handles building scripts from functions.
|
||||
|
||||
"""
|
||||
from distutils.command.build_scripts import build_scripts as old_build_scripts
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.misc_util import is_string
|
||||
|
||||
class build_scripts(old_build_scripts):
    """build_scripts variant that accepts callables as script entries."""

    def generate_scripts(self, scripts):
        """Materialize callable script entries into files under build_dir.

        String entries pass through unchanged.  Each callable is invoked
        with the build directory and may return a script path, a sequence
        of paths, or a false value (which is skipped).
        """
        generated = [s for s in scripts if is_string(s)]
        factories = [s for s in scripts if not is_string(s)]
        if not factories:
            return generated

        self.mkpath(self.build_dir)
        for factory in factories:
            produced = factory(self.build_dir)
            if not produced:
                continue
            if is_string(produced):
                log.info(" adding '%s' to scripts" % (produced,))
                generated.append(produced)
            else:
                for item in produced:
                    log.info(" adding '%s' to scripts" % (item,))
                generated.extend(list(produced))
        return generated

    def run(self):
        """Generate scripts, sync them onto the distribution, then build."""
        if not self.scripts:
            return

        self.scripts = self.generate_scripts(self.scripts)
        # setuptools' develop command requires distribution.scripts to be
        # a list of filenames, not functions.
        self.distribution.scripts = self.scripts

        return old_build_scripts.run(self)

    def get_source_files(self):
        """Return the source files behind the configured scripts."""
        from numpy.distutils.misc_util import get_script_files
        return get_script_files(self.scripts)
773
.venv/Lib/site-packages/numpy/distutils/command/build_src.py
Normal file
773
.venv/Lib/site-packages/numpy/distutils/command/build_src.py
Normal file
@ -0,0 +1,773 @@
|
||||
""" Build swig and f2py sources.
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shlex
|
||||
import copy
|
||||
|
||||
from distutils.command import build_ext
|
||||
from distutils.dep_util import newer_group, newer
|
||||
from distutils.util import get_platform
|
||||
from distutils.errors import DistutilsError, DistutilsSetupError
|
||||
|
||||
|
||||
# this import can't be done here, as it uses numpy stuff only available
|
||||
# after it's installed
|
||||
#import numpy.f2py
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.misc_util import (
|
||||
fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
|
||||
)
|
||||
from numpy.distutils.from_template import process_file as process_f_file
|
||||
from numpy.distutils.conv_template import process_file as process_c_file
|
||||
|
||||
def subst_vars(target, source, d):
    """Substitute any occurrence of @foo@ by d['foo'] from source file into
    target.

    Parameters
    ----------
    target : str
        Path of the file to write.
    source : str
        Path of the template file to read.
    d : dict
        Maps variable names (the ``foo`` in ``@foo@``) to replacement text.

    Raises
    ------
    KeyError
        If the template references a variable that is missing from *d*.
    """
    var = re.compile(r'@([a-zA-Z_]+)@')
    with open(source, 'r') as fs:
        with open(target, 'w') as ft:
            for l in fs:
                # Substitute every @var@ occurrence on the line.  The
                # previous implementation only replaced the first variable
                # name found, so lines referencing several distinct
                # variables were left partially unsubstituted.
                ft.write(var.sub(lambda m: d[m.group(1)], l))
||||
|
||||
class build_src(build_ext.build_ext):
    """distutils command that generates C/Python sources from SWIG ``.i``
    files, f2py ``.pyf`` files, ``.src`` templates, or callables listed in
    ``sources`` before the real build_ext/build_clib steps run."""

    description = "build sources from SWIG, F2PY files or a function"

    # (long-option, short-option, help-text) triples, distutils-style.
    user_options = [
        ('build-src=', 'd', "directory to \"build\" sources to"),
        ('f2py-opts=', None, "list of f2py command line options"),
        ('swig=', None, "path to the SWIG executable"),
        ('swig-opts=', None, "list of SWIG command line options"),
        ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
        ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
        ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ('verbose-cfg', None,
         "change logging level from WARN to INFO which will show all " +
         "compiler output")
        ]

    # Options that are flags (present/absent) rather than value-taking.
    boolean_options = ['force', 'inplace', 'verbose-cfg']

    help_options = []
|
||||
|
||||
    def initialize_options(self):
        """Set every command option to None; real values are resolved in
        ``finalize_options`` (standard distutils command protocol)."""
        self.extensions = None
        self.package = None
        self.py_modules = None
        self.py_modules_dict = None
        self.build_src = None
        self.build_lib = None
        self.build_base = None
        self.force = None
        self.inplace = None
        self.package_dir = None
        self.f2pyflags = None # obsolete
        self.f2py_opts = None
        self.swigflags = None # obsolete
        self.swig_opts = None
        self.swig_cpp = None
        self.swig = None
        self.verbose_cfg = None
|
||||
|
||||
    def finalize_options(self):
        """Resolve option values from the distribution, the 'build' command
        and the 'build_ext' command; split string options into lists."""
        # Inherit build directories / force flag from the top-level 'build'.
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        if self.package is None:
            self.package = self.distribution.ext_package
        self.extensions = self.distribution.ext_modules
        self.libraries = self.distribution.libraries or []
        self.py_modules = self.distribution.py_modules or []
        self.data_files = self.distribution.data_files or []

        if self.build_src is None:
            # e.g. build/src.linux-x86_64-3.10
            plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
            self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)

        # py_modules_dict is used in build_py.find_package_modules
        self.py_modules_dict = {}

        # Obsolete --f2pyflags is honored only when --f2py-opts is unset.
        if self.f2pyflags:
            if self.f2py_opts:
                log.warn('ignoring --f2pyflags as --f2py-opts already used')
            else:
                self.f2py_opts = self.f2pyflags
            self.f2pyflags = None
        if self.f2py_opts is None:
            self.f2py_opts = []
        else:
            # Command line gives a single string; split it shell-style.
            self.f2py_opts = shlex.split(self.f2py_opts)

        # Same obsolete-option handling for swig.
        if self.swigflags:
            if self.swig_opts:
                log.warn('ignoring --swigflags as --swig-opts already used')
            else:
                self.swig_opts = self.swigflags
            self.swigflags = None

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = shlex.split(self.swig_opts)

        # use options from build_ext command
        build_ext = self.get_finalized_command('build_ext')
        if self.inplace is None:
            self.inplace = build_ext.inplace
        if self.swig_cpp is None:
            self.swig_cpp = build_ext.swig_cpp
        # NOTE(review): 'swig_opt' (singular) — build_ext defines 'swig_opts',
        # so getattr likely returns None and this entry looks inert; confirm
        # upstream intent before "fixing".
        for c in ['swig', 'swig_opt']:
            o = '--'+c.replace('_', '-')
            v = getattr(build_ext, c, None)
            if v:
                if getattr(self, c):
                    log.warn('both build_src and build_ext define %s option' % (o))
                else:
                    log.info('using "%s=%s" option from build_ext command' % (o, v))
                    setattr(self, c, v)
|
||||
|
||||
    def run(self):
        """Entry point: generate all sources unless there is nothing to build."""
        log.info("build_src")
        if not (self.extensions or self.libraries):
            return
        self.build_sources()
|
||||
|
||||
def build_sources(self):
|
||||
|
||||
if self.inplace:
|
||||
self.get_package_dir = \
|
||||
self.get_finalized_command('build_py').get_package_dir
|
||||
|
||||
self.build_py_modules_sources()
|
||||
|
||||
for libname_info in self.libraries:
|
||||
self.build_library_sources(*libname_info)
|
||||
|
||||
if self.extensions:
|
||||
self.check_extensions_list(self.extensions)
|
||||
|
||||
for ext in self.extensions:
|
||||
self.build_extension_sources(ext)
|
||||
|
||||
self.build_data_files_sources()
|
||||
self.build_npy_pkg_config()
|
||||
|
||||
def build_data_files_sources(self):
|
||||
if not self.data_files:
|
||||
return
|
||||
log.info('building data_files sources')
|
||||
from numpy.distutils.misc_util import get_data_files
|
||||
new_data_files = []
|
||||
for data in self.data_files:
|
||||
if isinstance(data, str):
|
||||
new_data_files.append(data)
|
||||
elif isinstance(data, tuple):
|
||||
d, files = data
|
||||
if self.inplace:
|
||||
build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
|
||||
else:
|
||||
build_dir = os.path.join(self.build_src, d)
|
||||
funcs = [f for f in files if hasattr(f, '__call__')]
|
||||
files = [f for f in files if not hasattr(f, '__call__')]
|
||||
for f in funcs:
|
||||
if f.__code__.co_argcount==1:
|
||||
s = f(build_dir)
|
||||
else:
|
||||
s = f()
|
||||
if s is not None:
|
||||
if isinstance(s, list):
|
||||
files.extend(s)
|
||||
elif isinstance(s, str):
|
||||
files.append(s)
|
||||
else:
|
||||
raise TypeError(repr(s))
|
||||
filenames = get_data_files((d, files))
|
||||
new_data_files.append((d, filenames))
|
||||
else:
|
||||
raise TypeError(repr(data))
|
||||
self.data_files[:] = new_data_files
|
||||
|
||||
|
||||
def _build_npy_pkg_config(self, info, gd):
|
||||
template, install_dir, subst_dict = info
|
||||
template_dir = os.path.dirname(template)
|
||||
for k, v in gd.items():
|
||||
subst_dict[k] = v
|
||||
|
||||
if self.inplace == 1:
|
||||
generated_dir = os.path.join(template_dir, install_dir)
|
||||
else:
|
||||
generated_dir = os.path.join(self.build_src, template_dir,
|
||||
install_dir)
|
||||
generated = os.path.basename(os.path.splitext(template)[0])
|
||||
generated_path = os.path.join(generated_dir, generated)
|
||||
if not os.path.exists(generated_dir):
|
||||
os.makedirs(generated_dir)
|
||||
|
||||
subst_vars(generated_path, template, subst_dict)
|
||||
|
||||
# Where to install relatively to install prefix
|
||||
full_install_dir = os.path.join(template_dir, install_dir)
|
||||
return full_install_dir, generated_path
|
||||
|
||||
def build_npy_pkg_config(self):
|
||||
log.info('build_src: building npy-pkg config files')
|
||||
|
||||
# XXX: another ugly workaround to circumvent distutils brain damage. We
|
||||
# need the install prefix here, but finalizing the options of the
|
||||
# install command when only building sources cause error. Instead, we
|
||||
# copy the install command instance, and finalize the copy so that it
|
||||
# does not disrupt how distutils want to do things when with the
|
||||
# original install command instance.
|
||||
install_cmd = copy.copy(get_cmd('install'))
|
||||
if not install_cmd.finalized == 1:
|
||||
install_cmd.finalize_options()
|
||||
build_npkg = False
|
||||
if self.inplace == 1:
|
||||
top_prefix = '.'
|
||||
build_npkg = True
|
||||
elif hasattr(install_cmd, 'install_libbase'):
|
||||
top_prefix = install_cmd.install_libbase
|
||||
build_npkg = True
|
||||
|
||||
if build_npkg:
|
||||
for pkg, infos in self.distribution.installed_pkg_config.items():
|
||||
pkg_path = self.distribution.package_dir[pkg]
|
||||
prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
|
||||
d = {'prefix': prefix}
|
||||
for info in infos:
|
||||
install_dir, generated = self._build_npy_pkg_config(info, d)
|
||||
self.distribution.data_files.append((install_dir,
|
||||
[generated]))
|
||||
|
||||
def build_py_modules_sources(self):
|
||||
if not self.py_modules:
|
||||
return
|
||||
log.info('building py_modules sources')
|
||||
new_py_modules = []
|
||||
for source in self.py_modules:
|
||||
if is_sequence(source) and len(source)==3:
|
||||
package, module_base, source = source
|
||||
if self.inplace:
|
||||
build_dir = self.get_package_dir(package)
|
||||
else:
|
||||
build_dir = os.path.join(self.build_src,
|
||||
os.path.join(*package.split('.')))
|
||||
if hasattr(source, '__call__'):
|
||||
target = os.path.join(build_dir, module_base + '.py')
|
||||
source = source(target)
|
||||
if source is None:
|
||||
continue
|
||||
modules = [(package, module_base, source)]
|
||||
if package not in self.py_modules_dict:
|
||||
self.py_modules_dict[package] = []
|
||||
self.py_modules_dict[package] += modules
|
||||
else:
|
||||
new_py_modules.append(source)
|
||||
self.py_modules[:] = new_py_modules
|
||||
|
||||
def build_library_sources(self, lib_name, build_info):
|
||||
sources = list(build_info.get('sources', []))
|
||||
|
||||
if not sources:
|
||||
return
|
||||
|
||||
log.info('building library "%s" sources' % (lib_name))
|
||||
|
||||
sources = self.generate_sources(sources, (lib_name, build_info))
|
||||
|
||||
sources = self.template_sources(sources, (lib_name, build_info))
|
||||
|
||||
sources, h_files = self.filter_h_files(sources)
|
||||
|
||||
if h_files:
|
||||
log.info('%s - nothing done with h_files = %s',
|
||||
self.package, h_files)
|
||||
|
||||
#for f in h_files:
|
||||
# self.distribution.headers.append((lib_name,f))
|
||||
|
||||
build_info['sources'] = sources
|
||||
return
|
||||
|
||||
def build_extension_sources(self, ext):
|
||||
|
||||
sources = list(ext.sources)
|
||||
|
||||
log.info('building extension "%s" sources' % (ext.name))
|
||||
|
||||
fullname = self.get_ext_fullname(ext.name)
|
||||
|
||||
modpath = fullname.split('.')
|
||||
package = '.'.join(modpath[0:-1])
|
||||
|
||||
if self.inplace:
|
||||
self.ext_target_dir = self.get_package_dir(package)
|
||||
|
||||
sources = self.generate_sources(sources, ext)
|
||||
sources = self.template_sources(sources, ext)
|
||||
sources = self.swig_sources(sources, ext)
|
||||
sources = self.f2py_sources(sources, ext)
|
||||
sources = self.pyrex_sources(sources, ext)
|
||||
|
||||
sources, py_files = self.filter_py_files(sources)
|
||||
|
||||
if package not in self.py_modules_dict:
|
||||
self.py_modules_dict[package] = []
|
||||
modules = []
|
||||
for f in py_files:
|
||||
module = os.path.splitext(os.path.basename(f))[0]
|
||||
modules.append((package, module, f))
|
||||
self.py_modules_dict[package] += modules
|
||||
|
||||
sources, h_files = self.filter_h_files(sources)
|
||||
|
||||
if h_files:
|
||||
log.info('%s - nothing done with h_files = %s',
|
||||
package, h_files)
|
||||
#for f in h_files:
|
||||
# self.distribution.headers.append((package,f))
|
||||
|
||||
ext.sources = sources
|
||||
|
||||
def generate_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
func_sources = []
|
||||
for source in sources:
|
||||
if is_string(source):
|
||||
new_sources.append(source)
|
||||
else:
|
||||
func_sources.append(source)
|
||||
if not func_sources:
|
||||
return new_sources
|
||||
if self.inplace and not is_sequence(extension):
|
||||
build_dir = self.ext_target_dir
|
||||
else:
|
||||
if is_sequence(extension):
|
||||
name = extension[0]
|
||||
# if 'include_dirs' not in extension[1]:
|
||||
# extension[1]['include_dirs'] = []
|
||||
# incl_dirs = extension[1]['include_dirs']
|
||||
else:
|
||||
name = extension.name
|
||||
# incl_dirs = extension.include_dirs
|
||||
#if self.build_src not in incl_dirs:
|
||||
# incl_dirs.append(self.build_src)
|
||||
build_dir = os.path.join(*([self.build_src]
|
||||
+name.split('.')[:-1]))
|
||||
self.mkpath(build_dir)
|
||||
|
||||
if self.verbose_cfg:
|
||||
new_level = log.INFO
|
||||
else:
|
||||
new_level = log.WARN
|
||||
old_level = log.set_threshold(new_level)
|
||||
|
||||
for func in func_sources:
|
||||
source = func(extension, build_dir)
|
||||
if not source:
|
||||
continue
|
||||
if is_sequence(source):
|
||||
[log.info(" adding '%s' to sources." % (s,)) for s in source]
|
||||
new_sources.extend(source)
|
||||
else:
|
||||
log.info(" adding '%s' to sources." % (source,))
|
||||
new_sources.append(source)
|
||||
log.set_threshold(old_level)
|
||||
return new_sources
|
||||
|
||||
def filter_py_files(self, sources):
|
||||
return self.filter_files(sources, ['.py'])
|
||||
|
||||
def filter_h_files(self, sources):
|
||||
return self.filter_files(sources, ['.h', '.hpp', '.inc'])
|
||||
|
||||
def filter_files(self, sources, exts = []):
|
||||
new_sources = []
|
||||
files = []
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext in exts:
|
||||
files.append(source)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources, files
|
||||
|
||||
def template_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
if is_sequence(extension):
|
||||
depends = extension[1].get('depends')
|
||||
include_dirs = extension[1].get('include_dirs')
|
||||
else:
|
||||
depends = extension.depends
|
||||
include_dirs = extension.include_dirs
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.src': # Template file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
self.mkpath(target_dir)
|
||||
target_file = os.path.join(target_dir, os.path.basename(base))
|
||||
if (self.force or newer_group([source] + depends, target_file)):
|
||||
if _f_pyf_ext_match(base):
|
||||
log.info("from_template:> %s" % (target_file))
|
||||
outstr = process_f_file(source)
|
||||
else:
|
||||
log.info("conv_template:> %s" % (target_file))
|
||||
outstr = process_c_file(source)
|
||||
with open(target_file, 'w') as fid:
|
||||
fid.write(outstr)
|
||||
if _header_ext_match(target_file):
|
||||
d = os.path.dirname(target_file)
|
||||
if d not in include_dirs:
|
||||
log.info(" adding '%s' to include_dirs." % (d))
|
||||
include_dirs.append(d)
|
||||
new_sources.append(target_file)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources
|
||||
|
||||
def pyrex_sources(self, sources, extension):
|
||||
"""Pyrex not supported; this remains for Cython support (see below)"""
|
||||
new_sources = []
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.pyx':
|
||||
target_file = self.generate_a_pyrex_source(base, ext_name,
|
||||
source,
|
||||
extension)
|
||||
new_sources.append(target_file)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources
|
||||
|
||||
def generate_a_pyrex_source(self, base, ext_name, source, extension):
|
||||
"""Pyrex is not supported, but some projects monkeypatch this method.
|
||||
|
||||
That allows compiling Cython code, see gh-6955.
|
||||
This method will remain here for compatibility reasons.
|
||||
"""
|
||||
return []
|
||||
|
||||
def f2py_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
f2py_sources = []
|
||||
f_sources = []
|
||||
f2py_targets = {}
|
||||
target_dirs = []
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
skip_f2py = 0
|
||||
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.pyf': # F2PY interface file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
if os.path.isfile(source):
|
||||
name = get_f2py_modulename(source)
|
||||
if name != ext_name:
|
||||
raise DistutilsSetupError('mismatch of extension names: %s '
|
||||
'provides %r but expected %r' % (
|
||||
source, name, ext_name))
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
else:
|
||||
log.debug(' source %s does not exist: skipping f2py\'ing.' \
|
||||
% (source))
|
||||
name = ext_name
|
||||
skip_f2py = 1
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
if not os.path.isfile(target_file):
|
||||
log.warn(' target %s does not exist:\n '\
|
||||
'Assuming %smodule.c was generated with '\
|
||||
'"build_src --inplace" command.' \
|
||||
% (target_file, name))
|
||||
target_dir = os.path.dirname(base)
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsSetupError("%r missing" % (target_file,))
|
||||
log.info(' Yes! Using %r as up-to-date target.' \
|
||||
% (target_file))
|
||||
target_dirs.append(target_dir)
|
||||
f2py_sources.append(source)
|
||||
f2py_targets[source] = target_file
|
||||
new_sources.append(target_file)
|
||||
elif fortran_ext_match(ext):
|
||||
f_sources.append(source)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
|
||||
if not (f2py_sources or f_sources):
|
||||
return new_sources
|
||||
|
||||
for d in target_dirs:
|
||||
self.mkpath(d)
|
||||
|
||||
f2py_options = extension.f2py_options + self.f2py_opts
|
||||
|
||||
if self.distribution.libraries:
|
||||
for name, build_info in self.distribution.libraries:
|
||||
if name in extension.libraries:
|
||||
f2py_options.extend(build_info.get('f2py_options', []))
|
||||
|
||||
log.info("f2py options: %s" % (f2py_options))
|
||||
|
||||
if f2py_sources:
|
||||
if len(f2py_sources) != 1:
|
||||
raise DistutilsSetupError(
|
||||
'only one .pyf file is allowed per extension module but got'\
|
||||
' more: %r' % (f2py_sources,))
|
||||
source = f2py_sources[0]
|
||||
target_file = f2py_targets[source]
|
||||
target_dir = os.path.dirname(target_file) or '.'
|
||||
depends = [source] + extension.depends
|
||||
if (self.force or newer_group(depends, target_file, 'newer')) \
|
||||
and not skip_f2py:
|
||||
log.info("f2py: %s" % (source))
|
||||
import numpy.f2py
|
||||
numpy.f2py.run_main(f2py_options
|
||||
+ ['--build-dir', target_dir, source])
|
||||
else:
|
||||
log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
|
||||
else:
|
||||
#XXX TODO: --inplace support for sdist command
|
||||
if is_sequence(extension):
|
||||
name = extension[0]
|
||||
else: name = extension.name
|
||||
target_dir = os.path.join(*([self.build_src]
|
||||
+name.split('.')[:-1]))
|
||||
target_file = os.path.join(target_dir, ext_name + 'module.c')
|
||||
new_sources.append(target_file)
|
||||
depends = f_sources + extension.depends
|
||||
if (self.force or newer_group(depends, target_file, 'newer')) \
|
||||
and not skip_f2py:
|
||||
log.info("f2py:> %s" % (target_file))
|
||||
self.mkpath(target_dir)
|
||||
import numpy.f2py
|
||||
numpy.f2py.run_main(f2py_options + ['--lower',
|
||||
'--build-dir', target_dir]+\
|
||||
['-m', ext_name]+f_sources)
|
||||
else:
|
||||
log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
|
||||
% (target_file))
|
||||
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsError("f2py target file %r not generated" % (target_file,))
|
||||
|
||||
build_dir = os.path.join(self.build_src, target_dir)
|
||||
target_c = os.path.join(build_dir, 'fortranobject.c')
|
||||
target_h = os.path.join(build_dir, 'fortranobject.h')
|
||||
log.info(" adding '%s' to sources." % (target_c))
|
||||
new_sources.append(target_c)
|
||||
if build_dir not in extension.include_dirs:
|
||||
log.info(" adding '%s' to include_dirs." % (build_dir))
|
||||
extension.include_dirs.append(build_dir)
|
||||
|
||||
if not skip_f2py:
|
||||
import numpy.f2py
|
||||
d = os.path.dirname(numpy.f2py.__file__)
|
||||
source_c = os.path.join(d, 'src', 'fortranobject.c')
|
||||
source_h = os.path.join(d, 'src', 'fortranobject.h')
|
||||
if newer(source_c, target_c) or newer(source_h, target_h):
|
||||
self.mkpath(os.path.dirname(target_c))
|
||||
self.copy_file(source_c, target_c)
|
||||
self.copy_file(source_h, target_h)
|
||||
else:
|
||||
if not os.path.isfile(target_c):
|
||||
raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
|
||||
if not os.path.isfile(target_h):
|
||||
raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
|
||||
|
||||
for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
|
||||
filename = os.path.join(target_dir, ext_name + name_ext)
|
||||
if os.path.isfile(filename):
|
||||
log.info(" adding '%s' to sources." % (filename))
|
||||
f_sources.append(filename)
|
||||
|
||||
return new_sources + f_sources
|
||||
|
||||
def swig_sources(self, sources, extension):
|
||||
# Assuming SWIG 1.3.14 or later. See compatibility note in
|
||||
# http://www.swig.org/Doc1.3/Python.html#Python_nn6
|
||||
|
||||
new_sources = []
|
||||
swig_sources = []
|
||||
swig_targets = {}
|
||||
target_dirs = []
|
||||
py_files = [] # swig generated .py files
|
||||
target_ext = '.c'
|
||||
if '-c++' in extension.swig_opts:
|
||||
typ = 'c++'
|
||||
is_cpp = True
|
||||
extension.swig_opts.remove('-c++')
|
||||
elif self.swig_cpp:
|
||||
typ = 'c++'
|
||||
is_cpp = True
|
||||
else:
|
||||
typ = None
|
||||
is_cpp = False
|
||||
skip_swig = 0
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.i': # SWIG interface file
|
||||
# the code below assumes that the sources list
|
||||
# contains not more than one .i SWIG interface file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
py_target_dir = self.ext_target_dir
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
py_target_dir = target_dir
|
||||
if os.path.isfile(source):
|
||||
name = get_swig_modulename(source)
|
||||
if name != ext_name[1:]:
|
||||
raise DistutilsSetupError(
|
||||
'mismatch of extension names: %s provides %r'
|
||||
' but expected %r' % (source, name, ext_name[1:]))
|
||||
if typ is None:
|
||||
typ = get_swig_target(source)
|
||||
is_cpp = typ=='c++'
|
||||
else:
|
||||
typ2 = get_swig_target(source)
|
||||
if typ2 is None:
|
||||
log.warn('source %r does not define swig target, assuming %s swig target' \
|
||||
% (source, typ))
|
||||
elif typ!=typ2:
|
||||
log.warn('expected %r but source %r defines %r swig target' \
|
||||
% (typ, source, typ2))
|
||||
if typ2=='c++':
|
||||
log.warn('resetting swig target to c++ (some targets may have .c extension)')
|
||||
is_cpp = True
|
||||
else:
|
||||
log.warn('assuming that %r has c++ swig target' % (source))
|
||||
if is_cpp:
|
||||
target_ext = '.cpp'
|
||||
target_file = os.path.join(target_dir, '%s_wrap%s' \
|
||||
% (name, target_ext))
|
||||
else:
|
||||
log.warn(' source %s does not exist: skipping swig\'ing.' \
|
||||
% (source))
|
||||
name = ext_name[1:]
|
||||
skip_swig = 1
|
||||
target_file = _find_swig_target(target_dir, name)
|
||||
if not os.path.isfile(target_file):
|
||||
log.warn(' target %s does not exist:\n '\
|
||||
'Assuming %s_wrap.{c,cpp} was generated with '\
|
||||
'"build_src --inplace" command.' \
|
||||
% (target_file, name))
|
||||
target_dir = os.path.dirname(base)
|
||||
target_file = _find_swig_target(target_dir, name)
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsSetupError("%r missing" % (target_file,))
|
||||
log.warn(' Yes! Using %r as up-to-date target.' \
|
||||
% (target_file))
|
||||
target_dirs.append(target_dir)
|
||||
new_sources.append(target_file)
|
||||
py_files.append(os.path.join(py_target_dir, name+'.py'))
|
||||
swig_sources.append(source)
|
||||
swig_targets[source] = new_sources[-1]
|
||||
else:
|
||||
new_sources.append(source)
|
||||
|
||||
if not swig_sources:
|
||||
return new_sources
|
||||
|
||||
if skip_swig:
|
||||
return new_sources + py_files
|
||||
|
||||
for d in target_dirs:
|
||||
self.mkpath(d)
|
||||
|
||||
swig = self.swig or self.find_swig()
|
||||
swig_cmd = [swig, "-python"] + extension.swig_opts
|
||||
if is_cpp:
|
||||
swig_cmd.append('-c++')
|
||||
for d in extension.include_dirs:
|
||||
swig_cmd.append('-I'+d)
|
||||
for source in swig_sources:
|
||||
target = swig_targets[source]
|
||||
depends = [source] + extension.depends
|
||||
if self.force or newer_group(depends, target, 'newer'):
|
||||
log.info("%s: %s" % (os.path.basename(swig) \
|
||||
+ (is_cpp and '++' or ''), source))
|
||||
self.spawn(swig_cmd + self.swig_opts \
|
||||
+ ["-o", target, '-outdir', py_target_dir, source])
|
||||
else:
|
||||
log.debug(" skipping '%s' swig interface (up-to-date)" \
|
||||
% (source))
|
||||
|
||||
return new_sources + py_files
|
||||
|
||||
# Matchers used by template_sources: Fortran/pyf filenames vs header filenames.
_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
|
||||
|
||||
#### SWIG related auxiliary functions ####

# '%module name' or '%module(package="pkg") name' in a SWIG interface file.
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
                                     re.I).match
# Emacs-style mode lines declaring the wrapper language.
_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search

def get_swig_target(source):
    """Return ``'c'``, ``'c++'`` or ``None`` based on the emacs-style
    mode line on the first line of *source*."""
    with open(source, 'r') as stream:
        first_line = stream.readline()
        # Checked in this order so a plain '-*- c -*-' marker wins.
        target = None
        if _has_cpp_header(first_line):
            target = 'c++'
        if _has_c_header(first_line):
            target = 'c'
        return target
|
||||
|
||||
def get_swig_modulename(source):
|
||||
with open(source, 'r') as f:
|
||||
name = None
|
||||
for line in f:
|
||||
m = _swig_module_name_match(line)
|
||||
if m:
|
||||
name = m.group('name')
|
||||
break
|
||||
return name
|
||||
|
||||
def _find_swig_target(target_dir, name):
|
||||
for ext in ['.cpp', '.c']:
|
||||
target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
|
||||
if os.path.isfile(target):
|
||||
break
|
||||
return target
|
||||
|
||||
#### F2PY related auxiliary functions ####

# 'python module <name>' headers inside a .pyf interface file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Auxiliary '*__user__*' callback blocks, which are not the module itself.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
                                          r'__user__[\w_]*)', re.I).match

def get_f2py_modulename(source):
    """Return the first non-``__user__`` ``python module`` name declared in
    *source*, or ``None`` if there is none."""
    with open(source) as stream:
        for line in stream:
            match = _f2py_module_name_match(line)
            if match is None:
                continue
            if _f2py_user_module_name_match(line):
                # skip *__user__* names
                continue
            return match.group('name')
    return None
|
||||
|
||||
##########################################
|
517
.venv/Lib/site-packages/numpy/distutils/command/config.py
Normal file
517
.venv/Lib/site-packages/numpy/distutils/command/config.py
Normal file
@ -0,0 +1,517 @@
|
||||
# Added Fortran compiler support to config. Currently useful only for
|
||||
# try_compile call. try_run works but is untested for most of Fortran
|
||||
# compilers (they must define linker_exe first).
|
||||
# Pearu Peterson
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import warnings
|
||||
|
||||
from distutils.command.config import config as old_config
|
||||
from distutils.command.config import LANG_EXT
|
||||
from distutils import log
|
||||
from distutils.file_util import copy_file
|
||||
from distutils.ccompiler import CompileError, LinkError
|
||||
import distutils
|
||||
from numpy.distutils.exec_command import filepath_from_subprocess_output
|
||||
from numpy.distutils.mingw32ccompiler import generate_manifest
|
||||
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
|
||||
check_gcc_function_attribute_with_intrinsics,
|
||||
check_gcc_variable_attribute,
|
||||
check_gcc_version_at_least,
|
||||
check_inline,
|
||||
check_restrict,
|
||||
check_compiler_gcc)
|
||||
|
||||
# Register Fortran source extensions with distutils' config machinery so
# try_compile/try_link accept lang='f77' and lang='f90'.
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
|
||||
|
||||
class config(old_config):
    """distutils 'config' command extended with Fortran compiler support
    (used by numpy's build-time feature checks)."""
    # NOTE(review): this mutates the *base class* option list, so the extra
    # option is visible to any other user of old_config as well — appears
    # intentional here, but confirm before relying on it.
    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ]
|
||||
|
||||
    def initialize_options(self):
        """Add the fcompiler option, then defer to the base command."""
        self.fcompiler = None
        old_config.initialize_options(self)
|
||||
|
||||
    def _check_compiler (self):
        """Ensure both the C compiler and the Fortran compiler are set up.

        On win32/MSVC this also eagerly initializes the compiler so a
        missing Visual Studio produces a readable error, and forces
        /MANIFEST into the linker flags.
        """
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler

        if sys.platform == 'win32' and (self.compiler.compiler_type in
                                        ('msvc', 'intelw', 'intelemw')):
            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
            # initialize call query_vcvarsall, which throws an IOError, and
            # causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print an helpful
            # message instead of Error: None.
            if not self.compiler.initialized:
                try:
                    self.compiler.initialize()
                except IOError as e:
                    msg = textwrap.dedent("""\
                        Could not initialize compiler instance: do you have Visual Studio
                        installed? If you are trying to build with MinGW, please use "python setup.py
                        build -c mingw32" instead. If you have Visual Studio installed, check it is
                        correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
                        VS 2010 for >= 3.3).

                        Original exception was: %s, and the Compiler class was %s
                        ============================================================================""") \
                        % (e, self.compiler.__class__.__name__)
                    print(textwrap.dedent("""\
                        ============================================================================"""))
                    raise distutils.errors.DistutilsPlatformError(msg) from e

            # After MSVC is initialized, add an explicit /MANIFEST to linker
            # flags. See issues gh-4245 and gh-4101 for details. Also
            # relevant are issues 4431 and 16296 on the Python bug tracker.
            from distutils import msvc9compiler
            if msvc9compiler.get_build_version() >= 10:
                for ldflags in [self.compiler.ldflags_shared,
                                self.compiler.ldflags_shared_debug]:
                    if '/MANIFEST' not in ldflags:
                        ldflags.append('/MANIFEST')

        if not isinstance(self.fcompiler, FCompiler):
            # self.fcompiler may be a type string (or None) at this point;
            # turn it into a real FCompiler instance tied to the C compiler.
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()
|
||||
|
||||
def _wrap_method(self, mth, lang, args):
|
||||
from distutils.ccompiler import CompileError
|
||||
from distutils.errors import DistutilsExecError
|
||||
save_compiler = self.compiler
|
||||
if lang in ['f77', 'f90']:
|
||||
self.compiler = self.fcompiler
|
||||
if self.compiler is None:
|
||||
raise CompileError('%s compiler is not set' % (lang,))
|
||||
try:
|
||||
ret = mth(*((self,)+args))
|
||||
except (DistutilsExecError, CompileError) as e:
|
||||
self.compiler = save_compiler
|
||||
raise CompileError from e
|
||||
self.compiler = save_compiler
|
||||
return ret
|
||||
|
||||
    def _compile (self, body, headers, include_dirs, lang):
        """Compile a test *body* with the compiler matching *lang*;
        returns the ``(src, obj)`` pair from the base implementation."""
        src, obj = self._wrap_method(old_config._compile, lang,
                                     (body, headers, include_dirs, lang))
        # _compile in unixcompiler.py sometimes creates .d dependency files.
        # Clean them up.
        self.temp_files.append(obj + '.d')
        return src, obj
|
||||
|
||||
def _link(self, body,
          headers, include_dirs,
          libraries, library_dirs, lang):
    # Link a probe program. On MSVC, massage the library settings so that
    # Fortran-built libraries can be consumed by the system linker
    # (translate Cygwin paths, expose g77 static libs as .lib copies);
    # on mingw32, generate a manifest first. Delegates the actual link to
    # the base class via _wrap_method.
    if self.compiler.compiler_type=='msvc':
        # work on copies so the caller's lists are never mutated
        libraries = (libraries or [])[:]
        library_dirs = (library_dirs or [])[:]
        if lang in ['f77', 'f90']:
            lang = 'c' # always use system linker when using MSVC compiler
            if self.fcompiler:
                for d in self.fcompiler.library_dirs or []:
                    # correct path when compiling in Cygwin but with
                    # normal Win Python
                    if d.startswith('/usr/lib'):
                        try:
                            d = subprocess.check_output(['cygpath',
                                                         '-w', d])
                        except (OSError, subprocess.CalledProcessError):
                            # best effort: keep the original path if
                            # cygpath is unavailable or fails
                            pass
                        else:
                            d = filepath_from_subprocess_output(d)
                    library_dirs.append(d)
                for libname in self.fcompiler.libraries or []:
                    if libname not in libraries:
                        libraries.append(libname)
        for libname in libraries:
            # msvc* runtime libraries are resolved by the linker itself
            if libname.startswith('msvc'): continue
            fileexists = False
            for libdir in library_dirs or []:
                libfile = os.path.join(libdir, '%s.lib' % (libname))
                if os.path.isfile(libfile):
                    fileexists = True
                    break
            if fileexists: continue
            # make g77-compiled static libs available to MSVC
            fileexists = False
            for libdir in library_dirs:
                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                if os.path.isfile(libfile):
                    # copy libname.a file to name.lib so that MSVC linker
                    # can find it
                    libfile2 = os.path.join(libdir, '%s.lib' % (libname))
                    copy_file(libfile, libfile2)
                    self.temp_files.append(libfile2)
                    fileexists = True
                    break
            if fileexists: continue
            log.warn('could not find library %r in directories %s' \
                     % (libname, library_dirs))
    elif self.compiler.compiler_type == 'mingw32':
        generate_manifest(self)
    return self._wrap_method(old_config._link, lang,
                             (body, headers, include_dirs,
                              libraries, library_dirs, lang))
|
||||
|
||||
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
    """Return the result of try-compiling a trivial program that
    includes *header* (truthy when the header is usable)."""
    self._check_compiler()
    probe = "/* we need a dummy line to make distutils happy */"
    return self.try_compile(probe, [header], include_dirs)
|
||||
|
||||
def check_decl(self, symbol,
               headers=None, include_dirs=None):
    """Return a truthy value if *symbol* is declared (or is a macro)
    after including *headers* (autoconf AC_CHECK_DECL style)."""
    self._check_compiler()
    # The (void) reference only has to compile when symbol is not
    # already a visible macro, hence the #ifndef guard.
    body = textwrap.dedent("""
        int main(void)
        {
        #ifndef %s
            (void) %s;
        #endif
            ;
            return 0;
        }""") % (symbol, symbol)

    return self.try_compile(body, headers, include_dirs)
|
||||
|
||||
def check_macro_true(self, symbol,
                     headers=None, include_dirs=None):
    """Return a truthy value if the preprocessor macro *symbol* is
    defined and evaluates to true."""
    self._check_compiler()
    # Compilation deliberately fails via #error unless `#if symbol`
    # takes the first branch.
    body = textwrap.dedent("""
        int main(void)
        {
        #if %s
        #else
        #error false or undefined macro
        #endif
            ;
            return 0;
        }""") % (symbol,)

    return self.try_compile(body, headers, include_dirs)
|
||||
|
||||
def check_type(self, type_name, headers=None, include_dirs=None,
               library_dirs=None):
    """Check type availability. Return True if the type can be compiled,
    False otherwise.

    Parameters: *type_name* is the C type to probe; *headers* and
    *include_dirs* are forwarded to the compile step; *library_dirs*
    is accepted for interface compatibility but unused here.
    """
    self._check_compiler()

    # First check the type can be compiled. The template is fully
    # interpolated here with the 'name' key.
    body = textwrap.dedent(r"""
        int main(void) {
          if ((%(name)s *) 0)
            return 0;
          if (sizeof (%(name)s))
            return 0;
        }
        """) % {'name': type_name}

    st = False
    try:
        try:
            # BUG FIX: the original passed `body % {'type': type_name}`
            # even though `body` was already interpolated above; that is
            # a no-op at best and raises ValueError if the type name
            # contains a '%'. Pass the finished source through.
            self._compile(body,
                          headers, include_dirs, 'c')
            st = True
        except distutils.errors.CompileError:
            st = False
    finally:
        self._clean()

    return st
|
||||
|
||||
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
    """Check size of a given type.

    Returns the size in bytes, determined purely by *compiling* probe
    programs (no execution needed, so it works when cross compiling).
    If *expected* (a sequence of candidate sizes) is given, those are
    tried first; otherwise the size is found by exponential bounding
    followed by binary search.
    """
    self._check_compiler()

    # First check the type can be compiled. The negative-array-size
    # trick makes compilation fail exactly when the tested predicate
    # is false.
    body = textwrap.dedent(r"""
        typedef %(type)s npy_check_sizeof_type;
        int main (void)
        {
            static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
            test_array [0] = 0

            ;
            return 0;
        }
        """)
    self._compile(body % {'type': type_name},
                  headers, include_dirs, 'c')
    self._clean()

    if expected:
        # try the caller-suggested sizes first: compiles only when
        # sizeof(type) == size
        body = textwrap.dedent(r"""
            typedef %(type)s npy_check_sizeof_type;
            int main (void)
            {
                static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
                test_array [0] = 0

                ;
                return 0;
            }
            """)
        for size in expected:
            try:
                self._compile(body % {'type': type_name, 'size': size},
                              headers, include_dirs, 'c')
                self._clean()
                return size
            except CompileError:
                pass

    # this fails to *compile* if size > sizeof(type)
    body = textwrap.dedent(r"""
        typedef %(type)s npy_check_sizeof_type;
        int main (void)
        {
            static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
            test_array [0] = 0

            ;
            return 0;
        }
        """)

    # The principle is simple: we first find low and high bounds of size
    # for the type, where low/high are looked up on a log scale. Then, we
    # do a binary search to find the exact size between low and high
    low = 0
    mid = 0
    while True:
        try:
            self._compile(body % {'type': type_name, 'size': mid},
                          headers, include_dirs, 'c')
            self._clean()
            break
        except CompileError:
            #log.info("failure to test for bound %d" % mid)
            low = mid + 1
            mid = 2 * mid + 1

    high = mid
    # Binary search: invariant is low <= sizeof(type) <= high
    while low != high:
        mid = (high - low) // 2 + low
        try:
            self._compile(body % {'type': type_name, 'size': mid},
                          headers, include_dirs, 'c')
            self._clean()
            high = mid
        except CompileError:
            low = mid + 1
    return low
|
||||
|
||||
def check_func(self, func,
               headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               decl=False, call=False, call_args=None):
    """Probe whether *func* can be linked.

    *decl* adds a declaration (verbatim if a string, otherwise a fake
    ``int func(void);`` prototype); *call* emits an actual call with
    *call_args* as the literal argument text.
    """
    # clean up distutils's config a bit: add void to main(), and
    # return a value.
    self._check_compiler()
    lines = []
    if decl:
        if type(decl) is str:
            lines.append(decl)
        else:
            lines.append("int %s (void);" % func)
    # Handle MSVC intrinsics: force MS compiler to make a function call.
    # Useful to test for some functions when built with optimization on, to
    # avoid build error because the intrinsic and our 'fake' test
    # declaration do not match.
    lines.extend(["#ifdef _MSC_VER",
                  "#pragma function(%s)" % func,
                  "#endif",
                  "int main (void) {"])
    if call:
        if call_args is None:
            call_args = ''
        lines.append("  %s(%s);" % (func, call_args))
    else:
        lines.append("  %s;" % func)
    lines.extend(["  return 0;", "}"])
    probe = '\n'.join(lines) + "\n"

    return self.try_link(probe, headers, include_dirs,
                         libraries, library_dirs)
|
||||
|
||||
def check_funcs_once(self, funcs,
                     headers=None, include_dirs=None,
                     libraries=None, library_dirs=None,
                     decl=False, call=False, call_args=None):
    """Check a list of functions at once.

    This is useful to speed up things, since all the functions in the funcs
    list will be put in one compilation unit.

    Arguments
    ---------
    funcs : seq
        list of functions to test
    include_dirs : seq
        list of header paths
    libraries : seq
        list of libraries to link the code snippet to
    library_dirs : seq
        list of library paths
    decl : dict
        for every (key, value), the declaration in the value will be
        used for function in key. If a function is not in the
        dictionary, no declaration will be used.
    call : dict
        for every item (f, value), if the value is True, a call will be
        done to the function f.
    """
    self._check_compiler()
    body = []
    if decl:
        # NOTE(review): despite the docstring, a truthy decl value emits a
        # fake `int f(void);` prototype rather than the value text itself
        # -- confirm against callers.
        for f, v in decl.items():
            if v:
                body.append("int %s (void);" % f)

    # Handle MS intrinsics. See check_func for more info.
    body.append("#ifdef _MSC_VER")
    for func in funcs:
        body.append("#pragma function(%s)" % func)
    body.append("#endif")

    body.append("int main (void) {")
    if call:
        for f in funcs:
            if f in call and call[f]:
                # emit a real call; call_args[f], when present, supplies
                # the literal argument text
                if not (call_args and f in call_args and call_args[f]):
                    args = ''
                else:
                    args = call_args[f]
                body.append("  %s(%s);" % (f, args))
            else:
                # just reference the symbol so the linker must resolve it
                body.append("  %s;" % f)
    else:
        for f in funcs:
            body.append("  %s;" % f)
    body.append("  return 0;")
    body.append("}")
    body = '\n'.join(body) + "\n"

    return self.try_link(body, headers, include_dirs,
                         libraries, library_dirs)
|
||||
|
||||
# Thin wrappers delegating to the module-level helper functions of the same
# name; inside a method body the bare name resolves to the module global,
# not the method, so there is no accidental recursion.

def check_inline(self):
    """Return the inline keyword recognized by the compiler, empty string
    otherwise."""
    return check_inline(self)

def check_restrict(self):
    """Return the restrict keyword recognized by the compiler, empty string
    otherwise."""
    return check_restrict(self)

def check_compiler_gcc(self):
    """Return True if the C compiler is gcc"""
    return check_compiler_gcc(self)

def check_gcc_function_attribute(self, attribute, name):
    """Return True if the compiler accepts the given gcc function
    attribute for a function called *name*."""
    return check_gcc_function_attribute(self, attribute, name)

def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
                                                 code, include):
    """Like check_gcc_function_attribute, but the probe body *code* may
    use intrinsics made available by *include*."""
    return check_gcc_function_attribute_with_intrinsics(self, attribute,
                                                        name, code, include)

def check_gcc_variable_attribute(self, attribute):
    """Return True if the compiler accepts the given gcc variable
    attribute."""
    return check_gcc_variable_attribute(self, attribute)

def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
    """Return True if the GCC version is greater than or equal to the
    specified version."""
    return check_gcc_version_at_least(self, major, minor, patchlevel)
|
||||
|
||||
def get_output(self, body, headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               lang="c", use_tee=None):
    """Try to compile, link to an executable, and run a program
    built from 'body' and 'headers'. Returns the exit status code
    of the program and its output.
    """
    # 2008-11-16, RemoveMe
    warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
                  "Usage of get_output is deprecated: please do not \n"
                  "use it anymore, and avoid configuration checks \n"
                  "involving running executable on the target machine.\n"
                  "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                  DeprecationWarning, stacklevel=2)
    self._check_compiler()
    exitcode, output = 255, ''
    try:
        grabber = GrabStdout()
        try:
            src, obj, exe = self._link(body, headers, include_dirs,
                                       libraries, library_dirs, lang)
            grabber.restore()
        except Exception:
            # keep whatever the failed link printed, then re-raise
            output = grabber.data
            grabber.restore()
            raise
        exe = os.path.join('.', exe)
        # BUG FIX: 'exitstatus' used to be assigned only in the two
        # exception handlers below, so a *successful* run raised
        # NameError when the exit code was computed. A clean
        # check_output() run means status 0.
        exitstatus = 0
        try:
            # specify cwd arg for consistency with
            # historic usage pattern of exec_command()
            # also, note that exe appears to be a string,
            # which exec_command() handled, but we now
            # use a list for check_output() -- this assumes
            # that exe is always a single command
            output = subprocess.check_output([exe], cwd='.')
        except subprocess.CalledProcessError as exc:
            exitstatus = exc.returncode
            output = ''
        except OSError:
            # preserve the EnvironmentError exit status
            # used historically in exec_command()
            exitstatus = 127
            output = ''
        else:
            output = filepath_from_subprocess_output(output)
        if hasattr(os, 'WEXITSTATUS'):
            exitcode = os.WEXITSTATUS(exitstatus)
            if os.WIFSIGNALED(exitstatus):
                sig = os.WTERMSIG(exitstatus)
                log.error('subprocess exited with signal %d' % (sig,))
                if sig == signal.SIGINT:
                    # control-C
                    raise KeyboardInterrupt
        else:
            exitcode = exitstatus
        log.info("success!")
    except (CompileError, LinkError):
        log.info("failure.")
    self._clean()
    return exitcode, output
|
||||
|
||||
class GrabStdout:
    """Tee sys.stdout: installs itself as sys.stdout on construction,
    forwarding every write to the real stream while accumulating a copy
    in ``self.data`` until ``restore()`` is called."""

    def __init__(self):
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        # pass through to the real stream, then record a copy
        self.sys_stdout.write(data)
        self.data = self.data + data

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        # put the original stream back
        sys.stdout = self.sys_stdout
@ -0,0 +1,126 @@
|
||||
from distutils.core import Command
|
||||
from numpy.distutils import log
|
||||
|
||||
#XXX: Linker flags
|
||||
|
||||
def show_fortran_compilers(_cache=None):
    """Print the list of available Fortran compilers.

    Used as the callback for the ``--help-fcompiler`` help option of the
    config_fc command below.
    """
    # Using cache to prevent infinite recursion.
    # NOTE(review): with a default of None each fresh top-level call starts
    # a new cache; recursion is only cut off when _cache is passed through
    # explicitly -- confirm against how distutils invokes help callbacks.
    if _cache:
        return
    elif _cache is None:
        _cache = []
    _cache.append(1)
    from numpy.distutils.fcompiler import show_fcompilers
    import distutils.core
    # the Distribution currently being set up, stashed by distutils.core
    dist = distutils.core._setup_distribution
    show_fcompilers(dist)
|
||||
|
||||
class config_fc(Command):
    """ Distutils command to hold user specified options
    to Fortran compilers.

    config_fc command is used by the FCompiler.customize() method.
    """

    description = "specify Fortran 77/Fortran 90 compiler information"

    # (long option, short option, help text) triples, distutils-style.
    user_options = [
        ('fcompiler=', None, "specify Fortran compiler type"),
        ('f77exec=', None, "specify F77 compiler command"),
        ('f90exec=', None, "specify F90 compiler command"),
        ('f77flags=', None, "specify F77 compiler flags"),
        ('f90flags=', None, "specify F90 compiler flags"),
        ('opt=', None, "specify optimization flags"),
        ('arch=', None, "specify architecture specific optimization flags"),
        ('debug', 'g', "compile with debugging information"),
        ('noopt', None, "compile without optimization"),
        ('noarch', None, "compile without arch-dependent optimization"),
        ]

    help_options = [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
        ]

    boolean_options = ['debug', 'noopt', 'noarch']

    def initialize_options(self):
        # Everything defaults to None so finalize_options can distinguish
        # "unset" from an explicit user value.
        self.fcompiler = None
        self.f77exec = None
        self.f90exec = None
        self.f77flags = None
        self.f90flags = None
        self.opt = None
        self.arch = None
        self.debug = None
        self.noopt = None
        self.noarch = None

    def finalize_options(self):
        # Unify the --fcompiler setting across all build-related commands:
        # collect the distinct explicitly-set values, warn when they
        # disagree, and propagate the first one to commands that have none.
        log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options')
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        cmd_list = [self, config, build_clib, build_ext, build]
        for a in ['fcompiler']:
            l = []
            for c in cmd_list:
                v = getattr(c, a)
                if v is not None:
                    # the attribute may hold a compiler object; reduce it
                    # to its type name for comparison
                    if not isinstance(v, str): v = v.compiler_type
                    if v not in l: l.append(v)
            if not l: v1 = None
            else: v1 = l[0]
            if len(l)>1:
                log.warn(' commands have different --%s options: %s'\
                         ', using first in list as default' % (a, l))
            if v1:
                for c in cmd_list:
                    if getattr(c, a) is None: setattr(c, a, v1)

    def run(self):
        # Do nothing.
        return
|
||||
|
||||
class config_cc(Command):
    """ Distutils command to hold user specified options
    to C/C++ compilers.
    """

    description = "specify C/C++ compiler information"

    user_options = [
        ('compiler=', None, "specify C/C++ compiler type"),
        ]

    def initialize_options(self):
        # None means "not set"; finalize_options fills it in.
        self.compiler = None

    def finalize_options(self):
        # Same unification dance as config_fc.finalize_options, but for
        # the --compiler option.
        log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options')
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        cmd_list = [self, config, build_clib, build_ext, build]
        for a in ['compiler']:
            l = []
            for c in cmd_list:
                v = getattr(c, a)
                if v is not None:
                    # the attribute may hold a compiler object; reduce it
                    # to its type name for comparison
                    if not isinstance(v, str): v = v.compiler_type
                    if v not in l: l.append(v)
            if not l: v1 = None
            else: v1 = l[0]
            if len(l)>1:
                log.warn(' commands have different --%s options: %s'\
                         ', using first in list as default' % (a, l))
            if v1:
                for c in cmd_list:
                    if getattr(c, a) is None: setattr(c, a, v1)
        return

    def run(self):
        # Do nothing.
        return
|
15
.venv/Lib/site-packages/numpy/distutils/command/develop.py
Normal file
15
.venv/Lib/site-packages/numpy/distutils/command/develop.py
Normal file
@ -0,0 +1,15 @@
|
||||
""" Override the develop command from setuptools so we can ensure that our
|
||||
generated files (from build_src or build_scripts) are properly converted to real
|
||||
files with filenames.
|
||||
|
||||
"""
|
||||
from setuptools.command.develop import develop as old_develop
|
||||
|
||||
class develop(old_develop):
    # Re-export the setuptools docstring so help output is unchanged.
    __doc__ = old_develop.__doc__

    def install_for_development(self):
        # Build sources in-place, too.
        self.reinitialize_command('build_src', inplace=1)
        # Make sure scripts are built.
        self.run_command('build_scripts')
        # Defer to setuptools for the actual development install.
        old_develop.install_for_development(self)
|
25
.venv/Lib/site-packages/numpy/distutils/command/egg_info.py
Normal file
25
.venv/Lib/site-packages/numpy/distutils/command/egg_info.py
Normal file
@ -0,0 +1,25 @@
|
||||
import sys
|
||||
|
||||
from setuptools.command.egg_info import egg_info as _egg_info
|
||||
|
||||
class egg_info(_egg_info):
    """egg_info that forces build_src to run first, so setuptools sees
    real generated filenames instead of generator functions."""

    def run(self):
        if 'sdist' in sys.argv:
            import warnings
            import textwrap
            # BUG FIX: the example line used to end with a stray '"'
            # (cmdclass={'sdist': sdist}") in the emitted warning text.
            msg = textwrap.dedent("""
                `build_src` is being run, this may lead to missing
                files in your sdist!  You want to use distutils.sdist
                instead of the setuptools version:

                    from distutils.command.sdist import sdist
                    cmdclass={'sdist': sdist}

                See numpy's setup.py or gh-7131 for details.""")
            warnings.warn(msg, UserWarning, stacklevel=2)

        # We need to ensure that build_src has been executed in order to give
        # setuptools' egg_info command real filenames instead of functions which
        # generate files.
        self.run_command("build_src")
        _egg_info.run(self)
|
79
.venv/Lib/site-packages/numpy/distutils/command/install.py
Normal file
79
.venv/Lib/site-packages/numpy/distutils/command/install.py
Normal file
@ -0,0 +1,79 @@
|
||||
import sys
|
||||
if 'setuptools' in sys.modules:
|
||||
import setuptools.command.install as old_install_mod
|
||||
have_setuptools = True
|
||||
else:
|
||||
import distutils.command.install as old_install_mod
|
||||
have_setuptools = False
|
||||
from distutils.file_util import write_file
|
||||
|
||||
old_install = old_install_mod.install
|
||||
|
||||
class install(old_install):
    """install command that also installs numpy-style C libraries and
    quotes space-containing paths in the --record file."""

    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]

    def finalize_options (self):
        old_install.finalize_options(self)
        # install everything relative to the (possibly egg-based) libbase
        self.install_lib = self.install_libbase

    def setuptools_run(self):
        """ The setuptools version of the .run() method.

        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        from distutils.command.install import install as distutils_install

        # Explicit request for old-style install?  Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return distutils_install.run(self)

        # Attempt to detect whether we were called from setup() or by another
        # command.  If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method.  If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'.  This is slightly kludgy, but seems to
        # work.
        #
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name

        if caller_module != 'distutils.dist' or caller_name!='run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            distutils_install.run(self)
        else:
            self.do_egg_install()

    def run(self):
        # Dispatch to the appropriate base implementation, then post-process
        # the --record file.
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # bdist_rpm fails when INSTALLED_FILES contains
            # paths with spaces. Such paths must be enclosed
            # with double-quotes.
            with open(self.record, 'r') as f:
                lines = []
                need_rewrite = False
                for l in f:
                    l = l.rstrip()
                    if ' ' in l:
                        need_rewrite = True
                        l = '"%s"' % (l)
                    lines.append(l)
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
|
@ -0,0 +1,40 @@
|
||||
import os
|
||||
from distutils.core import Command
|
||||
from distutils.ccompiler import new_compiler
|
||||
from numpy.distutils.misc_util import get_cmd
|
||||
|
||||
class install_clib(Command):
    """Install the C libraries produced by build_clib into install_lib."""

    description = "Command to install installable C libraries"

    user_options = []

    def initialize_options(self):
        self.install_dir = None
        self.outfiles = []

    def finalize_options(self):
        self.set_undefined_options('install', ('install_lib', 'install_dir'))

    def run(self):
        build_clib_cmd = get_cmd("build_clib")
        if not build_clib_cmd.build_clib:
            # can happen if the user specified `--skip-build`
            build_clib_cmd.finalize_options()
        build_dir = build_clib_cmd.build_clib

        # We need the compiler to get the library name -> filename association
        compiler = build_clib_cmd.compiler
        if not compiler:
            compiler = new_compiler(compiler=None)
            compiler.customize(self.distribution)

        for lib in self.distribution.installed_libraries:
            target_dir = os.path.join(self.install_dir, lib.target_dir)
            filename = compiler.library_filename(lib.name)
            source = os.path.join(build_dir, filename)
            self.mkpath(target_dir)
            self.outfiles.append(self.copy_file(source, target_dir)[0])

    def get_outputs(self):
        return self.outfiles
|
@ -0,0 +1,24 @@
|
||||
import sys
|
||||
have_setuptools = ('setuptools' in sys.modules)
|
||||
|
||||
from distutils.command.install_data import install_data as old_install_data
|
||||
|
||||
#data installer with improved intelligence over distutils
|
||||
#data files are copied into the project directory instead
|
||||
#of willy-nilly
|
||||
#data installer with improved intelligence over distutils
#data files are copied into the project directory instead
#of willy-nilly
class install_data (old_install_data):

    def run(self):
        old_install_data.run(self)

        if have_setuptools:
            # Run install_clib again, since setuptools does not run sub-commands
            # of install automatically
            self.run_command('install_clib')

    def finalize_options (self):
        # Install data relative to install_lib (not the default
        # install_data root), inheriting root/force from install.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   ('root', 'root'),
                                   ('force', 'force'),
                                  )
|
@ -0,0 +1,25 @@
|
||||
import os
|
||||
from distutils.command.install_headers import install_headers as old_install_headers
|
||||
|
||||
class install_headers (old_install_headers):

    def run (self):
        # Install declared headers. Tuple entries are (dotted.package, path)
        # and land under <prefix>/<package path>; plain strings go to the
        # default install_dir.
        headers = self.distribution.headers
        if not headers:
            return

        prefix = os.path.dirname(self.install_dir)
        for header in headers:
            if isinstance(header, tuple):
                # Kind of a hack, but I don't know where else to change this...
                if header[0] == 'numpy.core':
                    header = ('numpy', header[1])
                    if os.path.splitext(header[1])[1] == '.inc':
                        continue
                d = os.path.join(*([prefix]+header[0].split('.')))
                header = header[1]
            else:
                d = self.install_dir
            self.mkpath(d)
            (out, _) = self.copy_file(header, d)
            self.outfiles.append(out)
|
27
.venv/Lib/site-packages/numpy/distutils/command/sdist.py
Normal file
27
.venv/Lib/site-packages/numpy/distutils/command/sdist.py
Normal file
@ -0,0 +1,27 @@
|
||||
import sys
|
||||
if 'setuptools' in sys.modules:
|
||||
from setuptools.command.sdist import sdist as old_sdist
|
||||
else:
|
||||
from distutils.command.sdist import sdist as old_sdist
|
||||
|
||||
from numpy.distutils.misc_util import get_data_files
|
||||
|
||||
class sdist(old_sdist):

    def add_defaults (self):
        # Extend the default manifest with numpy-style data_files (which
        # may be nested sequences) and with header files.
        old_sdist.add_defaults(self)

        dist = self.distribution

        if dist.has_data_files():
            for data in dist.data_files:
                self.filelist.extend(get_data_files(data))

        if dist.has_headers():
            headers = []
            for h in dist.headers:
                # header entries are plain paths or (package, path) tuples
                if isinstance(h, str): headers.append(h)
                else: headers.append(h[1])
            self.filelist.extend(headers)

        return
|
329
.venv/Lib/site-packages/numpy/distutils/conv_template.py
Normal file
329
.venv/Lib/site-packages/numpy/distutils/conv_template.py
Normal file
@ -0,0 +1,329 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
takes templated file .xxx.src and produces .xxx file where .xxx is
|
||||
.i or .c or .h, using the following template rules
|
||||
|
||||
/**begin repeat -- on a line by itself marks the start of a repeated code
|
||||
segment
|
||||
/**end repeat**/ -- on a line by itself marks it's end
|
||||
|
||||
After the /**begin repeat and before the */, all the named templates are placed
|
||||
these should all have the same number of replacements
|
||||
|
||||
Repeat blocks can be nested, with each nested block labeled with its depth,
|
||||
i.e.
|
||||
/**begin repeat1
|
||||
*....
|
||||
*/
|
||||
/**end repeat1**/
|
||||
|
||||
When using nested loops, you can optionally exclude particular
|
||||
combinations of the variables using (inside the comment portion of the inner loop):
|
||||
|
||||
:exclude: var1=value1, var2=value2, ...
|
||||
|
||||
This will exclude the pattern where var1 is value1 and var2 is value2 when
|
||||
the result is being generated.
|
||||
|
||||
|
||||
In the main body each replace will use one entry from the list of named replacements
|
||||
|
||||
Note that all #..# forms in a block must have the same number of
|
||||
comma-separated entries.
|
||||
|
||||
Example:
|
||||
|
||||
An input file containing
|
||||
|
||||
/**begin repeat
|
||||
* #a = 1,2,3#
|
||||
* #b = 1,2,3#
|
||||
*/
|
||||
|
||||
/**begin repeat1
|
||||
* #c = ted, jim#
|
||||
*/
|
||||
@a@, @b@, @c@
|
||||
/**end repeat1**/
|
||||
|
||||
/**end repeat**/
|
||||
|
||||
produces
|
||||
|
||||
line 1 "template.c.src"
|
||||
|
||||
/*
|
||||
*********************************************************************
|
||||
** This file was autogenerated from a template DO NOT EDIT!!**
|
||||
** Changes should be made to the original source (.src) file **
|
||||
*********************************************************************
|
||||
*/
|
||||
|
||||
#line 9
|
||||
1, 1, ted
|
||||
|
||||
#line 9
|
||||
1, 1, jim
|
||||
|
||||
#line 9
|
||||
2, 2, ted
|
||||
|
||||
#line 9
|
||||
2, 2, jim
|
||||
|
||||
#line 9
|
||||
3, 3, ted
|
||||
|
||||
#line 9
|
||||
3, 3, jim
|
||||
|
||||
"""
|
||||
|
||||
__all__ = ['process_str', 'process_file']
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
# names for replacement that are already global.
|
||||
global_names = {}
|
||||
|
||||
# header placed at the front of head processed file
|
||||
header =\
|
||||
"""
|
||||
/*
|
||||
*****************************************************************************
|
||||
** This file was autogenerated from a template DO NOT EDIT!!!! **
|
||||
** Changes should be made to the original source (.src) file **
|
||||
*****************************************************************************
|
||||
*/
|
||||
|
||||
"""
|
||||
# Parse string for repeat loops
|
||||
def parse_structure(astr, level):
    """
    Locate all repeat blocks of the given nesting *level* in *astr*.

    Returns a sorted list of tuples
    (header_start, header_end, footer_start, footer_end, line)
    where line counts newlines from the beginning of the string,
    starting at zero. Returns an empty list if no loops found.
    """
    if level == 0:
        begin_mark = "/**begin repeat"
        end_mark = "/**end repeat**/"
    else:
        begin_mark = "/**begin repeat%d" % level
        end_mark = "/**end repeat%d**/" % level

    spans = []
    pos = 0
    line = 0
    while True:
        head_start = astr.find(begin_mark, pos)
        if head_start < 0:
            break
        # header comment runs to the closing */ and then to end of line
        head_end = astr.find("\n", astr.find("*/", head_start))
        tail_start = astr.find(end_mark, head_end)
        tail_end = astr.find("\n", tail_start)
        line += astr.count("\n", pos, head_end + 1)
        spans.append((head_start, head_end + 1, tail_start, tail_end + 1, line))
        line += astr.count("\n", head_end + 1, tail_end)
        pos = tail_end
    spans.sort()
    return spans
|
||||
|
||||
|
||||
def paren_repl(obj):
    """Regex callback: expand a '(values)*N' match into the values
    repeated N times, comma-joined."""
    values = obj.group(1)
    count = int(obj.group(2))
    return ','.join([values] * count)
|
||||
|
||||
parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
|
||||
plainrep = re.compile(r"([^*]+)\*(\d+)")
|
||||
def parse_values(astr):
    """Expand repetition shorthands in *astr* and split it into a list
    of values."""
    # replaces all occurrences of '(a,b,c)*4' in astr
    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate
    # empty values, i.e., ()*4 yields ',,,'. The result is
    # split at ',' and a list of values returned.
    astr = parenrep.sub(paren_repl, astr)
    # replaces occurrences of xxx*3 with xxx, xxx, xxx
    astr = ','.join([plainrep.sub(paren_repl, x.strip())
                     for x in astr.split(',')])
    return astr.split(',')
|
||||
|
||||
|
||||
stripast = re.compile(r"\n\s*\*?")
|
||||
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
|
||||
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
|
||||
exclude_re = re.compile(":exclude:")
|
||||
def parse_loop_header(loophead) :
    """Find all named replacements in the header.

    Returns a list of dictionaries, one for each loop iteration,
    where each key is a name to be substituted and the corresponding
    value is the replacement string.

    Every named list in the header must have the same number of values;
    a mismatch (or a header with no substitution variables at all)
    raises ValueError.

    Exclusions (':exclude: var=value ...') are also collected, one dict
    per clause:
    [{'var1':'value1', 'var2':'value2'[,...]}, ...]
    """
    # Strip out '\n' and leading '*', if any, in continuation lines.
    # This should not effect code previous to this change as
    # continuation lines were not allowed.
    loophead = stripast.sub("", loophead)
    # parse out the names and lists of values
    names = []
    reps = named_re.findall(loophead)
    nsub = None
    for rep in reps:
        name = rep[0]
        vals = parse_values(rep[1])
        size = len(vals)
        if nsub is None :
            # First list fixes the iteration count for the whole loop.
            nsub = size
        elif nsub != size :
            msg = "Mismatch in number of values, %d != %d\n%s = %s"
            raise ValueError(msg % (nsub, size, name, vals))
        names.append((name, vals))

    # Find any exclude variables
    # NOTE(review): 'excludes' is collected below but never used or
    # returned by this function -- presumably vestigial; confirm before
    # removing.
    excludes = []

    for obj in exclude_re.finditer(loophead):
        span = obj.span()
        # find next newline
        endline = loophead.find('\n', span[1])
        substr = loophead[span[1]:endline]
        ex_names = exclude_vars_re.findall(substr)
        excludes.append(dict(ex_names))

    # generate list of dictionaries, one for each template iteration
    dlist = []
    if nsub is None :
        raise ValueError("No substitution variables found")
    for i in range(nsub):
        tmp = {name: vals[i] for name, vals in names}
        dlist.append(tmp)
    return dlist
|
||||
|
||||
# A substitution target in the template body: '@name@'.
replace_re = re.compile(r"@(\w+)@")
|
||||
def parse_string(astr, env, level, line) :
    """Expand all repeat loops of nesting *level* in *astr*.

    *env* maps substitution names to their values for the current
    iteration; *line* is the line number of *astr* within the original
    file (used for the emitted '#line' directive and for error
    messages).  Recurses into nested loops at level+1.  Returns the
    expanded text as a single string.
    """
    lineno = "#line %d\n" % line

    # local function for string replacement, uses env
    def replace(match):
        name = match.group(1)
        try :
            val = env[name]
        except KeyError:
            msg = 'line %d: no definition of key "%s"'%(line, name)
            raise ValueError(msg) from None
        return val

    code = [lineno]
    struct = parse_structure(astr, level)
    if struct :
        # recurse over inner loops
        oldend = 0
        newlevel = level + 1
        for sub in struct:
            # sub = (head_start, body_start, body_end, block_end, line)
            pref = astr[oldend:sub[0]]   # text before this loop
            head = astr[sub[0]:sub[1]]   # loop header with name lists
            text = astr[sub[1]:sub[2]]   # loop body to be repeated
            oldend = sub[3]
            newline = line + sub[4]
            code.append(replace_re.sub(replace, pref))
            try :
                envlist = parse_loop_header(head)
            except ValueError as e:
                msg = "line %d: %s" % (newline, e)
                raise ValueError(msg)
            for newenv in envlist :
                # Outer-loop values take precedence over this iteration's.
                newenv.update(env)
                newcode = parse_string(text, newenv, newlevel, newline)
                code.extend(newcode)
        suff = astr[oldend:]
        code.append(replace_re.sub(replace, suff))
    else :
        # replace keys
        code.append(replace_re.sub(replace, astr))
    code.append('\n')
    return ''.join(code)
|
||||
|
||||
def process_str(astr):
    """Expand all repeat templates in *astr*, prefixed by the generated
    file header."""
    # The original built a list and joined it; header + parsed text is the
    # same result.
    return header + parse_string(astr, global_names, 0, 1)
|
||||
|
||||
|
||||
# An '#include "path/to/file.src"' directive at the start of a line
# (case-insensitive), capturing the included file name.
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
|
||||
|
||||
def resolve_includes(source):
    """Return the lines of *source* with '#include "*.src"' directives
    recursively replaced by the lines of the referenced files.

    Relative include paths are resolved against the directory of
    *source*; a directive whose target file does not exist is kept
    verbatim.
    """
    base_dir = os.path.dirname(source)
    with open(source) as stream:
        result = []
        for text in stream:
            match = include_src_re.match(text)
            if not match:
                result.append(text)
                continue
            target = match.group('name')
            if not os.path.isabs(target):
                target = os.path.join(base_dir, target)
            if os.path.isfile(target):
                result.extend(resolve_includes(target))
            else:
                result.append(text)
    return result
|
||||
|
||||
def process_file(source):
    """Run the template processor over *source* and return the generated
    text, prefixed with a '#line' directive naming the original file."""
    text = ''.join(resolve_includes(source))
    # Normalize and escape the path so it can be embedded in a C string.
    fname = os.path.normcase(source).replace("\\", "\\\\")
    try:
        body = process_str(text)
    except ValueError as e:
        raise ValueError('In "%s" loop at %s' % (fname, e)) from None
    return '#line 1 "%s"\n%s' % (fname, body)
|
||||
|
||||
|
||||
def unique_key(adict):
    """Return a key that is not present in *adict*.

    The key is built by concatenating the first *n* characters of every
    existing key, growing *n* until the result collides with no current
    key.  Not particularly quick, but the dictionaries involved are
    small.
    """
    existing = list(adict.keys())
    width = 1
    while True:
        candidate = "".join(key[:width] for key in existing)
        if candidate not in existing:
            return candidate
        width += 1
|
||||
|
||||
|
||||
def main():
    """Command-line entry point.

    With an argument, expand the template file ``sys.argv[1]`` and write
    the result to the same path minus its extension; without one, filter
    stdin to stdout.

    Fixes over the previous version:
    - 'file' was unbound in the stdin branch, so a template error there
      raised NameError instead of the intended ValueError message;
    - file handles opened here were never closed.
    """
    try:
        file = sys.argv[1]
    except IndexError:
        # Filter mode: no handles of our own to manage.
        file = "<stdin>"   # gives the error message below a name to report
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file, 'r')
        (base, ext) = os.path.splitext(file)
        newname = base
        outfile = open(newname, 'w')

    try:
        allstr = fid.read()
        try:
            writestr = process_str(allstr)
        except ValueError as e:
            raise ValueError("In %s loop at %s" % (file, e)) from None
        outfile.write(writestr)
    finally:
        # Close only the handles we opened; leave stdin/stdout alone.
        if fid is not sys.stdin:
            fid.close()
        if outfile is not sys.stdout:
            outfile.close()


if __name__ == "__main__":
    main()
|
215
.venv/Lib/site-packages/numpy/distutils/core.py
Normal file
215
.venv/Lib/site-packages/numpy/distutils/core.py
Normal file
@ -0,0 +1,215 @@
|
||||
import sys
|
||||
from distutils.core import Distribution
|
||||
|
||||
# Decide which setup() implementation to delegate to.  setuptools is used
# only when the calling script has already imported it; importing it here
# unconditionally would force setuptools behaviour on plain-distutils
# builds.
if 'setuptools' in sys.modules:
    have_setuptools = True
    from setuptools import setup as old_setup
    # easy_install imports math, it may be picked up from cwd
    from setuptools.command import easy_install
    try:
        # very old versions of setuptools don't have this
        from setuptools.command import bdist_egg
    except ImportError:
        have_setuptools = False
else:
    from distutils.core import setup as old_setup
    have_setuptools = False
|
||||
|
||||
import warnings
|
||||
import distutils.core
|
||||
import distutils.dist
|
||||
|
||||
from numpy.distutils.extension import Extension # noqa: F401
|
||||
from numpy.distutils.numpy_distribution import NumpyDistribution
|
||||
from numpy.distutils.command import config, config_compiler, \
|
||||
build, build_py, build_ext, build_clib, build_src, build_scripts, \
|
||||
sdist, install_data, install_headers, install, bdist_rpm, \
|
||||
install_clib
|
||||
from numpy.distutils.misc_util import is_sequence, is_string
|
||||
|
||||
# Baseline command-class table: every distutils command mapped to numpy's
# extended implementation.  setup() below merges user-supplied cmdclass
# entries on top of this.
numpy_cmdclass = {'build': build.build,
                  'build_src': build_src.build_src,
                  'build_scripts': build_scripts.build_scripts,
                  'config_cc': config_compiler.config_cc,
                  'config_fc': config_compiler.config_fc,
                  'config': config.config,
                  'build_ext': build_ext.build_ext,
                  'build_py': build_py.build_py,
                  'build_clib': build_clib.build_clib,
                  'sdist': sdist.sdist,
                  'install_data': install_data.install_data,
                  'install_headers': install_headers.install_headers,
                  'install_clib': install_clib.install_clib,
                  'install': install.install,
                  'bdist_rpm': bdist_rpm.bdist_rpm,
                  }
if have_setuptools:
    # Use our own versions of develop and egg_info to ensure that build_src is
    # handled appropriately.
    from numpy.distutils.command import develop, egg_info
    numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
    numpy_cmdclass['develop'] = develop.develop
    numpy_cmdclass['easy_install'] = easy_install.easy_install
    numpy_cmdclass['egg_info'] = egg_info.egg_info
|
||||
|
||||
def _dict_append(d, **kws):
|
||||
for k, v in kws.items():
|
||||
if k not in d:
|
||||
d[k] = v
|
||||
continue
|
||||
dv = d[k]
|
||||
if isinstance(dv, tuple):
|
||||
d[k] = dv + tuple(v)
|
||||
elif isinstance(dv, list):
|
||||
d[k] = dv + list(v)
|
||||
elif isinstance(dv, dict):
|
||||
_dict_append(dv, **v)
|
||||
elif is_string(dv):
|
||||
d[k] = dv + v
|
||||
else:
|
||||
raise TypeError(repr(type(dv)))
|
||||
|
||||
def _command_line_ok(_cache=None):
    """ Return True if command line does not contain any
    help or display requests.
    """
    # NOTE(review): with a default of None, _cache is rebound to a fresh
    # local list on every call, so the memoization below never takes
    # effect across calls -- each call rescans sys.argv.  Left unchanged
    # to preserve behaviour; confirm before making the default mutable.
    if _cache:
        return _cache[0]
    elif _cache is None:
        _cache = []
    ok = True
    # '--<name>' for every display option, plus any short '-x' aliases.
    display_opts = ['--'+n for n in Distribution.display_option_names]
    for o in Distribution.display_options:
        if o[1]:
            display_opts.append('-'+o[1])
    for arg in sys.argv:
        if arg.startswith('--help') or arg=='-h' or arg in display_opts:
            ok = False
            break
    _cache.append(ok)
    return ok
|
||||
|
||||
def get_distribution(always=False):
    """Return the Distribution of the currently running setup(), or None.

    With ``always=True`` a fresh NumpyDistribution is created when none
    is active.
    """
    dist = distutils.core._setup_distribution
    # XXX Hack to get numpy installable with easy_install.
    # The problem is easy_install runs it's own setup(), which
    # sets up distutils.core._setup_distribution. However,
    # when our setup() runs, that gets overwritten and lost.
    # We can't use isinstance, as the DistributionWithoutHelpCommands
    # class is local to a function in setuptools.command.easy_install
    if dist is not None and 'DistributionWithoutHelpCommands' in repr(dist):
        dist = None
    if always and dist is None:
        dist = NumpyDistribution()
    return dist
|
||||
|
||||
def setup(**attr):
    """numpy's drop-in replacement for the distutils/setuptools setup().

    Merges numpy's command classes into the caller's cmdclass, evaluates
    a 'configuration' callable (unless the command line only asks for
    help or metadata display), moves extension source libraries into the
    'libraries' list, and finally delegates to the underlying setup()
    using numpy's Distribution class.
    """

    cmdclass = numpy_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')

        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            # Recurse (now without 'configuration') just to parse the
            # command line; _setup_stop_after keeps any command from
            # actually running.
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist

        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config, 'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules', []):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, lib_name, build_info)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
       and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Use our custom NumpyDistribution class instead of distutils' one
    new_attr['distclass'] = NumpyDistribution

    return old_setup(**new_attr)
|
||||
|
||||
def _check_append_library(libraries, item):
|
||||
for libitem in libraries:
|
||||
if is_sequence(libitem):
|
||||
if is_sequence(item):
|
||||
if item[0]==libitem[0]:
|
||||
if item[1] is libitem[1]:
|
||||
return
|
||||
warnings.warn("[0] libraries list contains %r with"
|
||||
" different build_info" % (item[0],),
|
||||
stacklevel=2)
|
||||
break
|
||||
else:
|
||||
if item==libitem[0]:
|
||||
warnings.warn("[1] libraries list contains %r with"
|
||||
" no build_info" % (item[0],),
|
||||
stacklevel=2)
|
||||
break
|
||||
else:
|
||||
if is_sequence(item):
|
||||
if item[0]==libitem:
|
||||
warnings.warn("[2] libraries list contains %r with"
|
||||
" no build_info" % (item[0],),
|
||||
stacklevel=2)
|
||||
break
|
||||
else:
|
||||
if item==libitem:
|
||||
return
|
||||
libraries.append(item)
|
||||
|
||||
def _check_append_ext_library(libraries, lib_name, build_info):
|
||||
for item in libraries:
|
||||
if is_sequence(item):
|
||||
if item[0]==lib_name:
|
||||
if item[1] is build_info:
|
||||
return
|
||||
warnings.warn("[3] libraries list contains %r with"
|
||||
" different build_info" % (lib_name,),
|
||||
stacklevel=2)
|
||||
break
|
||||
elif item==lib_name:
|
||||
warnings.warn("[4] libraries list contains %r with"
|
||||
" no build_info" % (lib_name,),
|
||||
stacklevel=2)
|
||||
break
|
||||
libraries.append((lib_name, build_info))
|
683
.venv/Lib/site-packages/numpy/distutils/cpuinfo.py
Normal file
683
.venv/Lib/site-packages/numpy/distutils/cpuinfo.py
Normal file
@ -0,0 +1,683 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
cpuinfo
|
||||
|
||||
Copyright 2002 Pearu Peterson all rights reserved,
|
||||
Pearu Peterson <pearu@cens.ioc.ee>
|
||||
Permission to use, modify, and distribute this software is given under the
|
||||
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
|
||||
this distribution for specifics.
|
||||
|
||||
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
|
||||
Pearu Peterson
|
||||
|
||||
"""
|
||||
__all__ = ['cpu']
|
||||
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
import types
|
||||
import warnings
|
||||
|
||||
from subprocess import getstatusoutput
|
||||
|
||||
|
||||
def getoutput(cmd, successful_status=(0,), stacklevel=1):
    """Run *cmd* through subprocess.getstatusoutput.

    Returns ``(ok, output)`` where *ok* is True when the command exited
    with a status in *successful_status*.  Failure to launch the command
    is reported via a warning and returns ``(False, "")``.
    """
    try:
        status, output = getstatusoutput(cmd)
    except OSError as e:
        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
        return False, ""
    # BUG FIX: os.WIFEXITED/os.WEXITSTATUS only exist on POSIX; on other
    # platforms (e.g. Windows) the old code raised AttributeError.  Fall
    # back to comparing the return code directly there.
    if hasattr(os, 'WIFEXITED'):
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
            return True, output
        return False, output
    return status in successful_status, output
|
||||
|
||||
def command_info(successful_status=(0,), stacklevel=1, **kw):
    """Run each keyword's command and collect the successful outputs.

    Every keyword argument maps a result name to a shell command; only
    commands whose status is in *successful_status* contribute a
    (stripped) entry to the returned dict.
    """
    results = {}
    for name in kw:
        ok, output = getoutput(kw[name], successful_status=successful_status,
                               stacklevel=stacklevel + 1)
        if ok:
            results[name] = output.strip()
    return results
|
||||
|
||||
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
    """Yield the stripped lines of *cmd*'s output; yields nothing when
    the command does not succeed."""
    ok, output = getoutput(cmd, successful_status=successful_status,
                           stacklevel=stacklevel + 1)
    if not ok:
        return
    yield from (raw.strip() for raw in output.splitlines())
|
||||
|
||||
def key_value_from_command(cmd, sep, successful_status=(0,),
                           stacklevel=1):
    """Parse *cmd*'s output into a dict of ``key<sep>value`` lines.

    Lines that do not split into exactly two fields on the first *sep*
    are ignored; keys and values are whitespace-stripped.
    """
    pairs = {}
    for raw in command_by_line(cmd, successful_status=successful_status,
                               stacklevel=stacklevel + 1):
        parts = [piece.strip() for piece in raw.split(sep, 1)]
        if len(parts) == 2:
            pairs[parts[0]] = parts[1]
    return pairs
|
||||
|
||||
class CPUInfoBase:
    """Holds CPU information and provides methods for requiring
    the availability of various CPU features.
    """

    def _try_call(self, func):
        # Call func(), swallowing every error: feature probes on missing
        # info keys simply report None instead of raising.
        try:
            return func()
        except Exception:
            pass

    def __getattr__(self, name):
        # Expose each private '_is_*' / '_has_*' probe under its public
        # name ('is_*', 'has_*') as a zero-argument callable that never
        # raises; unknown public names become a callable returning None.
        if not name.startswith('_'):
            if hasattr(self, '_'+name):
                attr = getattr(self, '_'+name)
                if isinstance(attr, types.MethodType):
                    return lambda func=self._try_call,attr=attr : func(attr)
            else:
                return lambda : None
        raise AttributeError(name)

    def _getNCPUs(self):
        # Conservative default; platform-specific subclasses override.
        return 1

    def __get_nbits(self):
        # Pointer width ('32' or '64') of the running Python build,
        # derived from platform.architecture().
        abits = platform.architecture()[0]
        nbits = re.compile(r'(\d+)bit').search(abits).group(1)
        return nbits

    def _is_32bit(self):
        return self.__get_nbits() == '32'

    def _is_64bit(self):
        return self.__get_nbits() == '64'
|
||||
|
||||
class LinuxCPUInfo(CPUInfoBase):
    # CPU information gathered from 'uname -m' and /proc/cpuinfo; one
    # dict per processor entry, cached on the class after the first
    # construction.

    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = [ {} ]
        ok, output = getoutput('uname -m')
        if ok:
            info[0]['uname_m'] = output.strip()
        try:
            fo = open('/proc/cpuinfo')
        except OSError as e:
            warnings.warn(str(e), UserWarning, stacklevel=2)
        else:
            for line in fo:
                name_value = [s.strip() for s in line.split(':', 1)]
                if len(name_value) != 2:
                    continue
                name, value = name_value
                if not info or name in info[-1]: # next processor
                    info.append({})
                info[-1][name] = value
            fo.close()
        self.__class__.info = info

    def _not_impl(self): pass

    # Athlon

    def _is_AMD(self):
        return self.info[0]['vendor_id']=='AuthenticAMD'

    def _is_AthlonK6_2(self):
        return self._is_AMD() and self.info[0]['model'] == '2'

    def _is_AthlonK6_3(self):
        return self._is_AMD() and self.info[0]['model'] == '3'

    def _is_AthlonK6(self):
        return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None

    def _is_AthlonK7(self):
        return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None

    def _is_AthlonMP(self):
        return re.match(r'.*?Athlon\(tm\) MP\b',
                        self.info[0]['model name']) is not None

    def _is_AMD64(self):
        return self.is_AMD() and self.info[0]['family'] == '15'

    def _is_Athlon64(self):
        return re.match(r'.*?Athlon\(tm\) 64\b',
                        self.info[0]['model name']) is not None

    def _is_AthlonHX(self):
        return re.match(r'.*?Athlon HX\b',
                        self.info[0]['model name']) is not None

    def _is_Opteron(self):
        return re.match(r'.*?Opteron\b',
                        self.info[0]['model name']) is not None

    def _is_Hammer(self):
        return re.match(r'.*?Hammer\b',
                        self.info[0]['model name']) is not None

    # Alpha

    def _is_Alpha(self):
        return self.info[0]['cpu']=='Alpha'

    def _is_EV4(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'

    def _is_EV5(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'

    def _is_EV56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'

    def _is_PCA56(self):
        return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'

    # Intel

    #XXX
    _is_i386 = _not_impl

    def _is_Intel(self):
        return self.info[0]['vendor_id']=='GenuineIntel'

    def _is_i486(self):
        return self.info[0]['cpu']=='i486'

    def _is_i586(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '5'

    def _is_i686(self):
        return self.is_Intel() and self.info[0]['cpu family'] == '6'

    def _is_Celeron(self):
        return re.match(r'.*?Celeron',
                        self.info[0]['model name']) is not None

    def _is_Pentium(self):
        return re.match(r'.*?Pentium',
                        self.info[0]['model name']) is not None

    def _is_PentiumII(self):
        return re.match(r'.*?Pentium.*?II\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumPro(self):
        return re.match(r'.*?PentiumPro\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumMMX(self):
        return re.match(r'.*?Pentium.*?MMX\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumIII(self):
        return re.match(r'.*?Pentium.*?III\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumIV(self):
        return re.match(r'.*?Pentium.*?(IV|4)\b',
                        self.info[0]['model name']) is not None

    def _is_PentiumM(self):
        return re.match(r'.*?Pentium.*?M\b',
                        self.info[0]['model name']) is not None

    def _is_Prescott(self):
        return self.is_PentiumIV() and self.has_sse3()

    def _is_Nocona(self):
        return (self.is_Intel()
                and (self.info[0]['cpu family'] == '6'
                     or self.info[0]['cpu family'] == '15')
                and (self.has_sse3() and not self.has_ssse3())
                and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)

    def _is_Core2(self):
        return (self.is_64bit() and self.is_Intel() and
                re.match(r'.*?Core\(TM\)2\b',
                         self.info[0]['model name']) is not None)

    def _is_Itanium(self):
        return re.match(r'.*?Itanium\b',
                        self.info[0]['family']) is not None

    def _is_XEON(self):
        return re.match(r'.*?XEON\b',
                        self.info[0]['model name'], re.IGNORECASE) is not None

    _is_Xeon = _is_XEON

    # Varia

    def _is_singleCPU(self):
        return len(self.info) == 1

    def _getNCPUs(self):
        return len(self.info)

    def _has_fdiv_bug(self):
        return self.info[0]['fdiv_bug']=='yes'

    def _has_f00f_bug(self):
        return self.info[0]['f00f_bug']=='yes'

    def _has_mmx(self):
        return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None

    def _has_sse(self):
        return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None

    def _has_sse2(self):
        return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None

    def _has_sse3(self):
        # SSE3 is reported as 'pni' (Prescott New Instructions) in the
        # /proc/cpuinfo flags field.
        return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None

    def _has_ssse3(self):
        return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None

    def _has_3dnow(self):
        return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None

    def _has_3dnowext(self):
        return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
|
||||
|
||||
class IRIXCPUInfo(CPUInfoBase):
    # CPU information from IRIX 'sysconf' output (space-separated
    # key/value pairs), cached on the class.
    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = key_value_from_command('sysconf', sep=' ',
                                      successful_status=(0, 1))
        self.__class__.info = info

    def _not_impl(self): pass

    def _is_singleCPU(self):
        return self.info.get('NUM_PROCESSORS') == '1'

    def _getNCPUs(self):
        return int(self.info.get('NUM_PROCESSORS', 1))

    def __cputype(self, n):
        # Compare the first word of the PROCESSORS entry against 'r<n>'.
        return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
    def _is_r2000(self): return self.__cputype(2000)
    def _is_r3000(self): return self.__cputype(3000)
    def _is_r3900(self): return self.__cputype(3900)
    def _is_r4000(self): return self.__cputype(4000)
    def _is_r4100(self): return self.__cputype(4100)
    def _is_r4300(self): return self.__cputype(4300)
    def _is_r4400(self): return self.__cputype(4400)
    def _is_r4600(self): return self.__cputype(4600)
    def _is_r4650(self): return self.__cputype(4650)
    def _is_r5000(self): return self.__cputype(5000)
    def _is_r6000(self): return self.__cputype(6000)
    def _is_r8000(self): return self.__cputype(8000)
    def _is_r10000(self): return self.__cputype(10000)
    def _is_r12000(self): return self.__cputype(12000)
    def _is_rorion(self): return self.__cputype('orion')

    def get_ip(self):
        try: return self.info.get('MACHINE')
        except Exception: pass
    def __machine(self, n):
        # Compare the MACHINE entry against 'ip<n>'.
        return self.info.get('MACHINE').lower() == 'ip%s' % (n)
    def _is_IP19(self): return self.__machine(19)
    def _is_IP20(self): return self.__machine(20)
    def _is_IP21(self): return self.__machine(21)
    def _is_IP22(self): return self.__machine(22)
    def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
    def _is_IP22_5k(self): return self.__machine(22)  and self._is_r5000()
    def _is_IP24(self): return self.__machine(24)
    def _is_IP25(self): return self.__machine(25)
    def _is_IP26(self): return self.__machine(26)
    def _is_IP27(self): return self.__machine(27)
    def _is_IP28(self): return self.__machine(28)
    def _is_IP30(self): return self.__machine(30)
    def _is_IP32(self): return self.__machine(32)
    def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
    def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
|
||||
|
||||
|
||||
class DarwinCPUInfo(CPUInfoBase):
    # CPU information from the 'arch', 'machine' and 'sysctl hw'
    # commands, cached on the class.
    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = command_info(arch='arch',
                            machine='machine')
        info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
        self.__class__.info = info

    def _not_impl(self): pass

    def _getNCPUs(self):
        return int(self.info['sysctl_hw'].get('hw.ncpu', 1))

    def _is_Power_Macintosh(self):
        return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'

    def _is_i386(self):
        return self.info['arch']=='i386'
    def _is_ppc(self):
        return self.info['arch']=='ppc'

    def __machine(self, n):
        # Compare the 'machine' output against 'ppc<n>'.
        return self.info['machine'] == 'ppc%s'%n
    def _is_ppc601(self): return self.__machine(601)
    def _is_ppc602(self): return self.__machine(602)
    def _is_ppc603(self): return self.__machine(603)
    def _is_ppc603e(self): return self.__machine('603e')
    def _is_ppc604(self): return self.__machine(604)
    def _is_ppc604e(self): return self.__machine('604e')
    def _is_ppc620(self): return self.__machine(620)
    def _is_ppc630(self): return self.__machine(630)
    def _is_ppc740(self): return self.__machine(740)
    def _is_ppc7400(self): return self.__machine(7400)
    def _is_ppc7450(self): return self.__machine(7450)
    def _is_ppc750(self): return self.__machine(750)
    def _is_ppc403(self): return self.__machine(403)
    def _is_ppc505(self): return self.__machine(505)
    def _is_ppc801(self): return self.__machine(801)
    def _is_ppc821(self): return self.__machine(821)
    def _is_ppc823(self): return self.__machine(823)
    def _is_ppc860(self): return self.__machine(860)
|
||||
|
||||
|
||||
class SunOSCPUInfo(CPUInfoBase):
    # CPU information from SunOS commands ('arch', 'mach', 'isainfo',
    # 'uname -X', 'psrinfo'), cached on the class.

    info = None

    def __init__(self):
        if self.info is not None:
            return
        info = command_info(arch='arch',
                            mach='mach',
                            uname_i='uname_i',
                            isainfo_b='isainfo -b',
                            isainfo_n='isainfo -n',
                            )
        # NOTE(review): the command string 'uname_i' above looks like a
        # typo for 'uname -i'; as written the command fails, the
        # 'uname_i' key is never populated, and the probes below that use
        # it return None (via CPUInfoBase._try_call).  Left unchanged
        # pending confirmation.
        info['uname_X'] = key_value_from_command('uname -X', sep='=')
        for line in command_by_line('psrinfo -v 0'):
            m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
            if m:
                info['processor'] = m.group('p')
                break
        self.__class__.info = info

    def _not_impl(self): pass

    def _is_i386(self):
        return self.info['isainfo_n']=='i386'
    def _is_sparc(self):
        return self.info['isainfo_n']=='sparc'
    def _is_sparcv9(self):
        return self.info['isainfo_n']=='sparcv9'

    def _getNCPUs(self):
        return int(self.info['uname_X'].get('NumCPU', 1))

    def _is_sun4(self):
        return self.info['arch']=='sun4'

    def _is_SUNW(self):
        return re.match(r'SUNW', self.info['uname_i']) is not None
    def _is_sparcstation5(self):
        return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
    def _is_ultra1(self):
        return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
    def _is_ultra250(self):
        return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
    def _is_ultra2(self):
        return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
    def _is_ultra30(self):
        return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
    def _is_ultra4(self):
        return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
    def _is_ultra5_10(self):
        return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
    def _is_ultra5(self):
        return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
    def _is_ultra60(self):
        return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
    def _is_ultra80(self):
        return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
    def _is_ultraenterprice(self):
        return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
    def _is_ultraenterprice10k(self):
        return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
    def _is_sunfire(self):
        return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
    def _is_ultra(self):
        return re.match(r'.*Ultra', self.info['uname_i']) is not None

    def _is_cpusparcv7(self):
        return self.info['processor']=='sparcv7'
    def _is_cpusparcv8(self):
        return self.info['processor']=='sparcv8'
    def _is_cpusparcv9(self):
        return self.info['processor']=='sparcv9'
|
||||
|
||||
class Win32CPUInfo(CPUInfoBase):
|
||||
|
||||
info = None
|
||||
pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
|
||||
# XXX: what does the value of
|
||||
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
|
||||
# mean?
|
||||
|
||||
def __init__(self):
|
||||
if self.info is not None:
|
||||
return
|
||||
info = []
|
||||
try:
|
||||
#XXX: Bad style to use so long `try:...except:...`. Fix it!
|
||||
import winreg
|
||||
|
||||
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
|
||||
r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
|
||||
chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
|
||||
pnum=0
|
||||
while True:
|
||||
try:
|
||||
proc=winreg.EnumKey(chnd, pnum)
|
||||
except winreg.error:
|
||||
break
|
||||
else:
|
||||
pnum+=1
|
||||
info.append({"Processor":proc})
|
||||
phnd=winreg.OpenKey(chnd, proc)
|
||||
pidx=0
|
||||
while True:
|
||||
try:
|
||||
name, value, vtpe=winreg.EnumValue(phnd, pidx)
|
||||
except winreg.error:
|
||||
break
|
||||
else:
|
||||
pidx=pidx+1
|
||||
info[-1][name]=value
|
||||
if name=="Identifier":
|
||||
srch=prgx.search(value)
|
||||
if srch:
|
||||
info[-1]["Family"]=int(srch.group("FML"))
|
||||
info[-1]["Model"]=int(srch.group("MDL"))
|
||||
info[-1]["Stepping"]=int(srch.group("STP"))
|
||||
except Exception as e:
|
||||
print(e, '(ignoring)')
|
||||
self.__class__.info = info
|
||||
|
||||
def _not_impl(self): pass
|
||||
|
||||
# Athlon
|
||||
|
||||
def _is_AMD(self):
|
||||
return self.info[0]['VendorIdentifier']=='AuthenticAMD'
|
||||
|
||||
def _is_Am486(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==4
|
||||
|
||||
def _is_Am5x86(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==4
|
||||
|
||||
def _is_AMDK5(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==5 \
|
||||
and self.info[0]['Model'] in [0, 1, 2, 3]
|
||||
|
||||
def _is_AMDK6(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==5 \
|
||||
and self.info[0]['Model'] in [6, 7]
|
||||
|
||||
def _is_AMDK6_2(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==5 \
|
||||
and self.info[0]['Model']==8
|
||||
|
||||
def _is_AMDK6_3(self):
|
||||
return self.is_AMD() and self.info[0]['Family']==5 \
|
||||
and self.info[0]['Model']==9
|
||||
|
||||
def _is_AMDK7(self):
|
||||
return self.is_AMD() and self.info[0]['Family'] == 6
|
||||
|
||||
# To reliably distinguish between the different types of AMD64 chips
|
||||
# (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
|
||||
# require looking at the 'brand' from cpuid
|
||||
|
||||
def _is_AMD64(self):
|
||||
return self.is_AMD() and self.info[0]['Family'] == 15
|
||||
|
||||
# Intel
|
||||
|
||||
def _is_Intel(self):
|
||||
return self.info[0]['VendorIdentifier']=='GenuineIntel'
|
||||
|
||||
def _is_i386(self):
|
||||
return self.info[0]['Family']==3
|
||||
|
||||
def _is_i486(self):
|
||||
return self.info[0]['Family']==4
|
||||
|
||||
def _is_i586(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==5
|
||||
|
||||
def _is_i686(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==6
|
||||
|
||||
def _is_Pentium(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==5
|
||||
|
||||
def _is_PentiumMMX(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==5 \
|
||||
and self.info[0]['Model']==4
|
||||
|
||||
def _is_PentiumPro(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==6 \
|
||||
and self.info[0]['Model']==1
|
||||
|
||||
def _is_PentiumII(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==6 \
|
||||
and self.info[0]['Model'] in [3, 5, 6]
|
||||
|
||||
def _is_PentiumIII(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==6 \
|
||||
and self.info[0]['Model'] in [7, 8, 9, 10, 11]
|
||||
|
||||
def _is_PentiumIV(self):
|
||||
return self.is_Intel() and self.info[0]['Family']==15
|
||||
|
||||
def _is_PentiumM(self):
|
||||
return self.is_Intel() and self.info[0]['Family'] == 6 \
|
||||
and self.info[0]['Model'] in [9, 13, 14]
|
||||
|
||||
def _is_Core2(self):
|
||||
return self.is_Intel() and self.info[0]['Family'] == 6 \
|
||||
and self.info[0]['Model'] in [15, 16, 17]
|
||||
|
||||
# Varia
|
||||
|
||||
def _is_singleCPU(self):
|
||||
return len(self.info) == 1
|
||||
|
||||
def _getNCPUs(self):
|
||||
return len(self.info)
|
||||
|
||||
def _has_mmx(self):
|
||||
if self.is_Intel():
|
||||
return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
|
||||
or (self.info[0]['Family'] in [6, 15])
|
||||
elif self.is_AMD():
|
||||
return self.info[0]['Family'] in [5, 6, 15]
|
||||
else:
|
||||
return False
|
||||
|
||||
def _has_sse(self):
|
||||
if self.is_Intel():
|
||||
return ((self.info[0]['Family']==6 and
|
||||
self.info[0]['Model'] in [7, 8, 9, 10, 11])
|
||||
or self.info[0]['Family']==15)
|
||||
elif self.is_AMD():
|
||||
return ((self.info[0]['Family']==6 and
|
||||
self.info[0]['Model'] in [6, 7, 8, 10])
|
||||
or self.info[0]['Family']==15)
|
||||
else:
|
||||
return False
|
||||
|
||||
def _has_sse2(self):
|
||||
if self.is_Intel():
|
||||
return self.is_Pentium4() or self.is_PentiumM() \
|
||||
or self.is_Core2()
|
||||
elif self.is_AMD():
|
||||
return self.is_AMD64()
|
||||
else:
|
||||
return False
|
||||
|
||||
def _has_3dnow(self):
|
||||
return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
|
||||
|
||||
def _has_3dnowext(self):
|
||||
return self.is_AMD() and self.info[0]['Family'] in [6, 15]
|
||||
|
||||
# Select the concrete CPUInfo implementation for the running platform;
# unknown platforms fall back to the feature-less CPUInfoBase.
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
    cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
    cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
    cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
    cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
    cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
    cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
    cpuinfo = CPUInfoBase

# Module-level singleton queried by the rest of numpy.distutils.
cpu = cpuinfo()
|
||||
|
||||
#if __name__ == "__main__":
|
||||
#
|
||||
# cpu.is_blaa()
|
||||
# cpu.is_Intel()
|
||||
# cpu.is_Alpha()
|
||||
#
|
||||
# print('CPU information:'),
|
||||
# for name in dir(cpuinfo):
|
||||
# if name[0]=='_' and name[1]!='_':
|
||||
# r = getattr(cpu,name[1:])()
|
||||
# if r:
|
||||
# if r!=1:
|
||||
# print('%s=%s' %(name[1:],r))
|
||||
# else:
|
||||
# print(name[1:]),
|
||||
# print()
|
316
.venv/Lib/site-packages/numpy/distutils/exec_command.py
Normal file
316
.venv/Lib/site-packages/numpy/distutils/exec_command.py
Normal file
@ -0,0 +1,316 @@
|
||||
"""
|
||||
exec_command
|
||||
|
||||
Implements exec_command function that is (almost) equivalent to
|
||||
commands.getstatusoutput function but on NT, DOS systems the
|
||||
returned status is actually correct (though, the returned status
|
||||
values may be different by a factor). In addition, exec_command
|
||||
takes keyword arguments for (re-)defining environment variables.
|
||||
|
||||
Provides functions:
|
||||
|
||||
exec_command --- execute command in a specified directory and
|
||||
in the modified environment.
|
||||
find_executable --- locate a command using info from environment
|
||||
variable PATH. Equivalent to posix `which`
|
||||
command.
|
||||
|
||||
Author: Pearu Peterson <pearu@cens.ioc.ee>
|
||||
Created: 11 January 2003
|
||||
|
||||
Requires: Python 2.x
|
||||
|
||||
Successfully tested on:
|
||||
|
||||
======== ============ =================================================
|
||||
os.name sys.platform comments
|
||||
======== ============ =================================================
|
||||
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
|
||||
PyCrust 0.9.3, Idle 1.0.2
|
||||
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
|
||||
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
|
||||
posix darwin Darwin 7.2.0, Python 2.3
|
||||
nt win32 Windows Me
|
||||
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
|
||||
Python 2.1.1 Idle 0.8
|
||||
nt win32 Windows 98, Python 2.1.1. Idle 0.8
|
||||
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
|
||||
fail i.e. redefining environment variables may
|
||||
not work. FIXED: don't use cygwin echo!
|
||||
Comment: also `cmd /c echo` will not work
|
||||
but redefining environment variables do work.
|
||||
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
|
||||
nt win32 Windows XP, Python 2.3.3
|
||||
======== ============ =================================================
|
||||
|
||||
Known bugs:
|
||||
|
||||
* Tests, that send messages to stderr, fail when executed from MSYS prompt
|
||||
because the messages are lost at some point.
|
||||
|
||||
"""
|
||||
__all__ = ['exec_command', 'find_executable']
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import locale
|
||||
import warnings
|
||||
|
||||
from numpy.distutils.misc_util import is_sequence, make_temp_file
|
||||
from numpy.distutils import log
|
||||
|
||||
def filepath_from_subprocess_output(output):
    """
    Convert `bytes` in the encoding used by a subprocess into a
    filesystem-appropriate `str`.

    Inherited from `exec_command`, and possibly incorrect.
    """
    encoding = locale.getpreferredencoding(False)
    if encoding is None:
        encoding = 'ascii'
    decoded = output.decode(encoding, errors='replace')
    decoded = decoded.replace('\r\n', '\n')
    # Another historical oddity: a single trailing newline is stripped.
    if decoded.endswith('\n'):
        decoded = decoded[:-1]
    return decoded
|
||||
|
||||
|
||||
def forward_bytes_to_stdout(val):
    """
    Forward bytes from a subprocess call to the console, without attempting
    to decode them.

    The assumption is that the subprocess call already returned bytes in
    a suitable encoding.
    """
    stream = sys.stdout
    if hasattr(stream, 'buffer'):
        # Binary layer available: write the bytes through untouched.
        stream.buffer.write(val)
        return
    if hasattr(stream, 'encoding'):
        # Text-only stream: round-trip through its declared encoding.
        stream.write(val.decode(stream.encoding))
        return
    # No declared encoding: best-effort UTF-8 with replacement characters.
    stream.write(val.decode('utf8', errors='replace'))
|
||||
|
||||
|
||||
def temp_file_name():
    """Return the name of a fresh (already closed) temporary file.

    .. deprecated:: 1.17
        Use ``tempfile.mkstemp`` instead.
    """
    # 2019-01-30, 1.17
    warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
                  'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
    handle, name = make_temp_file()
    handle.close()
    return name
|
||||
|
||||
def get_pythonexe():
    """Return the path of the running Python interpreter.

    On Windows/DOS a ``pythonw`` executable is swapped for the matching
    console ``python`` executable.
    """
    exe = sys.executable
    if os.name in ['nt', 'dos']:
        directory, basename = os.path.split(exe)
        basename = basename.upper().replace('PYTHONW', 'PYTHON')
        exe = os.path.join(directory, basename)
        assert os.path.isfile(exe), '%r is not a file' % (exe,)
    return exe
|
||||
|
||||
def find_executable(exe, path=None, _cache={}):
    """Return the full path of an executable, or None if not found.

    Symbolic links are not followed.

    Parameters
    ----------
    exe : str
        Executable name, optionally wrapped in double quotes.
    path : str, optional
        ``os.pathsep``-separated search path; defaults to ``$PATH``.
    _cache : dict
        Deliberate mutable default: a cross-call memo of
        ``(exe, path) -> resolved path``.  Only hits are cached.
    """
    key = exe, path
    try:
        return _cache[key]
    except KeyError:
        pass
    log.debug('find_executable(%r)' % exe)
    orig_exe = exe

    if path is None:
        path = os.environ.get('PATH', os.defpath)
    # On POSIX, resolve non-symlink candidates to their real path; elsewhere
    # paths are taken as-is.
    if os.name=='posix':
        realpath = os.path.realpath
    else:
        realpath = lambda a:a

    # Strip surrounding double quotes, e.g. '"C:\\Program Files\\foo"'.
    if exe.startswith('"'):
        exe = exe[1:-1]

    # On Windows-like systems, a name without a recognized extension is
    # probed with each of .exe/.com/.bat appended.
    suffixes = ['']
    if os.name in ['nt', 'dos', 'os2']:
        fn, ext = os.path.splitext(exe)
        extra_suffixes = ['.exe', '.com', '.bat']
        if ext.lower() not in extra_suffixes:
            suffixes = extra_suffixes

    # An absolute path is probed directly; otherwise every PATH entry is
    # tried in order.
    if os.path.isabs(exe):
        paths = ['']
    else:
        paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]

    for path in paths:
        fn = os.path.join(path, exe)
        for s in suffixes:
            f_ext = fn+s
            if not os.path.islink(f_ext):
                f_ext = realpath(f_ext)
            # Candidate must be a regular file with the execute bit set.
            if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
                log.info('Found executable %s' % f_ext)
                _cache[key] = f_ext
                return f_ext

    log.warn('Could not locate executable %s' % orig_exe)
    return None
|
||||
|
||||
############################################################
|
||||
|
||||
def _preserve_environment( names ):
    """Snapshot current values (or None when unset) of the given env vars."""
    log.debug('_preserve_environment(%r)' % (names))
    snapshot = {name: os.environ.get(name) for name in names}
    return snapshot
|
||||
|
||||
def _update_environment( **env ):
    """Write the given name/value pairs into os.environ (falsy becomes '')."""
    log.debug('_update_environment(...)')
    for key in env:
        os.environ[key] = env[key] or ''
|
||||
|
||||
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
                 _with_python = 1, **env ):
    """
    Return (status,output) of executed command.

    .. deprecated:: 1.17
        Use subprocess.Popen instead

    Parameters
    ----------
    command : str
        A concatenated string of executable and arguments.
    execute_in : str
        Before running command ``cd execute_in`` and after ``cd -``.
    use_shell : {bool, None}, optional
        If True, execute ``sh -c command``. Default None (True)
    use_tee : {bool, None}, optional
        If True use tee. Default None (True)

    Returns
    -------
    res : str
        Both stdout and stderr messages.

    Notes
    -----
    On NT, DOS systems the returned status is correct for external commands.
    Wild cards will not work for non-posix systems or when use_shell=0.

    """
    # 2019-01-30, 1.17
    warnings.warn('exec_command is deprecated since NumPy v1.17, use '
                  'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
    log.debug('exec_command(%r,%s)' % (command,
         ','.join(['%s=%r'%kv for kv in env.items()])))

    # Historical defaults: shell and tee behaviour only on POSIX.
    if use_tee is None:
        use_tee = os.name=='posix'
    if use_shell is None:
        use_shell = os.name=='posix'
    execute_in = os.path.abspath(execute_in)
    oldcwd = os.path.abspath(os.getcwd())

    # Locate the directory containing this module; exec_dir is computed but
    # not used below -- kept for backward compatibility with old callers.
    if __name__[-12:] == 'exec_command':
        exec_dir = os.path.dirname(os.path.abspath(__file__))
    elif os.path.isfile('exec_command.py'):
        exec_dir = os.path.abspath('.')
    else:
        exec_dir = os.path.abspath(sys.argv[0])
        if os.path.isfile(exec_dir):
            exec_dir = os.path.dirname(exec_dir)

    # Temporarily switch to the requested working directory.
    if oldcwd!=execute_in:
        os.chdir(execute_in)
        log.debug('New cwd: %s' % execute_in)
    else:
        log.debug('Retaining cwd: %s' % oldcwd)

    # Snapshot the environment variables we are about to override so they
    # can be restored afterwards.
    oldenv = _preserve_environment( list(env.keys()) )
    _update_environment( **env )

    try:
        st = _exec_command(command,
                           use_shell=use_shell,
                           use_tee=use_tee,
                           **env)
    finally:
        # Always restore cwd and environment, even if the command raised.
        if oldcwd!=execute_in:
            os.chdir(oldcwd)
            log.debug('Restored cwd to %s' % oldcwd)
        _update_environment(**oldenv)

    return st
|
||||
|
||||
|
||||
def _exec_command(command, use_shell=None, use_tee = None, **env):
    """
    Internal workhorse for exec_command().

    Returns ``(returncode, text)`` where *text* is the decoded, combined
    stdout+stderr of the child process (or ``(127, '')`` if the command
    could not be launched).
    """
    if use_shell is None:
        use_shell = os.name=='posix'
    if use_tee is None:
        use_tee = os.name=='posix'

    if os.name == 'posix' and use_shell:
        # On POSIX, subprocess always uses /bin/sh, override
        sh = os.environ.get('SHELL', '/bin/sh')
        if is_sequence(command):
            command = [sh, '-c', ' '.join(command)]
        else:
            command = [sh, '-c', command]
        use_shell = False

    elif os.name == 'nt' and is_sequence(command):
        # On Windows, join the string for CreateProcess() ourselves as
        # subprocess does it a bit differently
        command = ' '.join(_quote_arg(arg) for arg in command)

    # Inherit environment by default
    env = env or None
    try:
        # universal_newlines is set to False so that communicate()
        # will return bytes. We need to decode the output ourselves
        # so that Python will not raise a UnicodeDecodeError when
        # it encounters an invalid character; rather, we simply replace it
        proc = subprocess.Popen(command, shell=use_shell, env=env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=False)
    except OSError:
        # Return 127, as os.spawn*() and /bin/sh do
        return 127, ''

    text, err = proc.communicate()
    mylocale = locale.getpreferredencoding(False)
    if mylocale is None:
        mylocale = 'ascii'
    text = text.decode(mylocale, errors='replace')
    text = text.replace('\r\n', '\n')
    # Another historical oddity
    if text[-1:] == '\n':
        text = text[:-1]

    if use_tee and text:
        # Mimic `tee`: echo the captured output to our own stdout as well.
        print(text)
    return proc.returncode, text
|
||||
|
||||
|
||||
def _quote_arg(arg):
|
||||
"""
|
||||
Quote the argument for safe use in a shell command line.
|
||||
"""
|
||||
# If there is a quote in the string, assume relevants parts of the
|
||||
# string are already quoted (e.g. '-I"C:\\Program Files\\..."')
|
||||
if '"' not in arg and ' ' in arg:
|
||||
return '"%s"' % arg
|
||||
return arg
|
||||
|
||||
############################################################
|
107
.venv/Lib/site-packages/numpy/distutils/extension.py
Normal file
107
.venv/Lib/site-packages/numpy/distutils/extension.py
Normal file
@ -0,0 +1,107 @@
|
||||
"""distutils.extension
|
||||
|
||||
Provides the Extension class, used to describe C/C++ extension
|
||||
modules in setup scripts.
|
||||
|
||||
Overridden to support f2py.
|
||||
|
||||
"""
|
||||
import re
|
||||
from distutils.extension import Extension as old_Extension
|
||||
|
||||
|
||||
cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
|
||||
fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
|
||||
|
||||
|
||||
class Extension(old_Extension):
    """
    Extended distutils Extension supporting f2py and Fortran sources.

    Parameters
    ----------
    name : str
        Extension name.
    sources : list of str
        List of source file locations relative to the top directory of
        the package.
    extra_compile_args : list of str
        Extra command line arguments to pass to the compiler.
    extra_f77_compile_args : list of str
        Extra command line arguments to pass to the fortran77 compiler.
    extra_f90_compile_args : list of str
        Extra command line arguments to pass to the fortran90 compiler.
    """
    def __init__(
            self, name, sources,
            include_dirs=None,
            define_macros=None,
            undef_macros=None,
            library_dirs=None,
            libraries=None,
            runtime_library_dirs=None,
            extra_objects=None,
            extra_compile_args=None,
            extra_link_args=None,
            export_symbols=None,
            swig_opts=None,
            depends=None,
            language=None,
            f2py_options=None,
            module_dirs=None,
            extra_c_compile_args=None,
            extra_cxx_compile_args=None,
            extra_f77_compile_args=None,
            extra_f90_compile_args=None,):

        # The base class is given an empty sources list on purpose; see the
        # comment below where self.sources is assigned directly.
        old_Extension.__init__(
                self, name, [],
                include_dirs=include_dirs,
                define_macros=define_macros,
                undef_macros=undef_macros,
                library_dirs=library_dirs,
                libraries=libraries,
                runtime_library_dirs=runtime_library_dirs,
                extra_objects=extra_objects,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                export_symbols=export_symbols)

        # Avoid assert statements checking that sources contains strings:
        self.sources = sources

        # Python 2.4 distutils new features
        self.swig_opts = swig_opts or []
        # swig_opts is assumed to be a list. Here we handle the case where it
        # is specified as a string instead.
        if isinstance(self.swig_opts, str):
            import warnings
            msg = "swig_opts is specified as a string instead of a list"
            warnings.warn(msg, SyntaxWarning, stacklevel=2)
            self.swig_opts = self.swig_opts.split()

        # Python 2.3 distutils new features
        self.depends = depends or []
        self.language = language

        # numpy_distutils features
        self.f2py_options = f2py_options or []
        self.module_dirs = module_dirs or []
        self.extra_c_compile_args = extra_c_compile_args or []
        self.extra_cxx_compile_args = extra_cxx_compile_args or []
        self.extra_f77_compile_args = extra_f77_compile_args or []
        self.extra_f90_compile_args = extra_f90_compile_args or []

        return

    def has_cxx_sources(self):
        """Return True if any source has a C++ extension (.cpp/.cxx/.cc)."""
        for source in self.sources:
            if cxx_ext_re(str(source)):
                return True
        return False

    def has_f2py_sources(self):
        """Return True if any source is Fortran or an f2py .pyf signature file."""
        for source in self.sources:
            if fortran_pyf_ext_re(source):
                return True
        return False

# class Extension
|
||||
|
||||
# class Extension
|
1024
.venv/Lib/site-packages/numpy/distutils/fcompiler/__init__.py
Normal file
1024
.venv/Lib/site-packages/numpy/distutils/fcompiler/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
156
.venv/Lib/site-packages/numpy/distutils/fcompiler/absoft.py
Normal file
156
.venv/Lib/site-packages/numpy/distutils/fcompiler/absoft.py
Normal file
@ -0,0 +1,156 @@
|
||||
|
||||
# http://www.absoft.com/literature/osxuserguide.pdf
|
||||
# http://www.absoft.com/documentation.html
|
||||
|
||||
# Notes:
|
||||
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
|
||||
# generated extension modules (works for f2py v2.45.241_1936 and up)
|
||||
import os
|
||||
|
||||
from numpy.distutils.cpuinfo import cpu
|
||||
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
|
||||
from numpy.distutils.misc_util import cyg2win32
|
||||
|
||||
compilers = ['AbsoftFCompiler']
|
||||
|
||||
class AbsoftFCompiler(FCompiler):
    """FCompiler for the Absoft Pro Fortran compilers (f77/f90)."""

    compiler_type = 'absoft'
    description = 'Absoft Corp Fortran Compiler'
    #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
    version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
                      r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'

    # on windows: f90 -V -c dummy.f
    # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16

    # samt5735(8)$ f90 -V -c dummy.f
    # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
    # Note that fink installs g77 as f77, so need to use f90 for detection.

    executables = {
        'version_cmd'  : None,          # set by update_executables
        'compiler_f77' : ["f77"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["<F90>"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    if os.name=='nt':
        library_switch = '/out:'      #No space after /out:!

    module_dir_switch = None
    module_include_switch = '-p'

    def update_executables(self):
        """Build the version command around a dummy source file (cygwin-safe path)."""
        f = cyg2win32(dummy_fortran_file())
        self.executables['version_cmd'] = ['<F90>', '-V', '-c',
                                           f+'.f', '-o', f+'.o']

    def get_flags_linker_so(self):
        """Flags for linking a shared library.

        NOTE(review): the version comparisons below look like *string*
        comparisons, under which '10.0' >= '9.0' would be False -- confirm
        what get_version() returns before relying on them for v10+.
        """
        if os.name=='nt':
            opt = ['/dll']
        # The "-K shared" switches are being left in for pre-9.0 versions
        # of Absoft though I don't think versions earlier than 9 can
        # actually be used to build shared libraries. In fact, version
        # 8 of Absoft doesn't recognize "-K shared" and will fail.
        elif self.get_version() >= '9.0':
            opt = ['-shared']
        else:
            opt = ["-K", "shared"]
        return opt

    def library_dir_option(self, dir):
        """Return the switch for a library search directory.

        NOTE(review): returns a list on Windows but a plain string
        elsewhere; callers apparently cope with both -- verify before
        changing either branch.
        """
        if os.name=='nt':
            return ['-link', '/PATH:%s' % (dir)]
        return "-L" + dir

    def library_option(self, lib):
        """Return the switch that links one library by name."""
        if os.name=='nt':
            return '%s.lib' % (lib)
        return "-l" + lib

    def get_library_dirs(self):
        """Library search path, extended with the $ABSOFT installation's lib dir."""
        opt = FCompiler.get_library_dirs(self)
        d = os.environ.get('ABSOFT')
        if d:
            if self.get_version() >= '10.0':
                # use shared libraries, the static libraries were not compiled -fPIC
                prefix = 'sh'
            else:
                prefix = ''
            if cpu.is_64bit():
                suffix = '64'
            else:
                suffix = ''
            opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
        return opt

    def get_libraries(self):
        """Fortran runtime libraries, selected by compiler version."""
        opt = FCompiler.get_libraries(self)
        if self.get_version() >= '11.0':
            opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
        elif self.get_version() >= '10.0':
            opt.extend(['af90math', 'afio', 'af77math', 'U77'])
        elif self.get_version() >= '8.0':
            opt.extend(['f90math', 'fio', 'f77math', 'U77'])
        else:
            opt.extend(['fio', 'f90math', 'fmath', 'U77'])
        if os.name =='nt':
            opt.append('COMDLG32')
        return opt

    def get_flags(self):
        """Base flags: -s on non-Windows, plus -fpic from version 8.2 on."""
        opt = FCompiler.get_flags(self)
        if os.name != 'nt':
            opt.extend(['-s'])
            if self.get_version():
                if self.get_version()>='8.2':
                    opt.append('-fpic')
        return opt

    def get_flags_f77(self):
        """Fortran 77 flags; version- and platform-dependent compatibility switches."""
        opt = FCompiler.get_flags_f77(self)
        opt.extend(['-N22', '-N90', '-N110'])
        v = self.get_version()
        if os.name == 'nt':
            if v and v>='8.0':
                opt.extend(['-f', '-N15'])
        else:
            opt.append('-f')
            if v:
                if v<='4.6':
                    opt.append('-B108')
                else:
                    # Though -N15 is undocumented, it works with
                    # Absoft 8.0 on Linux
                    opt.append('-N15')
        return opt

    def get_flags_f90(self):
        """Fortran 90 flags: g77-compatible external name mangling."""
        opt = FCompiler.get_flags_f90(self)
        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
        if self.get_version():
            if self.get_version()>'4.6':
                opt.extend(["-YDEALLOC=ALL"])
        return opt

    def get_flags_fix(self):
        """Fixed-form Fortran 90 flags (same mangling as free-form plus -f fixed)."""
        opt = FCompiler.get_flags_fix(self)
        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
        opt.extend(["-f", "fixed"])
        return opt

    def get_flags_opt(self):
        """Optimization flags."""
        opt = ['-O']
        return opt
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: report the detected Absoft compiler version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='absoft').get_version())
|
73
.venv/Lib/site-packages/numpy/distutils/fcompiler/arm.py
Normal file
73
.venv/Lib/site-packages/numpy/distutils/fcompiler/arm.py
Normal file
@ -0,0 +1,73 @@
|
||||
from __future__ import division, absolute_import, print_function
|
||||
|
||||
import sys
|
||||
|
||||
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
|
||||
from sys import platform
|
||||
from os.path import join, dirname, normpath
|
||||
|
||||
compilers = ['ArmFlangCompiler']
|
||||
|
||||
import functools
|
||||
|
||||
class ArmFlangCompiler(FCompiler):
    """FCompiler driving the Arm ``armflang`` Fortran compiler."""

    compiler_type = 'arm'
    description = 'Arm Compiler'
    version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*'

    ar_exe = 'lib.exe'
    possible_executables = ['armflang']

    executables = {
        'version_cmd': ["", "--version"],
        'compiler_f77': ["armflang", "-fPIC"],
        'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"],
        'compiler_f90': ["armflang", "-fPIC"],
        'linker_so': ["armflang", "-fPIC", "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': None
    }

    pic_flags = ["-fPIC", "-DPIC"]
    c_compiler = 'arm'
    module_dir_switch = '-module '  # Don't remove ending space!

    def get_libraries(self):
        """Runtime libraries to link: flang's Fortran runtime plus OpenMP stub."""
        opt = FCompiler.get_libraries(self)
        opt.extend(['flang', 'flangrti', 'ompstub'])
        return opt

    def get_library_dirs(self):
        """List of compiler library directories.

        Fix: the original wrapped this method in ``functools.lru_cache``,
        which on an instance method keys on ``self`` and keeps every
        compiler instance alive for the lifetime of the process (ruff
        B019).  The computation is trivial, so the cache is simply removed;
        callers see identical results.
        """
        opt = FCompiler.get_library_dirs(self)
        # NOTE(review): 'compiler_f77' defaults to ['armflang'] with no
        # directory part, in which case dirname() yields '' -- presumably
        # the executable is resolved to a full path beforehand; confirm.
        flang_dir = dirname(self.executables['compiler_f77'][0])
        opt.append(normpath(join(flang_dir, '..', 'lib')))

        return opt

    def get_flags(self):
        """No extra base flags."""
        return []

    def get_flags_free(self):
        """No extra free-form flags."""
        return []

    def get_flags_debug(self):
        """Debug build flags."""
        return ['-g']

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O3']

    def get_flags_arch(self):
        """No architecture-specific flags."""
        return []

    def runtime_library_dir_option(self, dir):
        """Embed *dir* in the rpath of produced binaries."""
        return '-Wl,-rpath=%s' % dir
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: report the detected Arm compiler version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='armflang').get_version())
|
||||
|
120
.venv/Lib/site-packages/numpy/distutils/fcompiler/compaq.py
Normal file
120
.venv/Lib/site-packages/numpy/distutils/fcompiler/compaq.py
Normal file
@ -0,0 +1,120 @@
|
||||
|
||||
#http://www.compaq.com/fortran/docs/
|
||||
import os
|
||||
import sys
|
||||
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
from distutils.errors import DistutilsPlatformError
|
||||
|
||||
# The Visual variant is only registered where it can actually exist.
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
    # Otherwise we'd get a false positive on posix systems with
    # case-insensitive filesystems (like darwin), because we'll pick
    # up /bin/df
    compilers.append('CompaqVisualFCompiler')
|
||||
|
||||
class CompaqFCompiler(FCompiler):
    """FCompiler for the Compaq Fortran compiler ('fort' on Linux, 'f90' elsewhere)."""

    compiler_type = 'compaq'
    description = 'Compaq Fortran Compiler'
    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'

    # On Linux the driver is installed as 'fort'; on Tru64 it is 'f90'.
    if sys.platform[:5]=='linux':
        fc_exe = 'fort'
    else:
        fc_exe = 'f90'

    executables = {
        'version_cmd'  : ['<F90>', "-version"],
        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
        'compiler_fix' : [fc_exe, "-fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    module_dir_switch = '-module ' # not tested
    module_include_switch = '-I'

    def get_flags(self):
        """Base flags: no double-underscore mangling, C-style string lengths."""
        return ['-assume no2underscore', '-nomixed_str_len_arg']
    def get_flags_debug(self):
        """Debug flags with bounds checking."""
        return ['-g', '-check bounds']
    def get_flags_opt(self):
        """Aggressive optimization flags."""
        return ['-O4', '-align dcommons', '-assume bigarrays',
                '-assume nozsize', '-math_library fast']
    def get_flags_arch(self):
        """Tune generated code for the build host."""
        return ['-arch host', '-tune host']
    def get_flags_linker_so(self):
        """Shared-library link flags; Tru64 additionally tolerates unresolved symbols."""
        if sys.platform[:5]=='linux':
            return ['-shared']
        return ['-shared', '-Wl,-expect_unresolved,*']
|
||||
|
||||
class CompaqVisualFCompiler(FCompiler):
    """FCompiler for DIGITAL/Compaq Visual Fortran on Windows (DF.exe)."""

    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
                       r' Version (?P<version>[^\s]*).*')

    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:'      #No space after /OUT:!

    static_lib_extension = ".lib"
    static_lib_format = "%s%s"
    module_dir_switch = '/module:'
    module_include_switch = '/I'

    ar_exe = 'lib.exe'
    fc_exe = 'DF'

    if sys.platform=='win32':
        from numpy.distutils.msvccompiler import MSVCCompiler

        # NOTE: this block runs at class-creation time.  It probes MSVC to
        # find the librarian (lib.exe) and deliberately tolerates several
        # known initialization failures so that merely importing this module
        # never breaks on machines without a working MSVC setup.
        try:
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError as e:
            if '_MSVCCompiler__root' in str(e):
                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
            else:
                raise
        except OSError as e:
            if not "vcvarsall.bat" in str(e):
                print("Unexpected OSError in", __file__)
                raise
        except ValueError as e:
            if not "'path'" in str(e):
                print("Unexpected ValueError in", __file__)
                raise

    executables = {
        'version_cmd'  : ['<F90>', "/what"],
        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
        'compiler_fix' : [fc_exe, "/fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : [ar_exe, "/OUT:"],
        'ranlib'       : None
        }

    def get_flags(self):
        """Base flags: C calling convention and lowercase external names."""
        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
                '/names:lowercase', '/assume:underscore']
    def get_flags_opt(self):
        """Optimization flags."""
        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
    def get_flags_arch(self):
        """Thread-safe runtime selection."""
        return ['/threads']
    def get_flags_debug(self):
        """Debug flags."""
        return ['/debug']
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: report the detected Compaq compiler version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='compaq').get_version())
|
@ -0,0 +1,88 @@
|
||||
import os
|
||||
from distutils.dist import Distribution
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
class EnvironmentConfig:
    """Resolve compiler configuration values from three layered sources.

    Each known key maps to a 5-tuple ``(hook, envvar, confvar, convert,
    append)``.  Resolution order in ``_get_var`` is: hook value, then
    environment variable, then distutils config file, with ``convert``
    applied to the textual sources.
    """
    def __init__(self, distutils_section='ALL', **kw):
        # Section of the distutils config consulted for confvar lookups.
        self._distutils_section = distutils_section
        # name -> (hook, envvar, confvar, convert, append) descriptors.
        self._conf_keys = kw
        self._conf = None
        # Callable(name, hook) installed via clone(); computes hook values.
        self._hook_handler = None

    def dump_variable(self, name):
        """Print the value of *name* from each source, for debugging."""
        conf_desc = self._conf_keys[name]
        hook, envvar, confvar, convert, append = conf_desc
        if not convert:
            convert = lambda x : x
        print('%s.%s:' % (self._distutils_section, name))
        v = self._hook_handler(name, hook)
        print(' hook : %s' % (convert(v),))
        if envvar:
            v = os.environ.get(envvar, None)
            print(' environ: %s' % (convert(v),))
        if confvar and self._conf:
            v = self._conf.get(confvar, (None, None))[1]
            print(' config : %s' % (convert(v),))

    def dump_variables(self):
        """Dump every known configuration key (see dump_variable)."""
        for name in self._conf_keys:
            self.dump_variable(name)

    def __getattr__(self, name):
        # Attribute access resolves a configuration key lazily; unknown
        # names raise AttributeError as usual.
        try:
            conf_desc = self._conf_keys[name]
        except KeyError:
            raise AttributeError(
                f"'EnvironmentConfig' object has no attribute '{name}'"
            ) from None

        return self._get_var(name, conf_desc)

    def get(self, name, default=None):
        """Dict-style lookup returning *default* for unknown or unset keys."""
        try:
            conf_desc = self._conf_keys[name]
        except KeyError:
            return default
        var = self._get_var(name, conf_desc)
        if var is None:
            var = default
        return var

    def _get_var(self, name, conf_desc):
        """Resolve one key: hook value, then environment, then config file."""
        hook, envvar, confvar, convert, append = conf_desc
        if convert is None:
            convert = lambda x: x
        var = self._hook_handler(name, hook)
        if envvar is not None:
            envvar_contents = os.environ.get(envvar)
            if envvar_contents is not None:
                envvar_contents = convert(envvar_contents)
                # When the hook produced a value and the key is marked
                # append-able, the env var extends rather than replaces it
                # (unless NPY_DISTUTILS_APPEND_FLAGS is explicitly '0').
                if var and append:
                    if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
                        var.extend(envvar_contents)
                    else:
                        # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
                        # to keep old (overwrite flags rather than append to
                        # them) behavior
                        var = envvar_contents
                else:
                    var = envvar_contents
        # The config file, when present, has the final word.
        if confvar is not None and self._conf:
            if confvar in self._conf:
                source, confvar_contents = self._conf[confvar]
                var = convert(confvar_contents)
        return var


    def clone(self, hook_handler):
        """Return a copy of this config wired to a new hook handler."""
        ec = self.__class__(distutils_section=self._distutils_section,
                            **self._conf_keys)
        ec._hook_handler = hook_handler
        return ec

    def use_distribution(self, dist):
        """Attach the distutils option dict (or a plain mapping) for confvar lookups."""
        if isinstance(dist, Distribution):
            self._conf = dist.get_option_dict(self._distutils_section)
        else:
            self._conf = dist
|
46
.venv/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py
Normal file
46
.venv/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py
Normal file
@ -0,0 +1,46 @@
|
||||
"""
|
||||
fujitsu
|
||||
|
||||
Supports Fujitsu compiler function.
|
||||
This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
|
||||
"""
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['FujitsuFCompiler']
|
||||
|
||||
class FujitsuFCompiler(FCompiler):
    """FCompiler subclass for the Fujitsu Fortran compiler (``frt``).

    Developed by Fujitsu; used on A64FX (Fugaku).
    """
    compiler_type = 'fujitsu'
    description = 'Fujitsu Fortran Compiler'

    possible_executables = ['frt']
    version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
    # $ frt --version
    # frt (FRT) x.x.x yyyymmdd

    executables = {
        'version_cmd'  : ["<F77>", "--version"],
        'compiler_f77' : ["frt", "-Fixed"],
        'compiler_fix' : ["frt", "-Fixed"],
        'compiler_f90' : ["frt"],
        'linker_so'    : ["frt", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-KPIC']           # position-independent code
    module_dir_switch = '-M'        # output directory for module files
    module_include_switch = '-I'    # search path for module files

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O3']
    def get_flags_debug(self):
        """Debug-build flags."""
        return ['-g']
    def runtime_library_dir_option(self, dir):
        """Linker option that embeds *dir* as a runtime search path (rpath)."""
        return f'-Wl,-rpath={dir}'
    def get_libraries(self):
        """Fujitsu Fortran runtime libraries to link against."""
        return ['fj90f', 'fj90i', 'fjsrcinfo']
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: print the detected Fujitsu compiler version.
    from distutils import log
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler('fujitsu').get_version())
|
42
.venv/Lib/site-packages/numpy/distutils/fcompiler/g95.py
Normal file
42
.venv/Lib/site-packages/numpy/distutils/fcompiler/g95.py
Normal file
@ -0,0 +1,42 @@
|
||||
# http://g95.sourceforge.net/
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['G95FCompiler']
|
||||
|
||||
class G95FCompiler(FCompiler):
    """FCompiler subclass for the G95 Fortran compiler."""
    compiler_type = 'g95'
    description = 'G95 Fortran Compiler'

    # Older banner format, kept for reference:
    # version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
    # $ g95 --version
    # G95 (GCC 4.0.3 (g95!) May 22 2006)

    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
    # $ g95 --version
    # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)

    executables = {
        'version_cmd'  : ["<F90>", "--version"],
        'compiler_f77' : ["g95", "-ffixed-form"],
        'compiler_fix' : ["g95", "-ffixed-form"],
        'compiler_f90' : ["g95"],
        'linker_so'    : ["<F90>", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-fpic']
    module_dir_switch = '-fmod='    # output directory for module files
    module_include_switch = '-I'    # search path for module files

    def get_flags(self):
        """Flags passed on every compilation."""
        return ['-fno-second-underscore']
    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O']
    def get_flags_debug(self):
        """Debug-build flags."""
        return ['-g']
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: print the detected G95 compiler version.
    from distutils import log
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler('g95').get_version())
|
549
.venv/Lib/site-packages/numpy/distutils/fcompiler/gnu.py
Normal file
549
.venv/Lib/site-packages/numpy/distutils/fcompiler/gnu.py
Normal file
@ -0,0 +1,549 @@
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
import platform
|
||||
import tempfile
|
||||
import hashlib
|
||||
import base64
|
||||
import subprocess
|
||||
from subprocess import Popen, PIPE, STDOUT
|
||||
from numpy.distutils.exec_command import filepath_from_subprocess_output
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
|
||||
|
||||
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
|
||||
|
||||
# XXX: handle cross compilation
|
||||
|
||||
|
||||
def is_win64():
    """Return True when running a 64-bit Python build on Windows."""
    if sys.platform != "win32":
        return False
    return platform.architecture()[0] == "64bit"
|
||||
|
||||
|
||||
class GnuFCompiler(FCompiler):
    """FCompiler subclass for the legacy GNU Fortran 77 compiler (g77)."""
    compiler_type = 'gnu'
    compiler_aliases = ('g77', )
    description = 'GNU Fortran 77 compiler'

    def gnu_version_match(self, version_string):
        """Handle the different versions of GNU fortran compilers.

        Returns a ('g77', version) or ('gfortran', version) pair, or None
        when a short banner contains no recognizable version; raises
        ValueError when a long banner cannot be parsed at all.
        """
        # Strip warning(s) that may be emitted by gfortran
        while version_string.startswith('gfortran: warning'):
            version_string =\
                version_string[version_string.find('\n') + 1:].strip()

        # Gfortran versions from after 2010 will output a simple string
        # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
        # gfortrans may still return long version strings (``-dumpversion`` was
        # an alias for ``--version``)
        if len(version_string) <= 20:
            # Try to find a valid version string
            m = re.search(r'([0-9.]+)', version_string)
            if m:
                # g77 provides a longer version string that starts with GNU
                # Fortran
                if version_string.startswith('GNU Fortran'):
                    return ('g77', m.group(1))

                # gfortran only outputs a version string such as #.#.#, so check
                # if the match is at the start of the string
                elif m.start() == 0:
                    return ('gfortran', m.group(1))
        else:
            # Output probably from --version, try harder:
            m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
            if m:
                return ('gfortran', m.group(1))
            m = re.search(
                r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
            if m:
                v = m.group(1)
                if v.startswith('0') or v.startswith('2') or v.startswith('3'):
                    # the '0' is for early g77's
                    return ('g77', v)
                else:
                    # at some point in the 4.x series, the ' 95' was dropped
                    # from the version string
                    return ('gfortran', v)

        # If still nothing, raise an error to make the problem easy to find.
        err = 'A valid Fortran version was not found in this string:\n'
        raise ValueError(err + version_string)

    def version_match(self, version_string):
        """Return the g77 version from *version_string*, or None if not g77."""
        v = self.gnu_version_match(version_string)
        if not v or v[0] != 'g77':
            return None
        return v[1]

    possible_executables = ['g77', 'f77']
    executables = {
        'version_cmd'  : [None, "-dumpversion"],
        'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
        'compiler_f90' : None,  # Use --fcompiler=gnu95 for f90 codes
        'compiler_fix' : None,
        'linker_so'    : [None, "-g", "-Wall"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"],
        'linker_exe'   : [None, "-g", "-Wall"]
    }
    module_dir_switch = None
    module_include_switch = None

    # Cygwin: f771: warning: -fPIC ignored for target (all code is
    # position independent)
    if os.name != 'nt' and sys.platform != 'cygwin':
        pic_flags = ['-fPIC']

    # use -mno-cygwin for g77 when Python is not Cygwin-Python
    if sys.platform == 'win32':
        for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
            executables[key].append('-mno-cygwin')

    g2c = 'g2c'
    suggested_f90_compiler = 'gnu95'

    def get_flags_linker_so(self):
        """Flags for linking a shared object, with macOS deployment-target setup."""
        opt = self.linker_so[1:]
        if sys.platform == 'darwin':
            target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
            # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
            # and leave it alone. But, distutils will complain if the
            # environment's value is different from the one in the Python
            # Makefile used to build Python. We let distutils handle this
            # error checking.
            if not target:
                # If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
                # we try to get it first from sysconfig and then
                # fall back to setting it to 10.9 This is a reasonable default
                # even when using the official Python dist and those derived
                # from it.
                import sysconfig
                target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
                if not target:
                    target = '10.9'
                    s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
                    warnings.warn(s, stacklevel=2)
                os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
            opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
        else:
            opt.append("-shared")
        if sys.platform.startswith('sunos'):
            # SunOS often has dynamically loaded symbols defined in the
            # static library libg2c.a The linker doesn't like this. To
            # ignore the problem, use the -mimpure-text flag. It isn't
            # the safest thing, but seems to work. 'man gcc' says:
            # ".. Instead of using -mimpure-text, you should compile all
            # source code with -fpic or -fPIC."
            opt.append('-mimpure-text')
        return opt

    def get_libgcc_dir(self):
        """Ask the compiler where libgcc lives; None when it cannot be run."""
        try:
            output = subprocess.check_output(self.compiler_f77 +
                                             ['-print-libgcc-file-name'])
        except (OSError, subprocess.CalledProcessError):
            pass
        else:
            output = filepath_from_subprocess_output(output)
            return os.path.dirname(output)
        return None

    def get_libgfortran_dir(self):
        """Ask the compiler where the shared libgfortran lives (Linux/macOS only)."""
        if sys.platform[:5] == 'linux':
            libgfortran_name = 'libgfortran.so'
        elif sys.platform == 'darwin':
            libgfortran_name = 'libgfortran.dylib'
        else:
            libgfortran_name = None

        libgfortran_dir = None
        if libgfortran_name:
            find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
            try:
                output = subprocess.check_output(
                    self.compiler_f77 + find_lib_arg)
            except (OSError, subprocess.CalledProcessError):
                pass
            else:
                output = filepath_from_subprocess_output(output)
                libgfortran_dir = os.path.dirname(output)
        return libgfortran_dir

    def get_library_dirs(self):
        """Directories to search when linking against the Fortran runtime."""
        opt = []
        if sys.platform[:5] != 'linux':
            d = self.get_libgcc_dir()
            if d:
                # if windows and not cygwin, libg2c lies in a different folder
                if sys.platform == 'win32' and not d.startswith('/usr/lib'):
                    d = os.path.normpath(d)
                    path = os.path.join(d, "lib%s.a" % self.g2c)
                    if not os.path.exists(path):
                        # Walk four levels up from the libgcc dir and look
                        # under <root>/lib instead.
                        root = os.path.join(d, *((os.pardir, ) * 4))
                        d2 = os.path.abspath(os.path.join(root, 'lib'))
                        path = os.path.join(d2, "lib%s.a" % self.g2c)
                        if os.path.exists(path):
                            opt.append(d2)
                opt.append(d)
        # For Macports / Linux, libgfortran and libgcc are not co-located
        lib_gfortran_dir = self.get_libgfortran_dir()
        if lib_gfortran_dir:
            opt.append(lib_gfortran_dir)
        return opt

    def get_libraries(self):
        """Runtime library names to link (g2c variant, plus gcc/cc_dynamic)."""
        opt = []
        d = self.get_libgcc_dir()
        if d is not None:
            # Prefer the -pic variant of libg2c when present on disk.
            g2c = self.g2c + '-pic'
            f = self.static_lib_format % (g2c, self.static_lib_extension)
            if not os.path.isfile(os.path.join(d, f)):
                g2c = self.g2c
        else:
            g2c = self.g2c

        if g2c is not None:
            opt.append(g2c)
        c_compiler = self.c_compiler
        if sys.platform == 'win32' and c_compiler and \
                c_compiler.compiler_type == 'msvc':
            opt.append('gcc')
        if sys.platform == 'darwin':
            opt.append('cc_dynamic')
        return opt

    def get_flags_debug(self):
        """Debug-build flags."""
        return ['-g']

    def get_flags_opt(self):
        """Optimization flags; g77 <= 3.3.3 gets -O2 instead of -O3."""
        v = self.get_version()
        # NOTE(review): this is a lexicographic string comparison
        # ('10.1' <= '3.3.3' is True) — confirm whether post-9 versions
        # are intended to take the -O2 branch.
        if v and v <= '3.3.3':
            # With this compiler version building Fortran BLAS/LAPACK
            # with -O3 caused failures in lib.lapack heevr,syevr tests.
            opt = ['-O2']
        else:
            opt = ['-O3']
        opt.append('-funroll-loops')
        return opt

    def _c_arch_flags(self):
        """ Return detected arch flags from CFLAGS """
        import sysconfig
        try:
            cflags = sysconfig.get_config_vars()['CFLAGS']
        except KeyError:
            return []
        arch_re = re.compile(r"-arch\s+(\w+)")
        arch_flags = []
        for arch in arch_re.findall(cflags):
            arch_flags += ['-arch', arch]
        return arch_flags

    def get_flags_arch(self):
        """Architecture flags (none for g77)."""
        return []

    def runtime_library_dir_option(self, dir):
        """Linker option embedding *dir* as a runtime search path (rpath)."""
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            # Linux/Solaris/Unix support RPATH, Windows does not
            raise NotImplementedError

        # TODO: could use -Xlinker here, if it's supported
        assert "," not in dir

        if sys.platform == 'darwin':
            return f'-Wl,-rpath,{dir}'
        elif sys.platform[:3] == 'aix':
            # AIX RPATH is called LIBPATH
            return f'-Wl,-blibpath:{dir}'
        else:
            return f'-Wl,-rpath={dir}'
|
||||
|
||||
|
||||
class Gnu95FCompiler(GnuFCompiler):
    """FCompiler subclass for gfortran (GNU Fortran 95 and later)."""
    compiler_type = 'gnu95'
    compiler_aliases = ('gfortran', )
    description = 'GNU Fortran 95 compiler'

    def version_match(self, version_string):
        """Return the gfortran version, or None if the banner is not gfortran.

        Side effect: on win32 with gfortran < 4, appends -mno-cygwin to
        the executable command lines.
        """
        v = self.gnu_version_match(version_string)
        if not v or v[0] != 'gfortran':
            return None
        v = v[1]
        if LooseVersion(v) >= "4":
            # gcc-4 series releases do not support -mno-cygwin option
            pass
        else:
            # use -mno-cygwin flag for gfortran when Python is not
            # Cygwin-Python
            if sys.platform == 'win32':
                for key in [
                        'version_cmd', 'compiler_f77', 'compiler_f90',
                        'compiler_fix', 'linker_so', 'linker_exe'
                ]:
                    self.executables[key].append('-mno-cygwin')
        return v

    possible_executables = ['gfortran', 'f95']
    executables = {
        'version_cmd'  : ["<F90>", "-dumpversion"],
        'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
                          "-fno-second-underscore"],
        'compiler_f90' : [None, "-Wall", "-g",
                          "-fno-second-underscore"],
        'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
                          "-fno-second-underscore"],
        'linker_so'    : ["<F90>", "-Wall", "-g"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"],
        'linker_exe'   : [None, "-Wall"]
    }

    module_dir_switch = '-J'
    module_include_switch = '-I'

    # AIX needs pthread linked into shared objects, and 64-bit flags
    # for 64-bit Python builds.
    if sys.platform[:3] == 'aix':
        executables['linker_so'].append('-lpthread')
        if platform.architecture()[0][:2] == '64':
            for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
                executables[key].append('-maix64')

    g2c = 'gfortran'

    def _universal_flags(self, cmd):
        """Return a list of -arch flags for every supported architecture."""
        if not sys.platform == 'darwin':
            return []
        arch_flags = []
        # get arches the C compiler gets.
        c_archs = self._c_arch_flags()
        if "i386" in c_archs:
            c_archs[c_archs.index("i386")] = "i686"
        # check the arches the Fortran compiler supports, and compare with
        # arch flags from C compiler
        for arch in ["ppc", "i686", "x86_64", "ppc64"]:
            if _can_target(cmd, arch) and arch in c_archs:
                arch_flags.extend(["-arch", arch])
        return arch_flags

    def get_flags(self):
        """Base flags, with macOS universal-binary -arch flags prepended."""
        flags = GnuFCompiler.get_flags(self)
        arch_flags = self._universal_flags(self.compiler_f90)
        if arch_flags:
            flags[:0] = arch_flags
        return flags

    def get_flags_linker_so(self):
        """Shared-object link flags, with macOS -arch flags prepended."""
        flags = GnuFCompiler.get_flags_linker_so(self)
        arch_flags = self._universal_flags(self.linker_so)
        if arch_flags:
            flags[:0] = arch_flags
        return flags

    def get_library_dirs(self):
        """Library search dirs, adding the MinGW lib dir when linking via MSVC."""
        opt = GnuFCompiler.get_library_dirs(self)
        if sys.platform == 'win32':
            c_compiler = self.c_compiler
            if c_compiler and c_compiler.compiler_type == "msvc":
                target = self.get_target()
                if target:
                    d = os.path.normpath(self.get_libgcc_dir())
                    root = os.path.join(d, *((os.pardir, ) * 4))
                    path = os.path.join(root, "lib")
                    mingwdir = os.path.normpath(path)
                    if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
                        opt.append(mingwdir)
        # For Macports / Linux, libgfortran and libgcc are not co-located
        lib_gfortran_dir = self.get_libgfortran_dir()
        if lib_gfortran_dir:
            opt.append(lib_gfortran_dir)
        return opt

    def get_libraries(self):
        """Runtime libraries; empty when the C compiler is MSVC."""
        opt = GnuFCompiler.get_libraries(self)
        if sys.platform == 'darwin':
            opt.remove('cc_dynamic')
        if sys.platform == 'win32':
            c_compiler = self.c_compiler
            if c_compiler and c_compiler.compiler_type == "msvc":
                if "gcc" in opt:
                    i = opt.index("gcc")
                    opt.insert(i + 1, "mingwex")
                    opt.insert(i + 1, "mingw32")
        c_compiler = self.c_compiler
        if c_compiler and c_compiler.compiler_type == "msvc":
            # MSVC cannot link these; wrap_unlinkable_objects handles them.
            return []
        else:
            pass
        return opt

    def get_target(self):
        """Return the compiler's target triple from ``gfortran -v``, or ''."""
        try:
            output = subprocess.check_output(self.compiler_f77 + ['-v'])
        except (OSError, subprocess.CalledProcessError):
            pass
        else:
            output = filepath_from_subprocess_output(output)
            m = TARGET_R.search(output)
            if m:
                return m.group(1)
        return ""

    def _hash_files(self, filenames):
        """Return a filesystem-safe base32 SHA-1 digest over the given files."""
        h = hashlib.sha1()
        for fn in filenames:
            with open(fn, 'rb') as f:
                while True:
                    block = f.read(131072)
                    if not block:
                        break
                    h.update(block)
        text = base64.b32encode(h.digest())
        text = text.decode('ascii')
        # '=' padding is not needed and would be awkward in file names.
        return text.rstrip('=')

    def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
                          chained_dlls, is_archive):
        """Create a wrapper shared library for the given objects

        Return an MSVC-compatible lib
        """

        c_compiler = self.c_compiler
        if c_compiler.compiler_type != "msvc":
            raise ValueError("This method only supports MSVC")

        # Name the DLL after a content hash so rebuilds are cached.
        object_hash = self._hash_files(list(objects) + list(chained_dlls))

        if is_win64():
            tag = 'win_amd64'
        else:
            tag = 'win32'

        basename = 'lib' + os.path.splitext(
            os.path.basename(objects[0]))[0][:8]
        root_name = basename + '.' + object_hash + '.gfortran-' + tag
        dll_name = root_name + '.dll'
        def_name = root_name + '.def'
        lib_name = root_name + '.lib'
        dll_path = os.path.join(extra_dll_dir, dll_name)
        def_path = os.path.join(output_dir, def_name)
        lib_path = os.path.join(output_dir, lib_name)

        if os.path.isfile(lib_path):
            # Nothing to do
            return lib_path, dll_path

        if is_archive:
            objects = (["-Wl,--whole-archive"] + list(objects) +
                       ["-Wl,--no-whole-archive"])
        self.link_shared_object(
            objects,
            dll_name,
            output_dir=extra_dll_dir,
            extra_postargs=list(chained_dlls) + [
                '-Wl,--allow-multiple-definition',
                '-Wl,--output-def,' + def_path,
                '-Wl,--export-all-symbols',
                '-Wl,--enable-auto-import',
                '-static',
                '-mlong-double-64',
            ])

        # No PowerPC!
        if is_win64():
            specifier = '/MACHINE:X64'
        else:
            specifier = '/MACHINE:X86'

        # MSVC specific code
        lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
        if not c_compiler.initialized:
            c_compiler.initialize()
        c_compiler.spawn([c_compiler.lib] + lib_args)

        return lib_path, dll_path

    def can_ccompiler_link(self, compiler):
        # MSVC cannot link objects compiled by GNU fortran
        return compiler.compiler_type not in ("msvc", )

    def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
        """
        Convert a set of object files that are not compatible with the default
        linker, to a file that is compatible.
        """
        if self.c_compiler.compiler_type == "msvc":
            # Compile a DLL and return the lib for the DLL as
            # the object. Also keep track of previous DLLs that
            # we have compiled so that we can link against them.

            # If there are .a archives, assume they are self-contained
            # static libraries, and build separate DLLs for each
            archives = []
            plain_objects = []
            for obj in objects:
                if obj.lower().endswith('.a'):
                    archives.append(obj)
                else:
                    plain_objects.append(obj)

            chained_libs = []
            chained_dlls = []
            # Build archive DLLs last-to-first so each can link against
            # the DLLs built after it in the original order.
            for archive in archives[::-1]:
                lib, dll = self._link_wrapper_lib(
                    [archive],
                    output_dir,
                    extra_dll_dir,
                    chained_dlls=chained_dlls,
                    is_archive=True)
                chained_libs.insert(0, lib)
                chained_dlls.insert(0, dll)

            if not plain_objects:
                return chained_libs

            lib, dll = self._link_wrapper_lib(
                plain_objects,
                output_dir,
                extra_dll_dir,
                chained_dlls=chained_dlls,
                is_archive=False)
            return [lib] + chained_libs
        else:
            raise ValueError("Unsupported C compiler")
|
||||
|
||||
|
||||
def _can_target(cmd, arch):
|
||||
"""Return true if the architecture supports the -arch flag"""
|
||||
newcmd = cmd[:]
|
||||
fid, filename = tempfile.mkstemp(suffix=".f")
|
||||
os.close(fid)
|
||||
try:
|
||||
d = os.path.dirname(filename)
|
||||
output = os.path.splitext(filename)[0] + ".o"
|
||||
try:
|
||||
newcmd.extend(["-arch", arch, "-c", filename])
|
||||
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
|
||||
p.communicate()
|
||||
return p.returncode == 0
|
||||
finally:
|
||||
if os.path.exists(output):
|
||||
os.remove(output)
|
||||
finally:
|
||||
os.remove(filename)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: print detected gnu/g95 compiler versions.
    from distutils import log
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)

    print(customized_fcompiler('gnu').get_version())
    try:
        print(customized_fcompiler('g95').get_version())
    except Exception as e:
        # g95 may not be installed; report instead of aborting.
        print(e)
|
41
.venv/Lib/site-packages/numpy/distutils/fcompiler/hpux.py
Normal file
41
.venv/Lib/site-packages/numpy/distutils/fcompiler/hpux.py
Normal file
@ -0,0 +1,41 @@
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['HPUXFCompiler']
|
||||
|
||||
class HPUXFCompiler(FCompiler):
    """FCompiler subclass for the HP-UX Fortran 90 compiler (``f90``)."""

    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'

    executables = {
        'version_cmd'  : ["f90", "+version"],
        'compiler_f77' : ["f90"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["ld", "-b"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = None #XXX: fix me
    module_include_switch = None #XXX: fix me
    pic_flags = ['+Z']
    def get_flags(self):
        """PIC plus portable-unsigned and 64-bit data-model flags."""
        return self.pic_flags + ['+ppu', '+DD64']
    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O3']
    def get_libraries(self):
        """Link the math library."""
        return ['m']
    def get_library_dirs(self):
        """64-bit HP-UX system library directory."""
        opt = ['/usr/lib/hpux64']
        return opt
    # NOTE(review): ok_status uses a mutable list as a default argument;
    # it is not mutated here, but a tuple default would be safer.
    def get_version(self, force=0, ok_status=[256, 0, 1]):
        # XXX status==256 may indicate 'unrecognized option' or
        # 'no input file'. So, version_cmd needs more work.
        return FCompiler.get_version(self, force, ok_status)
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: print the detected HP-UX compiler version.
    from distutils import log
    log.set_verbosity(10)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='hpux').get_version())
|
97
.venv/Lib/site-packages/numpy/distutils/fcompiler/ibm.py
Normal file
97
.venv/Lib/site-packages/numpy/distutils/fcompiler/ibm.py
Normal file
@ -0,0 +1,97 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
from numpy.distutils.exec_command import find_executable
|
||||
from numpy.distutils.misc_util import make_temp_file
|
||||
from distutils import log
|
||||
|
||||
compilers = ['IBMFCompiler']
|
||||
|
||||
class IBMFCompiler(FCompiler):
    """FCompiler subclass for the IBM XL Fortran compiler (xlf family)."""
    compiler_type = 'ibm'
    description = 'IBM XL Fortran Compiler'
    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004

    executables = {
        'version_cmd'  : ["<F77>", "-qversion"],
        'compiler_f77' : ["xlf"],
        'compiler_fix' : ["xlf90", "-qfixed"],
        'compiler_f90' : ["xlf90"],
        'linker_so'    : ["xlf95"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    def get_version(self,*args,**kwds):
        """Detect the compiler version, with AIX (lslpp) and Linux
        (/etc/opt/ibmcmp/xlf directory listing) fallbacks."""
        version = FCompiler.get_version(self,*args,**kwds)

        if version is None and sys.platform.startswith('aix'):
            # use lslpp to find out xlf version
            lslpp = find_executable('lslpp')
            xlf = find_executable('xlf')
            if os.path.exists(xlf) and os.path.exists(lslpp):
                try:
                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    # NOTE(review): check_output returns bytes, while the
                    # pattern below is str — re.search would raise
                    # TypeError here; likely needs o.decode(). Verify on AIX.
                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                    if m: version = m.group('version')

        xlf_dir = '/etc/opt/ibmcmp/xlf'
        if version is None and os.path.isdir(xlf_dir):
            # linux:
            # If the output of xlf does not contain version info
            # (that's the case with xlf 8.1, for instance) then
            # let's try another method:
            l = sorted(os.listdir(xlf_dir))
            l.reverse()
            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
            if l:
                from distutils.version import LooseVersion
                # Highest versioned directory that contains xlf.cfg wins.
                self.version = version = LooseVersion(l[0])
        return version

    def get_flags(self):
        """Flags passed on every compilation (external-name mangling)."""
        return ['-qextname']

    def get_flags_debug(self):
        """Debug-build flags."""
        return ['-g']

    def get_flags_linker_so(self):
        """Shared-object link flags; rewrites xlf.cfg so the Darwin link
        uses bundle1.o instead of crt1.o."""
        opt = []
        if sys.platform=='darwin':
            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
        else:
            opt.append('-bshared')
        version = self.get_version(ok_status=[0, 40])
        if version is not None:
            if sys.platform.startswith('aix'):
                xlf_cfg = '/etc/xlf.cfg'
            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
            log.info('Creating '+new_cfg)
            with open(xlf_cfg, 'r') as fi:
                crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
                for line in fi:
                    m = crt1_match(line)
                    if m:
                        # Swap the startup object for the bundle variant.
                        fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
                    else:
                        fo.write(line)
            fo.close()
            opt.append('-F'+new_cfg)
        return opt

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O3']
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: print the detected IBM XL compiler version.
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler(compiler='ibm').get_version())
|
210
.venv/Lib/site-packages/numpy/distutils/fcompiler/intel.py
Normal file
210
.venv/Lib/site-packages/numpy/distutils/fcompiler/intel.py
Normal file
@ -0,0 +1,210 @@
|
||||
# http://developer.intel.com/software/products/compilers/flin/
|
||||
import sys
|
||||
|
||||
from numpy.distutils.ccompiler import simple_version_match
|
||||
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
|
||||
|
||||
compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
|
||||
'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
|
||||
'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
|
||||
|
||||
|
||||
def intel_version_match(type):
    """Build a version matcher anchored on the given Intel target kind."""
    # Match against the important stuff in the version string
    pattern = r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)
    return simple_version_match(start=pattern)
|
||||
|
||||
|
||||
class BaseIntelFCompiler(FCompiler):
    """Shared behavior for all Intel Fortran compiler variants."""

    def update_executables(self):
        # Version detection compiles a dummy file, since ifort prints the
        # version banner as part of a compile invocation (-V).
        f = dummy_fortran_file()
        self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
                                           f + '.f', '-o', f + '.o']

    def runtime_library_dir_option(self, dir):
        """Linker option embedding *dir* as a runtime search path (rpath)."""
        # TODO: could use -Xlinker here, if it's supported
        assert "," not in dir

        return '-Wl,-rpath=%s' % dir
|
||||
|
||||
|
||||
class IntelFCompiler(BaseIntelFCompiler):
    """FCompiler subclass for the 32-bit Intel Fortran compiler (ifort/ifc)."""

    compiler_type = 'intel'
    compiler_aliases = ('ifort',)
    description = 'Intel Fortran Compiler for 32-bit apps'
    version_match = intel_version_match('32-bit|IA-32')

    possible_executables = ['ifort', 'ifc']

    executables = {
        'version_cmd'  : None,          # set by update_executables
        'compiler_f77' : [None, "-72", "-w90", "-w95"],
        'compiler_f90' : [None],
        'compiler_fix' : [None, "-FI"],
        'linker_so'    : ["<F90>", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    pic_flags = ['-fPIC']
    module_dir_switch = '-module '  # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_free(self):
        """Flags selecting free-form source."""
        return ['-FR']

    def get_flags(self):
        """Flags passed on every compilation."""
        return ['-fPIC']

    def get_flags_opt(self):  # Scipy test failures with -O2
        """Conservative optimization flags; picks the OpenMP spelling by version."""
        v = self.get_version()
        # NOTE(review): lexicographic string comparison — e.g. '9' < '15'
        # is False, so version 9 would get -qopenmp; confirm intended.
        mpopt = 'openmp' if v and v < '15' else 'qopenmp'
        return ['-fp-model', 'strict', '-O1',
                '-assume', 'minus0', '-{}'.format(mpopt)]

    def get_flags_arch(self):
        """Architecture flags (none by default)."""
        return []

    def get_flags_linker_so(self):
        """Shared-object link flags, adjusted for Darwin and Fortran main."""
        opt = FCompiler.get_flags_linker_so(self)
        v = self.get_version()
        # NOTE(review): string compare again ('10.0' >= '8.0' is True,
        # but '100.0' would sort oddly); confirm version format.
        if v and v >= '8.0':
            opt.append('-nofor_main')
        if sys.platform == 'darwin':
            # Here, it's -dynamiclib
            try:
                idx = opt.index('-shared')
                opt.remove('-shared')
            except ValueError:
                idx = 0
            opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
        return opt
|
||||
|
||||
|
||||
class IntelItaniumFCompiler(IntelFCompiler):
    """Intel Fortran for Itanium (IA-64) targets."""

    compiler_type = 'intele'
    compiler_aliases = ()
    description = 'Intel Fortran Compiler for Itanium apps'

    version_match = intel_version_match('Itanium|IA-64')

    possible_executables = ['ifort', 'efort', 'efc']

    executables = {
        'version_cmd': None,
        'compiler_f77': [None, "-FI", "-w90", "-w95"],
        'compiler_fix': [None, "-FI"],
        'compiler_f90': [None],
        'linker_so': ['<F90>', "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }
|
||||
|
||||
|
||||
class IntelEM64TFCompiler(IntelFCompiler):
    """Intel Fortran for 64-bit x86 (EM64T) targets."""

    compiler_type = 'intelem'
    compiler_aliases = ()
    description = 'Intel Fortran Compiler for 64-bit apps'

    version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')

    possible_executables = ['ifort', 'efort', 'efc']

    executables = {
        'version_cmd': None,
        'compiler_f77': [None, "-FI"],
        'compiler_fix': [None, "-FI"],
        'compiler_f90': [None],
        'linker_so': ['<F90>', "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }

# Is there no difference in the version string between the above compilers
# and the Visual compilers?
|
||||
|
||||
|
||||
class IntelVisualFCompiler(BaseIntelFCompiler):
    """Intel Visual Fortran (MSVC-style command line) for 32-bit Windows."""

    compiler_type = 'intelv'
    description = 'Intel Visual Fortran Compiler for 32-bit apps'
    version_match = intel_version_match('32-bit|IA-32')

    def update_executables(self):
        # Probe the compiler version with a throwaway Fortran source file.
        f = dummy_fortran_file()
        self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
                                           f + '.f', '/o', f + '.o']

    ar_exe = 'lib.exe'
    possible_executables = ['ifort', 'ifl']

    executables = {
        'version_cmd': None,
        'compiler_f77': [None],
        'compiler_fix': [None],
        'compiler_f90': [None],
        'linker_so': [None],
        'archiver': [ar_exe, "/verbose", "/OUT:"],
        'ranlib': None,
    }

    compile_switch = '/c '
    object_switch = '/Fo'           # No space after /Fo!
    library_switch = '/OUT:'        # No space after /OUT:!
    module_dir_switch = '/module:'  # No space after /module:
    module_include_switch = '/I'

    def get_flags(self):
        return ['/nologo', '/MD', '/nbs', '/names:lowercase',
                '/assume:underscore']

    def get_flags_free(self):
        return []

    def get_flags_debug(self):
        return ['/4Yb', '/d2']

    def get_flags_opt(self):
        # /O2 causes SciPy test failures, so stay at /O1.
        return ['/O1', '/assume:minus0']

    def get_flags_arch(self):
        return ["/arch:IA32", "/QaxSSE3"]

    def runtime_library_dir_option(self, dir):
        # rpath has no equivalent on the MSVC-style link line.
        raise NotImplementedError
|
||||
|
||||
|
||||
class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
    """Intel Visual Fortran for Itanium targets."""

    compiler_type = 'intelev'
    description = 'Intel Visual Fortran Compiler for Itanium apps'

    version_match = intel_version_match('Itanium')

    possible_executables = ['efl']  # XXX this is a wild guess
    ar_exe = IntelVisualFCompiler.ar_exe

    executables = {
        'version_cmd': None,
        'compiler_f77': [None, "-FI", "-w90", "-w95"],
        'compiler_fix': [None, "-FI", "-4L72", "-w"],
        'compiler_f90': [None],
        'linker_so': ['<F90>', "-shared"],
        'archiver': [ar_exe, "/verbose", "/OUT:"],
        'ranlib': None,
    }
|
||||
|
||||
|
||||
class IntelEM64VisualFCompiler(IntelVisualFCompiler):
    """Intel Visual Fortran for 64-bit Windows targets."""

    compiler_type = 'intelvem'
    description = 'Intel Visual Fortran Compiler for 64-bit apps'

    version_match = simple_version_match(start=r'Intel\(R\).*?64,')

    def get_flags_arch(self):
        # No extra architecture flags for the 64-bit toolchain.
        return []
|
||||
|
||||
|
||||
if __name__ == '__main__':
    from distutils import log
    from numpy.distutils import customized_fcompiler

    # Smoke test: report the detected Intel Fortran compiler version.
    log.set_verbosity(2)
    print(customized_fcompiler(compiler='intel').get_version())
|
45
.venv/Lib/site-packages/numpy/distutils/fcompiler/lahey.py
Normal file
45
.venv/Lib/site-packages/numpy/distutils/fcompiler/lahey.py
Normal file
@ -0,0 +1,45 @@
|
||||
import os
|
||||
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['LaheyFCompiler']
|
||||
|
||||
class LaheyFCompiler(FCompiler):
    """Lahey/Fujitsu lf95 Fortran 95 compiler."""

    compiler_type = 'lahey'
    description = 'Lahey/Fujitsu Fortran 95 Compiler'
    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'

    executables = {
        'version_cmd': ["<F90>", "--version"],
        'compiler_f77': ["lf95", "--fix"],
        'compiler_fix': ["lf95", "--fix"],
        'compiler_f90': ["lf95"],
        'linker_so': ["lf95", "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }

    module_dir_switch = None      # XXX Fix me
    module_include_switch = None  # XXX Fix me

    def get_flags_opt(self):
        return ['-O']

    def get_flags_debug(self):
        return ['-g', '--chk', '--chkglobal']

    def get_library_dirs(self):
        # $LAHEY points at the compiler installation root, if it is set.
        lahey_root = os.environ.get('LAHEY')
        return [os.path.join(lahey_root, 'lib')] if lahey_root else []

    def get_libraries(self):
        return ['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']


if __name__ == '__main__':
    from distutils import log
    from numpy.distutils import customized_fcompiler

    log.set_verbosity(2)
    print(customized_fcompiler(compiler='lahey').get_version())
|
54
.venv/Lib/site-packages/numpy/distutils/fcompiler/mips.py
Normal file
54
.venv/Lib/site-packages/numpy/distutils/fcompiler/mips.py
Normal file
@ -0,0 +1,54 @@
|
||||
from numpy.distutils.cpuinfo import cpu
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['MIPSFCompiler']
|
||||
|
||||
class MIPSFCompiler(FCompiler):
    """SGI MIPSpro Fortran compiler."""

    compiler_type = 'mips'
    description = 'MIPSpro Fortran Compiler'
    version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'

    executables = {
        'version_cmd': ["<F90>", "-version"],
        'compiler_f77': ["f77", "-f77"],
        'compiler_fix': ["f90", "-fixedform"],
        'compiler_f90': ["f90"],
        'linker_so': ["f90", "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': None,
    }
    module_dir_switch = None      # XXX: fix me
    module_include_switch = None  # XXX: fix me
    pic_flags = ['-KPIC']

    def get_flags(self):
        return self.pic_flags + ['-n32']

    def get_flags_opt(self):
        return ['-O3']

    def get_flags_arch(self):
        # Pick the -TARG flag matching the first IP platform cpuinfo reports.
        for variant in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
            if getattr(cpu, 'is_IP%s' % variant)():
                return ['-TARG:platform=IP%s' % variant]
        return []

    def get_flags_arch_f77(self):
        # Map the detected CPU onto the classic rNNNN f77 switch,
        # preferring newer chips first.
        for probe, number in (('is_r10000', 10000), ('is_r12000', 12000),
                              ('is_r8000', 8000), ('is_r5000', 5000),
                              ('is_r4000', 4000)):
            if getattr(cpu, probe)():
                return ['r%s' % number]
        return []

    def get_flags_arch_f90(self):
        # Same switch as f77, but spelled with a leading dash for f90.
        flags = self.get_flags_arch_f77()
        if flags:
            flags[0] = '-' + flags[0]
        return flags


if __name__ == '__main__':
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='mips').get_version())
|
87
.venv/Lib/site-packages/numpy/distutils/fcompiler/nag.py
Normal file
87
.venv/Lib/site-packages/numpy/distutils/fcompiler/nag.py
Normal file
@ -0,0 +1,87 @@
|
||||
import sys
|
||||
import re
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['NAGFCompiler', 'NAGFORCompiler']
|
||||
|
||||
class BaseNAGFCompiler(FCompiler):
    """Behaviour shared by the NAGWare f95 and nagfor front ends."""

    version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'

    def version_match(self, version_string):
        # Extract the release number, or None when this is not NAG output.
        match = re.search(self.version_pattern, version_string)
        return match.group('version') if match else None

    def get_flags_linker_so(self):
        return ["-Wl,-shared"]

    def get_flags_opt(self):
        return ['-O4']

    def get_flags_arch(self):
        return []
|
||||
|
||||
class NAGFCompiler(BaseNAGFCompiler):
    """Classic NAGWare f95 compiler."""

    compiler_type = 'nag'
    description = 'NAGWare Fortran 95 Compiler'

    executables = {
        'version_cmd': ["<F90>", "-V"],
        'compiler_f77': ["f95", "-fixed"],
        'compiler_fix': ["f95", "-fixed"],
        'compiler_f90': ["f95"],
        'linker_so': ["<F90>"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }

    def get_flags_linker_so(self):
        if sys.platform == 'darwin':
            return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return BaseNAGFCompiler.get_flags_linker_so(self)

    def get_flags_arch(self):
        # Releases before 5.1 still understood -target=native.
        # NOTE(review): this is a lexical string comparison — confirm it is
        # adequate for the version strings NAGWare actually emits.
        version = self.get_version()
        if version and version < '5.1':
            return ['-target=native']
        return BaseNAGFCompiler.get_flags_arch(self)

    def get_flags_debug(self):
        return ['-g', '-gline', '-g90', '-nan', '-C']
|
||||
|
||||
class NAGFORCompiler(BaseNAGFCompiler):
    """Modern NAG ``nagfor`` compiler."""

    compiler_type = 'nagfor'
    description = 'NAG Fortran Compiler'

    executables = {
        'version_cmd': ["nagfor", "-V"],
        'compiler_f77': ["nagfor", "-fixed"],
        'compiler_fix': ["nagfor", "-fixed"],
        'compiler_f90': ["nagfor"],
        'linker_so': ["nagfor"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }

    @staticmethod
    def _version_tuple(version):
        """Parse the leading dotted-integer part of *version* into a tuple.

        Returns () when the string does not start with a number, in which
        case the caller falls back to the legacy flag set.
        """
        parts = []
        for piece in (version or '').split('.'):
            if not piece.isdigit():
                break
            parts.append(int(piece))
        return tuple(parts)

    def get_flags_linker_so(self):
        if sys.platform == 'darwin':
            return ['-unsharedrts',
                    '-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return BaseNAGFCompiler.get_flags_linker_so(self)

    def get_flags_debug(self):
        version = self.get_version()
        # BUG FIX: the version used to be compared as a string, so e.g.
        # '10.1' > '6.1' evaluated False and recent releases received the
        # legacy flag set.  Compare numerically instead.
        if version and self._version_tuple(version) > (6, 1):
            return ['-g', '-u', '-nan', '-C=all', '-thread_safe',
                    '-kind=unique', '-Warn=allocation', '-Warn=subnormal']
        else:
            return ['-g', '-nan', '-C=all', '-u', '-thread_safe']


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    compiler = customized_fcompiler(compiler='nagfor')
    print(compiler.get_version())
    print(compiler.get_flags_debug())
|
28
.venv/Lib/site-packages/numpy/distutils/fcompiler/none.py
Normal file
28
.venv/Lib/site-packages/numpy/distutils/fcompiler/none.py
Normal file
@ -0,0 +1,28 @@
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
from numpy.distutils import customized_fcompiler
|
||||
|
||||
compilers = ['NoneFCompiler']
|
||||
|
||||
class NoneFCompiler(FCompiler):
    """Placeholder compiler used when no Fortran compiler is wanted."""

    compiler_type = 'none'
    description = 'Fake Fortran compiler'

    # Every tool is deliberately absent.
    executables = {
        'compiler_f77': None,
        'compiler_f90': None,
        'compiler_fix': None,
        'linker_so': None,
        'linker_exe': None,
        'archiver': None,
        'ranlib': None,
        'version_cmd': None,
    }

    def find_executables(self):
        # Nothing to look for.
        pass


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    print(customized_fcompiler(compiler='none').get_version())
|
53
.venv/Lib/site-packages/numpy/distutils/fcompiler/nv.py
Normal file
53
.venv/Lib/site-packages/numpy/distutils/fcompiler/nv.py
Normal file
@ -0,0 +1,53 @@
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['NVHPCFCompiler']
|
||||
|
||||
class NVHPCFCompiler(FCompiler):
    """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler

    https://developer.nvidia.com/hpc-sdk

    Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers,
    https://www.pgroup.com/index.htm.
    See also `numpy.distutils.fcompiler.pg`.
    """

    compiler_type = 'nv'
    description = 'NVIDIA HPC SDK'
    version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'

    executables = {
        'version_cmd': ["<F90>", "-V"],
        'compiler_f77': ["nvfortran"],
        'compiler_fix': ["nvfortran", "-Mfixed"],
        'compiler_f90': ["nvfortran"],
        'linker_so': ["<F90>"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }
    pic_flags = ['-fpic']

    module_dir_switch = '-module '
    module_include_switch = '-I'

    def get_flags(self):
        # Position-independent code plus informative diagnostics.
        return self.pic_flags + ['-Minform=inform', '-Mnosecond_underscore']

    def get_flags_opt(self):
        return ['-fast']

    def get_flags_debug(self):
        return ['-g']

    def get_flags_linker_so(self):
        return ["-shared", '-fpic']

    def runtime_library_dir_option(self, dir):
        return '-R%s' % dir


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='nv').get_version())
|
33
.venv/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py
Normal file
33
.venv/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py
Normal file
@ -0,0 +1,33 @@
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['PathScaleFCompiler']
|
||||
|
||||
class PathScaleFCompiler(FCompiler):
    """PathScale pathf95 Fortran compiler."""

    compiler_type = 'pathf95'
    description = 'PathScale Fortran Compiler'
    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'

    executables = {
        'version_cmd': ["pathf95", "-version"],
        'compiler_f77': ["pathf95", "-fixedform"],
        'compiler_fix': ["pathf95", "-fixedform"],
        'compiler_f90': ["pathf95"],
        'linker_so': ["pathf95", "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }
    pic_flags = ['-fPIC']
    module_dir_switch = '-module '  # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_opt(self):
        return ['-O3']

    def get_flags_debug(self):
        return ['-g']


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='pathf95').get_version())
|
128
.venv/Lib/site-packages/numpy/distutils/fcompiler/pg.py
Normal file
128
.venv/Lib/site-packages/numpy/distutils/fcompiler/pg.py
Normal file
@ -0,0 +1,128 @@
|
||||
# http://www.pgroup.com
|
||||
import sys
|
||||
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
from sys import platform
|
||||
from os.path import join, dirname, normpath
|
||||
|
||||
compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
|
||||
|
||||
|
||||
class PGroupFCompiler(FCompiler):
    """Portland Group pgfortran compiler (classic PGI toolchain)."""

    compiler_type = 'pg'
    description = 'Portland Group Fortran Compiler'
    version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'

    if platform == 'darwin':
        # On macOS the compiler emits dynamic libraries itself and the
        # final link goes through libtool.
        executables = {
            'version_cmd': ["<F77>", "-V"],
            'compiler_f77': ["pgfortran", "-dynamiclib"],
            'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
            'compiler_f90': ["pgfortran", "-dynamiclib"],
            'linker_so': ["libtool"],
            'archiver': ["ar", "-cr"],
            'ranlib': ["ranlib"],
        }
        pic_flags = ['']
    else:
        executables = {
            'version_cmd': ["<F77>", "-V"],
            'compiler_f77': ["pgfortran"],
            'compiler_fix': ["pgfortran", "-Mfixed"],
            'compiler_f90': ["pgfortran"],
            'linker_so': ["<F90>"],
            'archiver': ["ar", "-cr"],
            'ranlib': ["ranlib"],
        }
        pic_flags = ['-fpic']

    module_dir_switch = '-module '
    module_include_switch = '-I'

    def get_flags(self):
        return self.pic_flags + ['-Minform=inform', '-Mnosecond_underscore']

    def get_flags_opt(self):
        return ['-fast']

    def get_flags_debug(self):
        return ['-g']

    if platform == 'darwin':
        def get_flags_linker_so(self):
            return ["-dynamic", '-undefined', 'dynamic_lookup']
    else:
        def get_flags_linker_so(self):
            return ["-shared", '-fpic']

    def runtime_library_dir_option(self, dir):
        return '-R%s' % dir
|
||||
|
||||
|
||||
import functools
|
||||
|
||||
class PGroupFlangCompiler(FCompiler):
    """Portland Group / LLVM ``flang`` compiler (Windows-style archiver)."""

    compiler_type = 'flang'
    description = 'Portland Group Fortran LLVM Compiler'
    version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'

    ar_exe = 'lib.exe'
    possible_executables = ['flang']

    executables = {
        'version_cmd': ["<F77>", "--version"],
        'compiler_f77': ["flang"],
        'compiler_fix': ["flang"],
        'compiler_f90': ["flang"],
        'linker_so': [None],
        'archiver': [ar_exe, "/verbose", "/OUT:"],
        'ranlib': None
    }

    library_switch = '/OUT:'        # No space after /OUT:!
    module_dir_switch = '-module '  # Don't remove ending space!

    def get_libraries(self):
        opt = FCompiler.get_libraries(self)
        opt.extend(['flang', 'flangrti', 'ompstub'])
        return opt

    def get_library_dirs(self):
        """List of compiler library directories (computed once per instance).

        BUG FIX: this used ``functools.lru_cache`` on an instance method,
        which keys the cache on ``self`` and keeps every instance alive for
        the process lifetime.  Cache on the instance instead.
        """
        try:
            return self._library_dirs_cache
        except AttributeError:
            opt = FCompiler.get_library_dirs(self)
            # flang's runtime libs live in <flang_bindir>/../lib.
            flang_dir = dirname(self.executables['compiler_f77'][0])
            opt.append(normpath(join(flang_dir, '..', 'lib')))
            self._library_dirs_cache = opt
            return opt

    def get_flags(self):
        return []

    def get_flags_free(self):
        return []

    def get_flags_debug(self):
        return ['-g']

    def get_flags_opt(self):
        return ['-O3']

    def get_flags_arch(self):
        return []

    def runtime_library_dir_option(self, dir):
        raise NotImplementedError


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    if 'flang' in sys.argv:
        print(customized_fcompiler(compiler='flang').get_version())
    else:
        print(customized_fcompiler(compiler='pg').get_version())
|
51
.venv/Lib/site-packages/numpy/distutils/fcompiler/sun.py
Normal file
51
.venv/Lib/site-packages/numpy/distutils/fcompiler/sun.py
Normal file
@ -0,0 +1,51 @@
|
||||
from numpy.distutils.ccompiler import simple_version_match
|
||||
from numpy.distutils.fcompiler import FCompiler
|
||||
|
||||
compilers = ['SunFCompiler']
|
||||
|
||||
class SunFCompiler(FCompiler):
    """Sun WorkShop / Forte f90/f95 compiler."""

    compiler_type = 'sun'
    description = 'Sun or Forte Fortran 95 Compiler'
    # ex:
    # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
    version_match = simple_version_match(
        start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')

    executables = {
        'version_cmd': ["<F90>", "-V"],
        'compiler_f77': ["f90"],
        'compiler_fix': ["f90", "-fixed"],
        'compiler_f90': ["f90"],
        'linker_so': ["<F90>", "-Bdynamic", "-G"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }
    module_dir_switch = '-moddir='
    module_include_switch = '-M'
    pic_flags = ['-xcode=pic32']

    def get_flags_f77(self):
        # Releases 7 and later accept -f77; older ones only know -fixed.
        flags = ["-ftrap=%none"]
        if (self.get_version() or '') >= '7':
            flags.append("-f77")
        else:
            flags.append("-fixed")
        return flags

    def get_opt(self):
        return ['-fast', '-dalign']

    def get_arch(self):
        return ['-xtarget=generic']

    def get_libraries(self):
        return ['fsu', 'sunmath', 'mvec']

    def runtime_library_dir_option(self, dir):
        return '-R%s' % dir


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='sun').get_version())
|
52
.venv/Lib/site-packages/numpy/distutils/fcompiler/vast.py
Normal file
52
.venv/Lib/site-packages/numpy/distutils/fcompiler/vast.py
Normal file
@ -0,0 +1,52 @@
|
||||
import os
|
||||
|
||||
from numpy.distutils.fcompiler.gnu import GnuFCompiler
|
||||
|
||||
compilers = ['VastFCompiler']
|
||||
|
||||
class VastFCompiler(GnuFCompiler):
    """Pacific-Sierra Research VAST f90 preprocessor driving g77."""

    compiler_type = 'vast'
    compiler_aliases = ()
    description = 'Pacific-Sierra Research Fortran 90 Compiler'
    version_pattern = (r'\s*Pacific-Sierra Research vf90 '
                       r'(Personal|Professional)\s+(?P<version>[^\s]*)')

    # VAST f90 does not support -o with -c. So, object files are created
    # to the current directory and then moved to build directory
    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '

    executables = {
        'version_cmd': ["vf90", "-v"],
        'compiler_f77': ["g77"],
        'compiler_fix': ["f90", "-Wv,-ya"],
        'compiler_f90': ["f90"],
        'linker_so': ["<F90>"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }
    module_dir_switch = None      # XXX Fix me
    module_include_switch = None  # XXX Fix me

    def find_executables(self):
        pass

    def get_version_cmd(self):
        # 'vf90' lives next to the configured f90 executable.
        directory, basename = os.path.split(self.compiler_f90[0])
        return os.path.join(directory, 'v' + basename)

    def get_flags_arch(self):
        # Arch flags come from the underlying GNU compiler: temporarily
        # swap in its version, query the flags, then restore ours.
        vast_version = self.get_version()
        gnu = GnuFCompiler()
        gnu.customize(None)
        self.version = gnu.get_version()
        opt = GnuFCompiler.get_flags_arch(self)
        self.version = vast_version
        return opt


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='vast').get_version())
|
261
.venv/Lib/site-packages/numpy/distutils/from_template.py
Normal file
261
.venv/Lib/site-packages/numpy/distutils/from_template.py
Normal file
@ -0,0 +1,261 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
|
||||
process_file(filename)
|
||||
|
||||
takes templated file .xxx.src and produces .xxx file where .xxx
|
||||
is .pyf .f90 or .f using the following template rules:
|
||||
|
||||
'<..>' denotes a template.
|
||||
|
||||
All function and subroutine blocks in a source file with names that
|
||||
contain '<..>' will be replicated according to the rules in '<..>'.
|
||||
|
||||
The number of comma-separated words in '<..>' will determine the number of
|
||||
replicates.
|
||||
|
||||
'<..>' may have two different forms, named and short. For example,
|
||||
|
||||
named:
|
||||
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
|
||||
'd', 's', 'z', and 'c' for each replicate of the block.
|
||||
|
||||
<_c> is already defined: <_c=s,d,c,z>
|
||||
<_t> is already defined: <_t=real,double precision,complex,double complex>
|
||||
|
||||
short:
|
||||
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
|
||||
a block.
|
||||
|
||||
In general, '<..>' contains a comma separated list of arbitrary
|
||||
expressions. If these expression must contain a comma|leftarrow|rightarrow,
|
||||
then prepend the comma|leftarrow|rightarrow with a backslash.
|
||||
|
||||
If an expression matches '\\<index>' then it will be replaced
|
||||
by <index>-th expression.
|
||||
|
||||
Note that all '<..>' forms in a block must have the same number of
|
||||
comma-separated entries.
|
||||
|
||||
Predefined named template rules:
|
||||
<prefix=s,d,c,z>
|
||||
<ftype=real,double precision,complex,double complex>
|
||||
<ftypereal=real,double precision,\\0,\\1>
|
||||
<ctype=float,double,complex_float,complex_double>
|
||||
<ctypereal=float,double,\\0,\\1>
|
||||
|
||||
"""
|
||||
__all__ = ['process_str', 'process_file']
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
# Note: the "     " runs below are the five leading columns of a fixed-form
# Fortran continuation line.
routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)

def parse_structure(astr):
    """Return (start, end) spans for each subroutine/function in *astr*.

    Each span delimits one routine body that is a candidate for template
    expansion.
    """
    spans = []
    pos = 0
    while True:
        m = routine_start_re.search(astr, pos)
        if m is None:
            return spans
        start = m.start()
        if function_start_re.match(astr, start, m.end()):
            # We matched a continuation line; back up over the preceding
            # continuation lines to the real start of the statement.
            while True:
                nl = astr.rfind('\n', pos, start)
                if nl == -1:
                    break
                start = nl
                if astr[nl:nl + 7] != '\n     $':
                    break
                start += 1
        m = routine_end_re.search(astr, m.end())
        pos = end = (m.end() - 1) if m else len(astr)
        spans.append((start, end))
|
||||
|
||||
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")

def find_repl_patterns(astr):
    """Collect all named templates ``<name=a,b,...>`` found in *astr*.

    Returns a dict mapping each template name (or a generated key when the
    name is empty) to its normalized comma-separated replacement list.
    """
    names = {}
    for name_part, list_part in named_re.findall(astr):
        key = name_part.strip() or unique_key(names)
        names[key] = conv(list_part.replace(r'\,', '@comma@'))
    return names
|
||||
|
||||
def find_and_remove_repl_patterns(astr):
    """Like ``find_repl_patterns`` but also strip the definitions from *astr*.

    Returns the tuple (stripped text, name -> replacement-list dict).
    """
    names = find_repl_patterns(astr)
    stripped = named_re.sub('', astr)
    return stripped, names
|
||||
|
||||
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
    """Normalize a comma-separated replacement list.

    Entries are stripped of surrounding whitespace, and a ``\\<j>`` entry
    is replaced by the (already processed) *j*-th entry.
    """
    entries = [field.strip() for field in astr.split(',')]
    for pos, entry in enumerate(entries):
        hit = item_re.match(entry)
        if hit:
            entries[pos] = entries[int(hit.group('index'))]
    return ','.join(entries)
|
||||
|
||||
def unique_key(adict):
    """Return a key of the form ``__l<n>`` that is not already in *adict*."""
    n = 1
    while '__l%s' % n in adict:
        n += 1
    return '__l%s' % n
|
||||
|
||||
|
||||
# Matches a bare template name (used to tell "<name>" apart from "<a,b,c>").
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
    """Replicate one routine body according to its '<..>' template rules.

    ``substr`` is the routine text; ``names`` maps already-known template
    names to their comma-separated replacement lists (and is extended with
    new public names found here).  Returns the expanded text: one copy of
    the routine per replacement, joined by blank lines.
    """
    # Protect escaped angle brackets so they survive the substitutions below.
    substr = substr.replace(r'\>', '@rightarrow@')
    substr = substr.replace(r'\<', '@leftarrow@')
    # Templates defined locally inside this routine.
    lnames = find_repl_patterns(substr)
    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates

    def listrepl(mobj):
        # Turn a short-form list "<a,b,c>" into a named template "<key>",
        # registering the list in lnames if it is not known yet.
        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
        if template_name_re.match(thelist):
            return "<%s>" % (thelist)
        name = None
        for key in lnames.keys():    # see if list is already in dictionary
            if lnames[key] == thelist:
                name = key
        if name is None:      # this list is not in the dictionary yet
            name = unique_key(lnames)
            lnames[name] = thelist
        return "<%s>" % name

    substr = list_re.sub(listrepl, substr) # convert all lists to named templates
                                           # newnames are constructed as needed

    # Resolve every template used in the routine into a replacement rule.
    # All rules in one routine must have the same number of entries
    # (numsubs); mismatching rules are reported and ignored.
    numsubs = None
    base_rule = None
    rules = {}
    for r in template_re.findall(substr):
        if r not in rules:
            thelist = lnames.get(r, names.get(r, None))
            if thelist is None:
                raise ValueError('No replicates found for <%s>' % (r))
            # Publish non-private names so later routines can reuse them.
            if r not in names and not thelist.startswith('_'):
                names[r] = thelist
            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
            num = len(rule)

            if numsubs is None:
                numsubs = num
                rules[r] = rule
                base_rule = r
            elif num == numsubs:
                rules[r] = rule
            else:
                print("Mismatch in number of replacements (base <%s=%s>)"
                      " for <%s=%s>. Ignoring." %
                      (base_rule, ','.join(rules[base_rule]), r, thelist))
    if not rules:
        return substr

    def namerepl(mobj):
        # NOTE: deliberately reads the loop variable `k` from the enclosing
        # scope (late binding) — it is rebound by the loop below before each
        # substitution pass.
        name = mobj.group(1)
        return rules.get(name, (k+1)*[name])[k]

    # Emit one expanded copy of the routine per replacement index.
    newstr = ''
    for k in range(numsubs):
        newstr += template_re.sub(namerepl, substr) + '\n\n'

    # Restore the escaped angle brackets.
    newstr = newstr.replace('@rightarrow@', '>')
    newstr = newstr.replace('@leftarrow@', '<')
    return newstr
|
||||
|
||||
def process_str(allstr):
    """Expand every templated routine in *allstr* and return the new text."""
    output = []
    # Start from the predefined templates; text between routines may add more.
    names = dict(_special_names)
    cursor = 0
    for start, end in parse_structure(allstr):
        # Record and strip template definitions found between routines.
        between, defs = find_and_remove_repl_patterns(allstr[cursor:start])
        output.append(between)
        names.update(defs)
        output.append(expand_sub(allstr[start:end], names))
        cursor = end
    output.append(allstr[cursor:])
    return ''.join(output)
|
||||
|
||||
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)

def resolve_includes(source):
    """Return the lines of *source* with ``include '...src'`` files inlined.

    Include paths are resolved relative to *source*'s directory; includes
    that do not exist on disk are left in place untouched.
    """
    base_dir = os.path.dirname(source)
    out = []
    with open(source) as stream:
        for line in stream:
            m = include_src_re.match(line)
            if not m:
                out.append(line)
                continue
            fn = m.group('name')
            if not os.path.isabs(fn):
                fn = os.path.join(base_dir, fn)
            if os.path.isfile(fn):
                # Recurse so nested includes are expanded as well.
                out.extend(resolve_includes(fn))
            else:
                out.append(line)
    return out
|
||||
|
||||
def process_file(source):
    """Resolve includes in *source* and expand all of its templates."""
    return process_str(''.join(resolve_includes(source)))

# Template rules that are predefined for every processed file.
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
|
||||
|
||||
def main():
    """Command-line driver: expand templates from argv[1] (or stdin)."""
    try:
        src_name = sys.argv[1]
    except IndexError:
        instream, outstream = sys.stdin, sys.stdout
    else:
        instream = open(src_name, 'r')
        # The output name is the input name with its final extension dropped
        # (foo.f.src -> foo.f).
        outstream = open(os.path.splitext(src_name)[0], 'w')

    outstream.write(process_str(instream.read()))


if __name__ == "__main__":
    main()
|
111
.venv/Lib/site-packages/numpy/distutils/intelccompiler.py
Normal file
111
.venv/Lib/site-packages/numpy/distutils/intelccompiler.py
Normal file
@ -0,0 +1,111 @@
|
||||
import platform
|
||||
|
||||
from distutils.unixccompiler import UnixCCompiler
|
||||
from numpy.distutils.exec_command import find_executable
|
||||
from numpy.distutils.ccompiler import simple_version_match
|
||||
if platform.system() == 'Windows':
|
||||
from numpy.distutils.msvc9compiler import MSVCCompiler
|
||||
|
||||
|
||||
class IntelCCompiler(UnixCCompiler):
    """A modified Intel compiler compatible with a GCC-built Python."""
    compiler_type = 'intel'
    cc_exe = 'icc'
    cc_args = 'fPIC'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)

        # BUG FIX: the version was compared lexically ('9' < '15' is
        # False), which handed -qopenmp to old compilers that only know
        # -openmp.  Compare the numeric major version instead.
        v = self.get_version()
        head = (v or '').split('.', 1)[0]
        mpopt = 'openmp' if head.isdigit() and int(head) < 15 else 'qopenmp'
        self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
                       '-fomit-frame-pointer -{}').format(mpopt)
        compiler = self.cc_exe

        if platform.system() == 'Darwin':
            shared_flag = '-Wl,-undefined,dynamic_lookup'
        else:
            shared_flag = '-shared'
        self.set_executables(compiler=compiler,
                             compiler_so=compiler,
                             compiler_cxx=compiler,
                             archiver='xiar' + ' cru',
                             linker_exe=compiler + ' -shared-intel',
                             linker_so=compiler + ' ' + shared_flag +
                             ' -shared-intel')
|
||||
|
||||
|
||||
class IntelItaniumCCompiler(IntelCCompiler):
    # Itanium variant: identical behavior, but the executable may be
    # named ecc on old installations.
    compiler_type = 'intele'

    # On Itanium, the Intel Compiler used to be called ecc, let's search for
    # it (now it's also icc, so ecc is last in the search).
    for cc_exe in map(find_executable, ['icc', 'ecc']):
        if cc_exe:
            break
|
||||
|
||||
|
||||
class IntelEM64TCCompiler(UnixCCompiler):
    """
    A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
    """
    compiler_type = 'intelem'
    cc_exe = 'icc -m64'
    cc_args = '-fPIC'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)

        v = self.get_version()
        # Intel renamed -openmp to -qopenmp starting with version 15
        # (lexicographic string compare; see IntelCCompiler).
        mpopt = 'openmp' if v and v < '15' else 'qopenmp'
        self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
                       '-fomit-frame-pointer -{}').format(mpopt)
        compiler = self.cc_exe

        # macOS needs the dynamic-lookup flag instead of plain -shared.
        if platform.system() == 'Darwin':
            shared_flag = '-Wl,-undefined,dynamic_lookup'
        else:
            shared_flag = '-shared'
        self.set_executables(compiler=compiler,
                             compiler_so=compiler,
                             compiler_cxx=compiler,
                             archiver='xiar' + ' cru',
                             linker_exe=compiler + ' -shared-intel',
                             linker_so=compiler + ' ' + shared_flag +
                             ' -shared-intel')
|
||||
|
||||
|
||||
# The Windows variants wrap the MSVC compiler machinery instead of the
# Unix one; MSVCCompiler is only importable on Windows, hence the guard.
if platform.system() == 'Windows':
    class IntelCCompilerW(MSVCCompiler):
        """
        A modified Intel compiler compatible with an MSVC-built Python.
        """
        compiler_type = 'intelw'
        compiler_cxx = 'icl'

        def __init__(self, verbose=0, dry_run=0, force=0):
            MSVCCompiler.__init__(self, verbose, dry_run, force)
            # Match the 32-bit banner printed by icl.
            version_match = simple_version_match(start=r'Intel\(R\).*?32,')
            self.__version = version_match

        def initialize(self, plat_name=None):
            # Replace the MSVC toolchain executables with Intel's wrappers.
            MSVCCompiler.initialize(self, plat_name)
            self.cc = self.find_exe('icl.exe')
            self.lib = self.find_exe('xilib')
            self.linker = self.find_exe('xilink')
            self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
                                    '/Qstd=c99']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
                                          '/Qstd=c99', '/Z7', '/D_DEBUG']

    class IntelEM64TCCompilerW(IntelCCompilerW):
        """
        A modified Intel x86_64 compiler compatible with
        a 64bit MSVC-built Python.
        """
        compiler_type = 'intelemw'

        def __init__(self, verbose=0, dry_run=0, force=0):
            MSVCCompiler.__init__(self, verbose, dry_run, force)
            # Match the 64-bit banner printed by icl.
            version_match = simple_version_match(start=r'Intel\(R\).*?64,')
            self.__version = version_match
|
116
.venv/Lib/site-packages/numpy/distutils/lib2def.py
Normal file
116
.venv/Lib/site-packages/numpy/distutils/lib2def.py
Normal file
@ -0,0 +1,116 @@
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).

Usage:
    python lib2def.py [libname.lib] [output.def]
or
    python lib2def.py [libname.lib] > output.def

libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout

Author: Robert Kern <kernr@mail.ncifcrf.gov>
Last Update: April 30, 1999
"""

__version__ = '0.1a'

# Two-digit Python version, e.g. "311", used in default file names below.
py_ver = "%d%d" % tuple(sys.version_info[:2])

# Base nm(1) invocation: demangle (-C) and print symbol sizes (-s).
DEFAULT_NM = ['nm', '-Cs']

DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE

EXPORTS
""" % py_ver
# the header of the DEF file

# Regexes applied to nm output: every symbol line for the python DLL is a
# function candidate; those carrying the _imp__ prefix are data candidates.
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
|
||||
|
||||
def parse_cmd():
    """Parse ``sys.argv`` into ``(libfile, deffile)``.

    Accepts a ``.lib`` and a ``.def`` argument in either order; a missing
    library defaults to ``python<py_ver>.lib`` and a missing DEF file to
    ``None`` (meaning stdout).

    libfile, deffile = parse_cmd()"""
    if len(sys.argv) == 3:
        if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
            libfile, deffile = sys.argv[1:]
        elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
            deffile, libfile = sys.argv[1:]
        else:
            print("I'm assuming that your first argument is the library")
            print("and the second is the DEF file.")
            # BUG FIX: the original printed the assumption but never
            # assigned, raising UnboundLocalError at the return below.
            libfile, deffile = sys.argv[1:]
    elif len(sys.argv) == 2:
        if sys.argv[1][-4:] == '.def':
            deffile = sys.argv[1]
            libfile = 'python%s.lib' % py_ver
        elif sys.argv[1][-4:] == '.lib':
            deffile = None
            libfile = sys.argv[1]
        else:
            # BUG FIX: an argument with neither suffix previously left
            # both names unassigned; treat it as the library.
            libfile = sys.argv[1]
            deffile = None
    else:
        libfile = 'python%s.lib' % py_ver
        deffile = None
    return libfile, deffile
|
||||
|
||||
def getnm(nm_cmd=None, shell=True):
    """Returns the output of nm_cmd via a pipe.

    Raises RuntimeError when the command exits non-zero.

    nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
    # FIX: build the default lazily — the original used a mutable list
    # default, which is shared across calls and freezes py_ver at
    # import time.
    if nm_cmd is None:
        nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]
    p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, universal_newlines=True)
    nm_output, nm_err = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('failed to run "%s": "%s"' % (
                           ' '.join(nm_cmd), nm_err))
    return nm_output
|
||||
|
||||
def parse_nm(nm_output):
    """Returns a tuple of lists: dlist for the list of data
    symbols and flist for the list of function symbols.

    dlist, flist = parse_nm(nm_output)"""
    data = DATA_RE.findall(nm_output)
    func = FUNC_RE.findall(nm_output)

    # A symbol counts as a function when it shows up in both tables and
    # carries one of the recognised Python export prefixes.
    flist = [sym for sym in data
             if sym in func
             and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init')]

    # Remaining Py-prefixed symbols are exported as data.
    dlist = [sym for sym in data
             if sym not in flist
             and (sym[:2] == 'Py' or sym[:3] == '_Py')]

    return sorted(dlist), sorted(flist)
|
||||
|
||||
def output_def(dlist, flist, header, file = sys.stdout):
    """Outputs the final DEF file to a file defaulting to stdout.

    output_def(dlist, flist, header, file = sys.stdout)"""
    # Assemble the whole document first, then emit it in one write.
    pieces = [header]
    pieces.extend('\t%s DATA\n' % sym for sym in dlist)
    pieces.append('\n')  # blank line separating data from functions
    pieces.extend('\t%s\n' % sym for sym in flist)
    file.write(''.join(pieces))
|
||||
|
||||
if __name__ == '__main__':
    libfile, deffile = parse_cmd()
    # ``None`` means "write the DEF file to stdout".
    if deffile is None:
        deffile = sys.stdout
    else:
        deffile = open(deffile, 'w')
    nm_cmd = DEFAULT_NM + [str(libfile)]
    # shell=False: nm_cmd is an argv list, so no shell parsing is wanted.
    nm_output = getnm(nm_cmd, shell=False)
    dlist, flist = parse_nm(nm_output)
    output_def(dlist, flist, DEF_HEADER, deffile)
|
77
.venv/Lib/site-packages/numpy/distutils/line_endings.py
Normal file
77
.venv/Lib/site-packages/numpy/distutils/line_endings.py
Normal file
@ -0,0 +1,77 @@
|
||||
""" Functions for converting from DOS to UNIX line endings
|
||||
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
def dos2unix(file):
    """Replace CRLF with LF in *file*; print the name if it changed.

    Returns the path when the file was rewritten, else None (directories
    and binary files are skipped with a message)."""
    if os.path.isdir(file):
        print(file, "Directory!")
        return

    with open(file, "rb") as fp:
        data = fp.read()
    # BUG FIX: the file is read in binary mode, so the NUL probe and the
    # substitution must use bytes patterns — the original str patterns
    # raise TypeError on Python 3.
    if b'\0' in data:
        print(file, "Binary!")
        return

    newdata = re.sub(b"\r\n", b"\n", data)
    if newdata != data:
        print('dos2unix:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        return file
    else:
        print(file, 'ok')
|
||||
|
||||
def dos2unix_one_dir(modified_files, dir_name, file_names):
    """Convert every file in *dir_name*, appending changed paths to
    *modified_files*."""
    for name in file_names:
        converted = dos2unix(os.path.join(dir_name, name))
        if converted is not None:
            modified_files.append(converted)
|
||||
|
||||
def dos2unix_dir(dir_name):
    """Recursively convert a directory tree; return the changed files."""
    modified_files = []
    # BUG FIX: os.path.walk was removed in Python 3; os.walk provides the
    # same traversal with an explicit loop.
    for dirpath, _dirnames, filenames in os.walk(dir_name):
        dos2unix_one_dir(modified_files, dirpath, filenames)
    return modified_files
|
||||
#----------------------------------
|
||||
|
||||
def unix2dos(file):
    """Replace LF with CRLF in *file*; print the name if it changed.

    Returns the path when the file was rewritten, else None (directories
    and binary files are skipped with a message)."""
    if os.path.isdir(file):
        print(file, "Directory!")
        return

    with open(file, "rb") as fp:
        data = fp.read()
    # BUG FIX: binary read returns bytes, so the NUL probe and the regex
    # substitutions must use bytes patterns — the original str patterns
    # raise TypeError on Python 3.
    if b'\0' in data:
        print(file, "Binary!")
        return
    # Normalise to LF first so existing CRLFs are not doubled.
    newdata = re.sub(b"\r\n", b"\n", data)
    newdata = re.sub(b"\n", b"\r\n", newdata)
    if newdata != data:
        print('unix2dos:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        return file
    else:
        print(file, 'ok')
|
||||
|
||||
def unix2dos_one_dir(modified_files, dir_name, file_names):
    """Convert every file in *dir_name*, appending changed paths to
    *modified_files*."""
    for name in file_names:
        full_path = os.path.join(dir_name, name)
        # BUG FIX: the original discarded unix2dos()'s return value and
        # tested the loop variable (always truthy) instead, so every
        # file — converted or not — was reported as modified.  Mirror
        # dos2unix_one_dir and record only actual conversions.
        converted = unix2dos(full_path)
        if converted is not None:
            modified_files.append(converted)
|
||||
|
||||
def unix2dos_dir(dir_name):
    """Recursively convert a directory tree; return the changed files."""
    modified_files = []
    # BUG FIX: os.path.walk was removed in Python 3; os.walk provides the
    # same traversal with an explicit loop.
    for dirpath, _dirnames, filenames in os.walk(dir_name):
        unix2dos_one_dir(modified_files, dirpath, filenames)
    return modified_files
|
||||
|
||||
# Script usage: convert the tree rooted at the first argument to LF.
if __name__ == "__main__":
    dos2unix_dir(sys.argv[1])
|
111
.venv/Lib/site-packages/numpy/distutils/log.py
Normal file
111
.venv/Lib/site-packages/numpy/distutils/log.py
Normal file
@ -0,0 +1,111 @@
|
||||
# Colored log
|
||||
import sys
|
||||
from distutils.log import * # noqa: F403
|
||||
from distutils.log import Log as old_Log
|
||||
from distutils.log import _global_log
|
||||
|
||||
from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
|
||||
green_text, is_sequence, is_string)
|
||||
|
||||
|
||||
def _fix_args(args, flag=1):
    """Escape literal '%' in *args* so later %-formatting is safe.

    Strings are escaped directly; one level of sequence nesting is
    handled (*flag* guards against deeper recursion).
    """
    if is_string(args):
        return args.replace('%', '%%')
    if flag and is_sequence(args):
        return tuple(_fix_args(item, flag=0) for item in args)
    return args
|
||||
|
||||
|
||||
class Log(old_Log):
    # Colored replacement for distutils' Log: messages are routed through
    # _global_color_map and printed to stdout instead of distutils' own
    # stream handling.
    def _log(self, level, msg, args):
        if level >= self.threshold:
            if args:
                msg = msg % _fix_args(args)
            # Disabled filter for noisy copy/byte-compile messages; kept
            # for easy re-enabling.
            if 0:
                if msg.startswith('copying ') and msg.find(' -> ') != -1:
                    return
                if msg.startswith('byte-compiling '):
                    return
            print(_global_color_map[level](msg))
            sys.stdout.flush()

    def good(self, msg, *args):
        """
        If we log WARN messages, log this message as a 'nice' anti-warn
        message.

        """
        if WARN >= self.threshold:
            if args:
                print(green_text(msg % _fix_args(args)))
            else:
                print(green_text(msg))
            sys.stdout.flush()
|
||||
|
||||
|
||||
# Swap the class of distutils' singleton logger so all distutils logging
# flows through the colored Log defined above.
_global_log.__class__ = Log

# Module-level convenience alias for Log.good on the singleton.
good = _global_log.good
|
||||
|
||||
def set_threshold(level, force=False):
    """Set the global log threshold and return the previous one.

    A threshold already at or below DEBUG is only changed when *force*
    is true.
    """
    prev_level = _global_log.threshold
    if prev_level > DEBUG or force:
        # If we're running at DEBUG, don't change the threshold, as there's
        # likely a good reason why we're running at this level.
        _global_log.threshold = level
        if level <= DEBUG:
            info('set_threshold: setting threshold to DEBUG level,'
                 ' it can be changed only with force argument')
    else:
        info('set_threshold: not changing threshold from DEBUG level'
             ' %s to %s' % (prev_level, level))
    return prev_level
|
||||
|
||||
def get_threshold():
    """Return the current threshold of the global distutils logger."""
    return _global_log.threshold
|
||||
|
||||
def set_verbosity(v, force=False):
    """Translate a verbosity count into a log threshold.

    -1 and below select ERROR, 0 WARN, 1 INFO, 2 and above DEBUG.
    Returns the previous threshold expressed on the same verbosity scale
    (unknown thresholds map to 1).
    """
    prev_level = _global_log.threshold
    if v < 0:
        set_threshold(ERROR, force)
    elif v == 0:
        set_threshold(WARN, force)
    elif v == 1:
        set_threshold(INFO, force)
    elif v >= 2:
        set_threshold(DEBUG, force)
    verbosity_of = {FATAL: -2, ERROR: -1, WARN: 0, INFO: 1, DEBUG: 2}
    return verbosity_of.get(prev_level, 1)
|
||||
|
||||
|
||||
# Map each log level to the coloring function applied when printing.
_global_color_map = {
    DEBUG:cyan_text,
    INFO:default_text,
    WARN:red_text,
    ERROR:red_text,
    FATAL:red_text
}

# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
set_verbosity(0, force=True)
|
||||
|
||||
|
||||
# Preserve the star-imported distutils helpers before shadowing them with
# the prefix-adding wrappers below.
_error = error
_warn = warn
_info = info
_debug = debug


def error(msg, *a, **kw):
    """Log *msg* at ERROR level with an explicit "ERROR:" prefix."""
    _error(f"ERROR: {msg}", *a, **kw)


def warn(msg, *a, **kw):
    """Log *msg* at WARN level with an explicit "WARN:" prefix."""
    _warn(f"WARN: {msg}", *a, **kw)


def info(msg, *a, **kw):
    """Log *msg* at INFO level with an explicit "INFO:" prefix."""
    _info(f"INFO: {msg}", *a, **kw)


def debug(msg, *a, **kw):
    """Log *msg* at DEBUG level with an explicit "DEBUG:" prefix."""
    _debug(f"DEBUG: {msg}", *a, **kw)
|
@ -0,0 +1,6 @@
|
||||
/* NOTE(review): presumably stubs for MSVC-runtime symbols expected by
   gfortran-built objects — confirm against the build that links this. */
int _get_output_format(void)
{
    return 0;
}

int _imp____lc_codepage = 0;
|
598
.venv/Lib/site-packages/numpy/distutils/mingw32ccompiler.py
Normal file
598
.venv/Lib/site-packages/numpy/distutils/mingw32ccompiler.py
Normal file
@ -0,0 +1,598 @@
|
||||
"""
|
||||
Support code for building Python extensions on Windows.
|
||||
|
||||
# NT stuff
|
||||
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
|
||||
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
|
||||
# 3. Force windows to use g77
|
||||
|
||||
"""
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import subprocess
|
||||
import re
|
||||
import textwrap
|
||||
|
||||
# Overwrite certain distutils.ccompiler functions:
|
||||
import numpy.distutils.ccompiler # noqa: F401
|
||||
from numpy.distutils import log
|
||||
# NT stuff
|
||||
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
|
||||
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
|
||||
# --> this is done in numpy/distutils/ccompiler.py
|
||||
# 3. Force windows to use g77
|
||||
|
||||
import distutils.cygwinccompiler
|
||||
from distutils.unixccompiler import UnixCCompiler
|
||||
from distutils.msvccompiler import get_build_version as get_build_msvc_version
|
||||
from distutils.errors import UnknownFileError
|
||||
from numpy.distutils.misc_util import (msvc_runtime_library,
|
||||
msvc_runtime_version,
|
||||
msvc_runtime_major,
|
||||
get_build_architecture)
|
||||
|
||||
def get_msvcr_replacement():
    """Replacement for outdated version of get_msvcr from cygwinccompiler"""
    runtime = msvc_runtime_library()
    return [runtime] if runtime is not None else []
|
||||
|
||||
# monkey-patch cygwinccompiler with our updated version from misc_util
# to avoid getting an exception raised on Python 3.5
distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement

# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
# NOTE(review): the first group's class '[\s*[0-9]*' looks unbalanced but
# is valid regex (a class containing \s, '*', '[', and 0-9); confirm it
# matches objdump's ordinal column as intended.
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
|
||||
|
||||
# the same as cygwin plus some additional parameters
|
||||
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
    """ A modified MingW32 compiler compatible with an MSVC built Python.

    """

    compiler_type = 'mingw32'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):

        distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
                                                            dry_run, force)

        # **changes: eric jones 4/11/01
        # 1. Check for import library on Windows.  Build if it doesn't exist.

        build_import_library()

        # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
        msvcr_success = build_msvcr_library()
        msvcr_dbg_success = build_msvcr_library(debug=True)
        if msvcr_success or msvcr_dbg_success:
            # add preprocessor statement for using customized msvcr lib
            self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')

        # Define the MSVC version as hint for MinGW
        msvcr_version = msvc_runtime_version()
        if msvcr_version:
            self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)

        # MS_WIN64 should be defined when building for amd64 on windows,
        # but python headers define it only for MS compilers, which has all
        # kind of bad consequences, like using Py_ModuleInit4 instead of
        # Py_ModuleInit4_64, etc... So we add it here
        if get_build_architecture() == 'AMD64':
            self.set_executables(
                compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
                compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall '
                            '-Wstrict-prototypes',
                linker_exe='gcc -g',
                linker_so='gcc -g -shared')
        else:
            self.set_executables(
                compiler='gcc -O2 -Wall',
                compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
                linker_exe='g++ ',
                linker_so='g++ -shared')
        # added for python2.3 support
        # we can't pass it through set_executables because pre 2.2 would fail
        self.compiler_cxx = ['g++']

        # Maybe we should also append -mthreads, but then the finished dlls
        # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
        # thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        #self.dll_libraries=[]
        return

    # __init__ ()

    def link(self,
             target_desc,
             objects,
             output_filename,
             output_dir,
             libraries,
             library_dirs,
             runtime_library_dirs,
             export_symbols = None,
             debug=0,
             extra_preargs=None,
             extra_postargs=None,
             build_temp=None,
             target_lang=None):
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC >= 7.0 (MinGW standard is msvcrt)
        runtime_library = msvc_runtime_library()
        if runtime_library:
            if not libraries:
                libraries = []
            libraries.append(runtime_library)
        args = (self,
                target_desc,
                objects,
                output_filename,
                output_dir,
                libraries,
                library_dirs,
                runtime_library_dirs,
                None, #export_symbols, we do this in our def-file
                debug,
                extra_preargs,
                extra_postargs,
                build_temp,
                target_lang)
        # Delegate to UnixCCompiler.link, trimming the argument tuple to
        # however many parameters that implementation actually accepts.
        func = UnixCCompiler.link
        func(*args[:func.__code__.co_argcount])
        return

    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        # Map each source file to the object file it will compile into,
        # additionally accepting Windows resource files (.rc/.res).
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))

            # added these lines to strip off windows drive letters
            # without it, .o files are placed next to .c files
            # instead of the build directory
            drv, base = os.path.splitdrive(base)
            if drv:
                base = base[1:]

            if ext not in (self.src_extensions + ['.rc', '.res']):
                raise UnknownFileError(
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()
|
||||
|
||||
|
||||
def find_python_dll():
    """Locate the DLL of the running Python interpreter.

    Searches the virtualenv prefix, the base installation prefix and the
    Windows System32 directory; raises ValueError when not found.
    """
    # We can't do much here:
    # - find it in the virtualenv (sys.prefix)
    # - find it in python main dir (sys.base_prefix, if in a virtualenv)
    # - sys.real_prefix is main dir for virtualenvs in Python 2.7
    # - in system32,
    # - ortherwise (Sxs), I don't know how to get it.
    stems = [sys.prefix]
    if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
        stems.append(sys.base_prefix)
    elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
        stems.append(sys.real_prefix)

    sub_dirs = ['', 'lib', 'bin']
    # generate possible combinations of directory trees and sub-directories
    lib_dirs = []
    for stem in stems:
        for folder in sub_dirs:
            lib_dirs.append(os.path.join(stem, folder))

    # add system directory as well
    if 'SYSTEMROOT' in os.environ:
        lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))

    # search in the file system for possible candidates
    major_version, minor_version = tuple(sys.version_info[:2])
    implementation = platform.python_implementation()
    if implementation == 'CPython':
        dllname = f'python{major_version}{minor_version}.dll'
    elif implementation == 'PyPy':
        dllname = f'libpypy{major_version}-c.dll'
    else:
        # BUG FIX: the original string lacked the f-prefix, so the error
        # path reported the literal text '{implementation}' instead of
        # the actual interpreter name.
        dllname = f'Unknown platform {implementation}'
    print("Looking for %s" % dllname)
    for folder in lib_dirs:
        dll = os.path.join(folder, dllname)
        if os.path.exists(dll):
            return dll

    raise ValueError("%s not found in %s" % (dllname, lib_dirs))
|
||||
|
||||
def dump_table(dll):
    """Return objdump's header dump of *dll* as a list of byte lines."""
    output = subprocess.check_output(["objdump.exe", "-p", dll])
    return output.split(b'\n')
|
||||
|
||||
def generate_def(dll, dfile):
    """Given a dll file location, get all its exported symbols and dump them
    into the given def file.

    The .def file will be overwritten"""
    dump = dump_table(dll)
    # Locate the start of the ordinal/name table in objdump's output.
    for i in range(len(dump)):
        if _START.match(dump[i].decode()):
            break
    else:
        raise ValueError("Symbol table not found")

    # The table is contiguous: collect rows until the first non-matching
    # line ends it.
    syms = []
    for j in range(i+1, len(dump)):
        m = _TABLE.match(dump[j].decode())
        if m:
            syms.append((int(m.group(1).strip()), m.group(2)))
        else:
            break

    if len(syms) == 0:
        log.warn('No symbols found in %s' % dll)

    with open(dfile, 'w') as d:
        d.write('LIBRARY %s\n' % os.path.basename(dll))
        d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
        d.write(';DATA PRELOAD SINGLE\n')
        d.write('\nEXPORTS\n')
        for s in syms:
            #d.write('@%d %s\n' % (s[0], s[1]))
            d.write('%s\n' % s[1])
|
||||
|
||||
def find_dll(dll_name):
    """Search for *dll_name* in the WinSxS store, then the Python prefix
    and every directory on PATH; return its absolute path or None."""

    arch = {'AMD64' : 'amd64',
            'Intel' : 'x86'}[get_build_architecture()]

    def _find_dll_in_winsxs(dll_name):
        # Walk through the WinSxS directory to find the dll.
        winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
                                   'winsxs')
        if not os.path.exists(winsxs_path):
            return None
        for root, dirs, files in os.walk(winsxs_path):
            # only accept a hit from the matching architecture's subtree
            if dll_name in files and arch in root:
                return os.path.join(root, dll_name)
        return None

    def _find_dll_in_path(dll_name):
        # First, look in the Python directory, then scan PATH for
        # the given dll name.
        for path in [sys.prefix] + os.environ['PATH'].split(';'):
            filepath = os.path.join(path, dll_name)
            if os.path.exists(filepath):
                return os.path.abspath(filepath)

    return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
|
||||
|
||||
def build_msvcr_library(debug=False):
    """Build a MinGW import library for the MSVC runtime (Windows only).

    Returns True when a usable library exists afterwards, else False."""
    if os.name != 'nt':
        return False

    # If the version number is None, then we couldn't find the MSVC runtime at
    # all, because we are running on a Python distribution which is customed
    # compiled; trust that the compiler is the same as the one available to us
    # now, and that it is capable of linking with the correct runtime without
    # any extra options.
    msvcr_ver = msvc_runtime_major()
    if msvcr_ver is None:
        log.debug('Skip building import library: '
                  'Runtime is not compiled with MSVC')
        return False

    # Skip using a custom library for versions < MSVC 8.0
    if msvcr_ver < 80:
        log.debug('Skip building msvcr library:'
                  ' custom functionality not present')
        return False

    msvcr_name = msvc_runtime_library()
    if debug:
        msvcr_name += 'd'

    # Skip if custom library already exists
    out_name = "lib%s.a" % msvcr_name
    out_file = os.path.join(sys.prefix, 'libs', out_name)
    if os.path.isfile(out_file):
        log.debug('Skip building msvcr library: "%s" exists' %
                  (out_file,))
        return True

    # Find the msvcr dll
    msvcr_dll_name = msvcr_name + '.dll'
    dll_file = find_dll(msvcr_dll_name)
    if not dll_file:
        log.warn('Cannot build msvcr library: "%s" not found' %
                 msvcr_dll_name)
        return False

    def_name = "lib%s.def" % msvcr_name
    def_file = os.path.join(sys.prefix, 'libs', def_name)

    log.info('Building msvcr library: "%s" (from %s)' \
             % (out_file, dll_file))

    # Generate a symbol definition file from the msvcr dll
    generate_def(dll_file, def_file)

    # Create a custom mingw library for the given symbol definitions
    cmd = ['dlltool', '-d', def_file, '-l', out_file]
    retcode = subprocess.call(cmd)

    # Clean up symbol definitions
    os.remove(def_file)

    return (not retcode)
|
||||
|
||||
def build_import_library():
    """Build the gcc import library for the running Python (Windows only)."""
    if os.name != 'nt':
        return

    # Dispatch on the architecture Python was built for.
    arch = get_build_architecture()
    if arch == 'AMD64':
        return _build_import_library_amd64()
    if arch == 'Intel':
        return _build_import_library_x86()
    raise ValueError("Unhandled arch %s" % arch)
|
||||
|
||||
def _check_for_import_lib():
|
||||
"""Check if an import library for the Python runtime already exists."""
|
||||
major_version, minor_version = tuple(sys.version_info[:2])
|
||||
|
||||
# patterns for the file name of the library itself
|
||||
patterns = ['libpython%d%d.a',
|
||||
'libpython%d%d.dll.a',
|
||||
'libpython%d.%d.dll.a']
|
||||
|
||||
# directory trees that may contain the library
|
||||
stems = [sys.prefix]
|
||||
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
|
||||
stems.append(sys.base_prefix)
|
||||
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
|
||||
stems.append(sys.real_prefix)
|
||||
|
||||
# possible subdirectories within those trees where it is placed
|
||||
sub_dirs = ['libs', 'lib']
|
||||
|
||||
# generate a list of candidate locations
|
||||
candidates = []
|
||||
for pat in patterns:
|
||||
filename = pat % (major_version, minor_version)
|
||||
for stem_dir in stems:
|
||||
for folder in sub_dirs:
|
||||
candidates.append(os.path.join(stem_dir, folder, filename))
|
||||
|
||||
# test the filesystem to see if we can find any of these
|
||||
for fullname in candidates:
|
||||
if os.path.isfile(fullname):
|
||||
# already exists, in location given
|
||||
return (True, fullname)
|
||||
|
||||
# needs to be built, preferred location given first
|
||||
return (False, candidates[0])
|
||||
|
||||
def _build_import_library_amd64():
    """Build the libpython import library for 64-bit MinGW via dlltool."""
    out_exists, out_file = _check_for_import_lib()
    if out_exists:
        log.debug('Skip building import library: "%s" exists', out_file)
        return

    # get the runtime dll for which we are building import library
    dll_file = find_python_dll()
    log.info('Building import library (arch=AMD64): "%s" (from %s)' %
             (out_file, dll_file))

    # generate symbol list from this library
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    generate_def(dll_file, def_file)

    # generate import library from this symbol list
    cmd = ['dlltool', '-d', def_file, '-l', out_file]
    subprocess.check_call(cmd)
|
||||
|
||||
def _build_import_library_x86():
    """ Build the import libraries for Mingw32-gcc on Windows
    """
    out_exists, out_file = _check_for_import_lib()
    if out_exists:
        log.debug('Skip building import library: "%s" exists', out_file)
        return

    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
    if not os.path.isfile(lib_file):
        # didn't find library file in virtualenv, try base distribution, too,
        # and use that instead if found there. for Python 2.7 venvs, the base
        # directory is in attribute real_prefix instead of base_prefix.
        if hasattr(sys, 'base_prefix'):
            base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
        elif hasattr(sys, 'real_prefix'):
            base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
        else:
            base_lib = ''  # os.path.isfile('') == False

        if os.path.isfile(base_lib):
            lib_file = base_lib
        else:
            log.warn('Cannot build import library: "%s" not found', lib_file)
            return
    log.info('Building import library (ARCH=x86): "%s"', out_file)

    from numpy.distutils import lib2def

    # Extract the symbol list from the MSVC .lib and write a .def file.
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    nm_output = lib2def.getnm(
            lib2def.DEFAULT_NM + [lib_file], shell=False)
    dlist, flist = lib2def.parse_nm(nm_output)
    with open(def_file, 'w') as fid:
        lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)

    dll_name = find_python_dll ()

    # Turn the .def file into a gcc import library.
    cmd = ["dlltool",
           "--dllname", dll_name,
           "--def", def_file,
           "--output-lib", out_file]
    status = subprocess.check_output(cmd)
    # NOTE(review): check_output returns dlltool's stdout (bytes); any
    # output is treated as failure here — confirm that is the intent.
    if status:
        log.warn('Failed to build import library for gcc. Linking will fail.')
    return
|
||||
|
||||
#=====================================
|
||||
# Dealing with Visual Studio MANIFESTS
|
||||
#=====================================
|
||||
|
||||
# Functions to deal with visual studio manifests. Manifest are a mechanism to
|
||||
# enforce strong DLL versioning on windows, and has nothing to do with
|
||||
# distutils MANIFEST. manifests are XML files with version info, and used by
|
||||
# the OS loader; they are necessary when linking against a DLL not in the
|
||||
# system path; in particular, official python 2.6 binary is built against the
|
||||
# MS runtime 9 (the one from VS 2008), which is not available on most windows
|
||||
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
|
||||
# directory, but this requires the manifest for this to work. This is a big
|
||||
# mess, thanks MS for a wonderful system.
|
||||
|
||||
# XXX: ideally, we should use exactly the same version as used by python. I
|
||||
# submitted a patch to get this version, but it was only included for python
|
||||
# 2.6.1 and above. So for versions below, we use a "best guess".
|
||||
# Maps an MSVC runtime major version string ("80", "90", "100", ...) to the
# full assembly version string required when emitting manifest XML.
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
    try:
        import msvcrt
        # I took one version in my SxS directory: no idea if it is the good
        # one, and we can't retrieve it from python
        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
        # on Windows XP:
        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
        crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
        if crt_ver is not None:  # Available at least back to Python 3.3
            # NOTE: 'min' here briefly shadows the builtin; it is deleted
            # immediately below.
            maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
            _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
            del maj, min
        del crt_ver
    except ImportError:
        # If we are here, means python was not built with MSVC. Not sure what
        # to do in that case: manifest building will fail, but it should not be
        # used in that case anyway
        log.warn('Cannot import msvcrt: using manifest will not be possible')
|
||||
|
||||
def msvc_manifest_xml(maj, min):
    """Return the embeddable manifest text for MSVCR version `maj`.`min`.

    Raises
    ------
    ValueError
        If the combined version key is not known in _MSVCRVER_TO_FULLVER.
    """
    key = str(maj * 10 + min)
    if key not in _MSVCRVER_TO_FULLVER:
        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
                         (maj, min)) from None
    fullver = _MSVCRVER_TO_FULLVER[key]
    # Don't be fooled, it looks like an XML, but it is not. In particular, it
    # should not have any space before starting, and its size should be
    # divisible by 4, most likely for alignment constraints when the xml is
    # embedded in the binary...
    # This template was copied directly from the python 2.6 binary (using
    # strings.exe from mingw on python.exe).
    template = textwrap.dedent("""\
        <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
          <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
            <security>
              <requestedPrivileges>
                <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
              </requestedPrivileges>
            </security>
          </trustInfo>
          <dependency>
            <dependentAssembly>
              <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
            </dependentAssembly>
          </dependency>
        </assembly>""")

    return template % {'fullver': fullver, 'maj': maj, 'min': min}
|
||||
|
||||
def manifest_rc(name, type='dll'):
    """Return the rc file used to generate the res file which will be embedded
    as manifest for given manifest file name, of given type ('dll' or 'exe').

    Parameters
    ----------
    name : str
        name of the manifest file to embed
    type : str {'dll', 'exe'}
        type of the binary which will embed the manifest

    """
    # RT_MANIFEST resource ids: 1 for executables, 2 for dlls.
    rctype = {'dll': 2, 'exe': 1}.get(type)
    if rctype is None:
        raise ValueError("Type %s not supported" % type)

    return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
|
||||
|
||||
def check_embedded_msvcr_match_linked(msver):
    """msver is the ms runtime version used for the MANIFEST.

    Raise ValueError when the runtime python links against differs from the
    one about to be embedded; do nothing when the linked version is unknown.
    """
    maj = msvc_runtime_major()
    # Only compare when a linked runtime could actually be detected.
    if maj and maj != int(msver):
        raise ValueError(
              "Discrepancy between linked msvcr " \
              "(%d) and the one about to be embedded " \
              "(%d)" % (int(msver), maj))
|
||||
|
||||
def configtest_name(config):
    """Return the base name (without extension) of the config test source."""
    source = config._gen_temp_sourcefile("yo", [], "c")
    root, _ext = os.path.splitext(os.path.basename(source))
    return root
|
||||
|
||||
def manifest_name(config):
    """Return the manifest file name for the config test executable."""
    # configtest name (including the platform executable suffix)
    root = configtest_name(config)
    exext = config.compiler.exe_extension
    return root + exext + ".manifest"
|
||||
|
||||
def rc_name(config):
    """Return the .rc file name for the config test."""
    return configtest_name(config) + ".rc"
|
||||
|
||||
def generate_manifest(config):
    """Write a MSVC runtime manifest for the config test binary.

    Does nothing when no MSVC build version is detected.  For runtimes >= 8
    it first checks that the linked and embedded runtimes agree.
    """
    msver = get_build_msvc_version()
    if msver is not None:
        if msver >= 8:
            check_embedded_msvcr_match_linked(msver)
        ma_str, mi_str = str(msver).split('.')
        # Write the manifest file
        manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
        with open(manifest_name(config), "w") as man:
            # Register for cleanup before writing the content.
            config.temp_files.append(manifest_name(config))
            man.write(manxml)
|
2502
.venv/Lib/site-packages/numpy/distutils/misc_util.py
Normal file
2502
.venv/Lib/site-packages/numpy/distutils/misc_util.py
Normal file
File diff suppressed because it is too large
Load Diff
63
.venv/Lib/site-packages/numpy/distutils/msvc9compiler.py
Normal file
63
.venv/Lib/site-packages/numpy/distutils/msvc9compiler.py
Normal file
@ -0,0 +1,63 @@
|
||||
import os
|
||||
from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler
|
||||
|
||||
from .system_info import platform_bits
|
||||
|
||||
|
||||
def _merge(old, new):
|
||||
"""Concatenate two environment paths avoiding repeats.
|
||||
|
||||
Here `old` is the environment string before the base class initialize
|
||||
function is called and `new` is the string after the call. The new string
|
||||
will be a fixed string if it is not obtained from the current environment,
|
||||
or the same as the old string if obtained from the same environment. The aim
|
||||
here is not to append the new string if it is already contained in the old
|
||||
string so as to limit the growth of the environment string.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
old : string
|
||||
Previous environment string.
|
||||
new : string
|
||||
New environment string.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ret : string
|
||||
Updated environment string.
|
||||
|
||||
"""
|
||||
if not old:
|
||||
return new
|
||||
if new in old:
|
||||
return old
|
||||
|
||||
# Neither new nor old is empty. Give old priority.
|
||||
return ';'.join([old, new])
|
||||
|
||||
|
||||
class MSVCCompiler(_MSVCCompiler):
    """MSVC 9 compiler with numpy-specific environment handling and a
    32-bit SSE2 workaround."""

    def __init__(self, verbose=0, dry_run=0, force=0):
        _MSVCCompiler.__init__(self, verbose, dry_run, force)

    def initialize(self, plat_name=None):
        # The 'lib' and 'include' variables may be overwritten
        # by MSVCCompiler.initialize, so save them for later merge.
        # Default to '' rather than None so _merge always receives strings,
        # consistent with numpy.distutils.msvccompiler.
        environ_lib = os.getenv('lib', '')
        environ_include = os.getenv('include', '')
        _MSVCCompiler.initialize(self, plat_name)

        # Merge current and previous values of 'lib' and 'include'
        os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
        os.environ['include'] = _merge(environ_include, os.environ['include'])

        # msvc9 building for 32 bits requires SSE2 to work around a
        # compiler bug.
        if platform_bits == 32:
            self.compile_options += ['/arch:SSE2']
            self.compile_options_debug += ['/arch:SSE2']

    def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
        # Always request a manifest; the base class adds the remaining
        # linker arguments.
        ld_args.append('/MANIFEST')
        _MSVCCompiler.manifest_setup_ldargs(self, output_filename,
                                            build_temp, ld_args)
|
58
.venv/Lib/site-packages/numpy/distutils/msvccompiler.py
Normal file
58
.venv/Lib/site-packages/numpy/distutils/msvccompiler.py
Normal file
@ -0,0 +1,58 @@
|
||||
import os
|
||||
from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler
|
||||
|
||||
from .system_info import platform_bits
|
||||
|
||||
|
||||
def _merge(old, new):
|
||||
"""Concatenate two environment paths avoiding repeats.
|
||||
|
||||
Here `old` is the environment string before the base class initialize
|
||||
function is called and `new` is the string after the call. The new string
|
||||
will be a fixed string if it is not obtained from the current environment,
|
||||
or the same as the old string if obtained from the same environment. The aim
|
||||
here is not to append the new string if it is already contained in the old
|
||||
string so as to limit the growth of the environment string.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
old : string
|
||||
Previous environment string.
|
||||
new : string
|
||||
New environment string.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ret : string
|
||||
Updated environment string.
|
||||
|
||||
"""
|
||||
if new in old:
|
||||
return old
|
||||
if not old:
|
||||
return new
|
||||
|
||||
# Neither new nor old is empty. Give old priority.
|
||||
return ';'.join([old, new])
|
||||
|
||||
|
||||
class MSVCCompiler(_MSVCCompiler):
    # numpy-specific MSVC compiler: preserves the 'lib'/'include'
    # environment variables across initialize() and forces SSE2 on
    # 32-bit builds.
    def __init__(self, verbose=0, dry_run=0, force=0):
        _MSVCCompiler.__init__(self, verbose, dry_run, force)

    def initialize(self):
        # The 'lib' and 'include' variables may be overwritten
        # by MSVCCompiler.initialize, so save them for later merge.
        environ_lib = os.getenv('lib', '')
        environ_include = os.getenv('include', '')
        _MSVCCompiler.initialize(self)

        # Merge current and previous values of 'lib' and 'include'
        os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
        os.environ['include'] = _merge(environ_include, os.environ['include'])

        # msvc9 building for 32 bits requires SSE2 to work around a
        # compiler bug.
        if platform_bits == 32:
            self.compile_options += ['/arch:SSE2']
            self.compile_options_debug += ['/arch:SSE2']
|
437
.venv/Lib/site-packages/numpy/distutils/npy_pkg_config.py
Normal file
437
.venv/Lib/site-packages/numpy/distutils/npy_pkg_config.py
Normal file
@ -0,0 +1,437 @@
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
|
||||
from configparser import RawConfigParser
|
||||
|
||||
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
|
||||
'read_config', 'parse_flags']
|
||||
|
||||
# Pattern matching ``${var}`` interpolation markers in config values.
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
|
||||
|
||||
class FormatError(OSError):
    """Raised when a configuration file cannot be parsed."""

    def __init__(self, msg):
        # Keep the message so __str__ can report it verbatim.
        self.msg = msg

    def __str__(self):
        return self.msg
|
||||
|
||||
class PkgNotFound(OSError):
    """Raised when a package cannot be located."""

    def __init__(self, msg):
        # Keep the message so __str__ can report it verbatim.
        self.msg = msg

    def __str__(self):
        return self.msg
|
||||
|
||||
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'

    """
    parsed = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
              'macros': [], 'ignored': []}

    # Prefixes are tested in order; '-L' must come before '-l'.
    prefix_to_key = (('-I', 'include_dirs'), ('-L', 'library_dirs'),
                     ('-l', 'libraries'), ('-D', 'macros'))

    # Prepending ' ' lets ' -' act as the flag separator for the first
    # flag as well.
    for chunk in (' ' + line).split(' -'):
        token = '-' + chunk
        for prefix, key in prefix_to_key:
            if token.startswith(prefix):
                parsed[key].append(token[len(prefix):].strip())
                break
        else:
            parsed['ignored'].append(token)

    return parsed
|
||||
|
||||
def _escape_backslash(val):
|
||||
return val.replace('\\', '\\\\')
|
||||
|
||||
class LibraryInfo:
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.

    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        # Normalize falsy `requires` (None, empty) to an empty list.
        self.requires = requires if requires else []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Returns
        -------
        keys : list of str
            The list of section headers.

        """
        return list(self._sections.keys())

    def cflags(self, section="default"):
        # Interpolate ${var} references, then escape backslashes so the
        # flags survive later string substitution.
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        # BUG FIX: the branches were inverted -- the joined requirement list
        # was only ever printed when `requires` was empty, and a bare
        # "Requires:" when it was not.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)

        return "\n".join(m)
|
||||
|
||||
class VariableSet:
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.

    """
    def __init__(self, d):
        # Raw (uninterpolated) variable values.
        self._raw_data = dict(d)

        # Per-variable compiled pattern and substitution text.
        self._re = {}
        self._re_sub = {}

        self._init_parse()

    def _init_parse(self):
        # Build a regex/replacement pair for every known variable.
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        # Brute force: substitute every known variable, repeating until no
        # '${var}' remains or a full pass changes nothing (unknown variable).
        def _substitute_all(text):
            for name, pattern in self._re.items():
                text = pattern.sub(self._re_sub[name], text)
            return text

        while _VAR.search(value):
            expanded = _substitute_all(value)
            if expanded == value:
                break
            value = expanded

        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.

        """
        return list(self._raw_data)

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        self._raw_data[name] = value
        self._init_parse_var(name, value)
|
||||
|
||||
def parse_meta(config):
    """Return the [meta] section of `config` as a dict.

    Raises FormatError when the section or any mandatory option (name,
    description, version) is missing; 'requires' defaults to [].
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")

    meta = dict(config.items('meta'))

    for option in ('name', 'description', 'version'):
        if option not in meta:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                "but not found" % option)

    meta.setdefault('requires', [])

    return meta
|
||||
|
||||
def parse_variables(config):
    """Return a VariableSet built from the [variables] section of `config`.

    Raises FormatError if the section is missing.
    """
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")

    return VariableSet(dict(config.items("variables")))
|
||||
|
||||
def parse_sections(config):
    # NOTE(review): `meta_d` and `r` are not defined anywhere in this module,
    # so calling this raises NameError.  Looks like dead/unfinished code --
    # callers use parse_config() instead.  TODO: remove or implement.
    return meta_d, r
|
||||
|
||||
def pkg_to_filename(pkg_name):
    """Return the npy-pkg-config file name for package `pkg_name`."""
    return f"{pkg_name}.ini"
|
||||
|
||||
def parse_config(filename, dirs=None):
    """Read `filename` (searched under each of `dirs` when given) and split
    its content into meta data, variables, sections and requirements.

    Returns
    -------
    meta : dict
        Parsed [meta] section.
    vars : dict
        [variables] section values, backslash-escaped.
    sections : dict of dict
        Every non-meta, non-variables section.
    requires : dict
        Per-section 'requires' option, when present.

    Raises
    ------
    PkgNotFound
        If none of the candidate files could be read.
    """
    if dirs:
        candidates = [os.path.join(d, filename) for d in dirs]
    else:
        candidates = [filename]

    config = RawConfigParser()

    # config.read returns the list of files successfully parsed.
    if not config.read(candidates):
        raise PkgNotFound("Could not find file(s) %s" % str(candidates))

    # Parse meta and variables sections
    meta = parse_meta(config)

    vars = {}
    if config.has_section('variables'):
        for name, value in config.items("variables"):
            vars[name] = _escape_backslash(value)

    # Parse "normal" sections
    sections = {}
    requires = {}
    for section in config.sections():
        if section in ('meta', 'variables'):
            continue
        if config.has_option(section, "requires"):
            requires[section] = config.get(section, 'requires')
        sections[section] = dict(config.items(section))

    return meta, vars, sections, requires
|
||||
|
||||
def _read_config_imp(filenames, dirs=None):
    """Build a LibraryInfo from a config file, recursively folding in the
    variables and section values of every required library."""
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))

            # Update var dict for variables not in 'top' config file
            for k, v in nvars.items():
                if not k in vars:
                    vars[k] = v

            # Update sec dict: append the required library's values to the
            # matching section of the requiring one.
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    sections[rname][oname] += ' %s' % ovalue

        return meta, vars, sections, reqs

    meta, vars, sections, reqs = _read_config(filenames)

    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    if not 'pkgdir' in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if not pkgname in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))

        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))

    return LibraryInfo(name=meta["name"], description=meta["description"],
            version=meta["version"], sections=sections, vars=VariableSet(vars))
|
||||
|
||||
# Trivial cache of LibraryInfo instances, keyed by package name.  To be
# really efficient the cache should be handled inside read_config's helpers,
# since the same file can be parsed many times outside LibraryInfo creation,
# but that is unlikely to matter in practice.
_CACHE = {}

def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info

    Examples
    --------
    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
    >>> type(npymath_info)
    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
    >>> print(npymath_info)
    Name: npymath
    Description: Portable, core math library implementing C99 standard
    Requires:
    Version: 0.1 #random

    """
    if pkgname not in _CACHE:
        _CACHE[pkgname] = _read_config_imp(pkg_to_filename(pkgname), dirs)
    return _CACHE[pkgname]
|
||||
|
||||
# TODO:
|
||||
# - implements version comparison (modversion + atleast)
|
||||
|
||||
# pkg-config simple emulator - useful for debugging, and maybe later to query
|
||||
# the system
|
||||
if __name__ == '__main__':
    from optparse import OptionParser
    import glob

    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    # BUG FIX: help text was copy-pasted from --atleast-version
    # ("Minimal version").
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      help="List all packages in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")

    (options, args) = parser.parse_args(sys.argv)

    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")

    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            info = read_config(f)
            print("%s\t%s - %s" % (info.name, info.name, info.description))

    pkg_name = args[1]
    # NPY_PKG_CONFIG_PATH extends the default search locations.
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])

    if options.section:
        section = options.section
    else:
        section = "default"

    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of "
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value

    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        print(info.version >= options.min_version)
|
@ -0,0 +1,17 @@
|
||||
# XXX: Handle setuptools ?
|
||||
from distutils.core import Distribution
|
||||
|
||||
# This class is used because we add new files (sconscripts, and so on) with
# the scons command.
class NumpyDistribution(Distribution):
    """Distribution subclass tracking scons scripts, installable libraries
    and pkg-config files registered by the scons command."""

    def __init__(self, attrs=None):
        # (sconscript, pre_hook, post_hook, src, parent_names) tuples.
        self.scons_data = []
        # Installable libraries.
        self.installed_libraries = []
        # pkg_config files to generate/install.
        self.installed_pkg_config = {}
        Distribution.__init__(self, attrs)

    def has_scons_scripts(self):
        # True when at least one sconscript has been registered.
        return bool(self.scons_data)
|
21
.venv/Lib/site-packages/numpy/distutils/pathccompiler.py
Normal file
21
.venv/Lib/site-packages/numpy/distutils/pathccompiler.py
Normal file
@ -0,0 +1,21 @@
|
||||
from distutils.unixccompiler import UnixCCompiler
|
||||
|
||||
class PathScaleCCompiler(UnixCCompiler):
    """
    PathScale compiler compatible with a gcc-built Python.
    """

    compiler_type = 'pathcc'
    cc_exe = 'pathcc'
    cxx_exe = 'pathCC'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        # Route every build step through the PathScale drivers.
        self.set_executables(compiler=self.cc_exe,
                             compiler_so=self.cc_exe,
                             compiler_cxx=self.cxx_exe,
                             linker_exe=self.cc_exe,
                             linker_so=self.cc_exe + ' -shared')
|
17
.venv/Lib/site-packages/numpy/distutils/setup.py
Normal file
17
.venv/Lib/site-packages/numpy/distutils/setup.py
Normal file
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env python3
|
||||
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils package configuration.

    Registers the command/fcompiler/tests subpackages and the data files
    shipped with numpy.distutils (site.cfg, the mingw gfortran hack source,
    the compiler checks directory and the .pyi stubs).
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('distutils', parent_package, top_path)
    for subpackage in ('command', 'fcompiler', 'tests'):
        config.add_subpackage(subpackage)
    config.add_data_files('site.cfg')
    config.add_data_files('mingw/gfortran_vs2003_hack.c')
    config.add_data_dir('checks')
    config.add_data_files('*.pyi')
    config.make_config_py()
    return config
|
||||
|
||||
if __name__ == '__main__':
    # Allow building numpy.distutils standalone via ``python setup.py ...``.
    from numpy.distutils.core import setup
    setup(configuration=configuration)
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user