first commit

Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

@@ -0,0 +1,72 @@
'''Tests for numpy.distutils.build_ext.'''
import os
import subprocess
import sys
from textwrap import indent, dedent
import pytest
@pytest.mark.slow
def test_multi_fortran_libs_link(tmp_path):
'''
Ensures multiple "fake" static libraries are correctly linked.
see gh-18295
'''
# We need to make sure we actually have an f77 compiler.
# This is nontrivial, so we'll borrow the utilities
# from f2py tests:
from numpy.f2py.tests.util import has_f77_compiler
if not has_f77_compiler():
pytest.skip('No F77 compiler found')
# make some dummy sources
with open(tmp_path / '_dummy1.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_one()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy2.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_two()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy.c', 'w') as fid:
# doesn't need to load - just needs to exist
fid.write('int PyInit_dummyext;')
# make a setup file
with open(tmp_path / 'setup.py', 'w') as fid:
srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..')
fid.write(dedent(f'''\
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration("", parent_package, top_path)
config.add_library("dummy1", sources=["_dummy1.f"])
config.add_library("dummy2", sources=["_dummy2.f"])
config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"])
return config
if __name__ == "__main__":
import sys
sys.path.insert(0, r"{srctree}")
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())'''))
# build the test extension and "install" into a temporary directory
build_dir = tmp_path
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--record', str(tmp_path / 'tmp_install_log.txt'),
],
cwd=str(build_dir),
)
# get the path to the so
so = None
with open(tmp_path /'tmp_install_log.txt') as fid:
for line in fid:
if 'dummyext' in line:
so = line.strip()
break
assert so is not None

@@ -0,0 +1,789 @@
import re, textwrap, os
from os import sys, path
from distutils.errors import DistutilsError
is_standalone = __name__ == '__main__' and __package__ is None
if is_standalone:
import unittest, contextlib, tempfile, shutil
sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
from ccompiler_opt import CCompilerOpt
# from numpy/testing/_private/utils.py
@contextlib.contextmanager
def tempdir(*args, **kwargs):
tmpdir = tempfile.mkdtemp(*args, **kwargs)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def assert_(expr, msg=''):
if not expr:
raise AssertionError(msg)
else:
from numpy.distutils.ccompiler_opt import CCompilerOpt
from numpy.testing import assert_, tempdir
# architectures and compilers to test
arch_compilers = dict(
x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
ppc64 = ("gcc", "clang"),
ppc64le = ("gcc", "clang"),
armhf = ("gcc", "clang"),
aarch64 = ("gcc", "clang"),
noarch = ("gcc",)
)
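# FakeCCompilerOpt never invokes a real compiler: dist_compile() only checks
# the "trap" patterns and returns fake objects, dist_info() returns whatever
# is stored in fake_info, and dist_log() is silenced.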
class FakeCCompilerOpt(CCompilerOpt):
fake_info = ""
def __init__(self, trap_files="", trap_flags="", *args, **kwargs):
self.fake_trap_files = trap_files
self.fake_trap_flags = trap_flags
CCompilerOpt.__init__(self, None, **kwargs)
def __repr__(self):
return textwrap.dedent("""\
<<<<
march : {}
compiler : {}
----------------
{}
>>>>
""").format(self.cc_march, self.cc_name, self.report())
def dist_compile(self, sources, flags, **kwargs):
assert(isinstance(sources, list))
assert(isinstance(flags, list))
if self.fake_trap_files:
for src in sources:
if re.match(self.fake_trap_files, src):
self.dist_error("source is trapped by a fake interface")
if self.fake_trap_flags:
for f in flags:
if re.match(self.fake_trap_flags, f):
self.dist_error("flag is trapped by a fake interface")
# fake objects
return zip(sources, [' '.join(flags)] * len(sources))
def dist_info(self):
return FakeCCompilerOpt.fake_info
@staticmethod
def dist_log(*args, stderr=False):
pass
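# Shared test body: 'arch' and 'cc' are filled in by the concrete
# TestCCompilerOpt_<arch>_<cc> classes generated by new_test() at the
# bottom of this file.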
class _Test_CCompilerOpt:
arch = None # x86_64
cc = None # gcc
def setup(self):
FakeCCompilerOpt.conf_nocache = True
self._opt = None
def nopt(self, *args, **kwargs):
FakeCCompilerOpt.fake_info = (self.arch, self.cc, "")
return FakeCCompilerOpt(*args, **kwargs)
def opt(self):
if not self._opt:
self._opt = self.nopt()
return self._opt
def march(self):
return self.opt().cc_march
def cc_name(self):
return self.opt().cc_name
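# Write the '@targets' source to a temporary file, compile it through
# try_dispatch(), then derive the enabled targets from the generated
# object names and cross-check them against 'sources_status'.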
def get_targets(self, targets, groups, **kwargs):
FakeCCompilerOpt.conf_target_groups = groups
opt = self.nopt(
cpu_baseline=kwargs.get("baseline", "min"),
cpu_dispatch=kwargs.get("dispatch", "max"),
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
with tempdir() as tmpdir:
file = os.path.join(tmpdir, "test_targets.c")
with open(file, 'w') as f:
f.write(targets)
gtargets = []
gflags = {}
fake_objects = opt.try_dispatch([file])
for source, flags in fake_objects:
gtar = path.basename(source).split('.')[1:-1]
glen = len(gtar)
if glen == 0:
gtar = "baseline"
elif glen == 1:
gtar = gtar[0].upper()
else:
# convert the multi-target into the parenthesized string format so it is
# equivalent to the configuration statement syntax.
gtar = ('('+' '.join(gtar)+')').upper()
gtargets.append(gtar)
gflags[gtar] = flags
has_baseline, targets = opt.sources_status[file]
targets = targets + ["baseline"] if has_baseline else targets
# convert tuples that represent multi-targets into the parenthesized string format
targets = [
'('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
for tar in targets
]
if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
raise AssertionError(
"'sources_status' returns different targets than the compiled targets\n"
"%s != %s" % (targets, gtargets)
)
# return targets from 'sources_status' since the order matters
return targets, gflags
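# Pick the expected regex for the current march/compiler from the keyword
# arguments, checking the march/compiler names, their mapped "origin" names
# (e.g. x64 -> x86, clang -> gcc) and the combined '<march>_<cc>' keys, and
# anchor the regex with '$' unless it already ends with a regex metacharacter.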
def arg_regex(self, **kwargs):
map2origin = dict(
x64 = "x86",
ppc64le = "ppc64",
aarch64 = "armhf",
clang = "gcc",
)
march = self.march(); cc_name = self.cc_name()
map_march = map2origin.get(march, march)
map_cc = map2origin.get(cc_name, cc_name)
for key in (
march, cc_name, map_march, map_cc,
march + '_' + cc_name,
map_march + '_' + cc_name,
march + '_' + map_cc,
map_march + '_' + map_cc,
) :
regex = kwargs.pop(key, None)
if regex is not None:
break
if regex:
if isinstance(regex, dict):
for k, v in regex.items():
if v[-1:] not in ')}$?\\.+*':
regex[k] = v + '$'
else:
assert(isinstance(regex, str))
if regex[-1:] not in ')}$?\\.+*':
regex += '$'
return regex
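# The expect*() helpers below create a fresh optimizer with the given
# baseline/dispatch arguments and match the resulting feature names,
# flags or targets against the per-arch regex returned by arg_regex().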
def expect(self, dispatch, baseline="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
features = ' '.join(opt.cpu_dispatch_names())
if not match:
if len(features) != 0:
raise AssertionError(
'expected empty features, not "%s"' % features
)
return
if not re.match(match, features, re.IGNORECASE):
raise AssertionError(
'dispatch features "%s" not match "%s"' % (features, match)
)
def expect_baseline(self, baseline, dispatch="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
features = ' '.join(opt.cpu_baseline_names())
if not match:
if len(features) != 0:
raise AssertionError(
'expected empty features, not "%s"' % features
)
return
if not re.match(match, features, re.IGNORECASE):
raise AssertionError(
'baseline features "%s" not match "%s"' % (features, match)
)
def expect_flags(self, baseline, dispatch="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
flags = ' '.join(opt.cpu_baseline_flags())
if not match:
if len(flags) != 0:
raise AssertionError(
'expected empty flags, not "%s"' % flags
)
return
if not re.match(match, flags):
raise AssertionError(
'flags "%s" not match "%s"' % (flags, match)
)
def expect_targets(self, targets, groups={}, **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
targets = ' '.join(targets)
if not match:
if len(targets) != 0:
raise AssertionError(
'expected empty targets, not "%s"' % targets
)
return
if not re.match(match, targets, re.IGNORECASE):
raise AssertionError(
'targets "%s" not match "%s"' % (targets, match)
)
def expect_target_flags(self, targets, groups={}, **kwargs):
match_dict = self.arg_regex(**kwargs)
if match_dict is None:
return
assert(isinstance(match_dict, dict))
_, tar_flags = self.get_targets(targets=targets, groups=groups)
for match_tar, match_flags in match_dict.items():
if match_tar not in tar_flags:
raise AssertionError(
'expected to find target "%s"' % match_tar
)
flags = tar_flags[match_tar]
if not match_flags:
if len(flags) != 0:
raise AssertionError(
'expected to find empty flags in target "%s"' % match_tar
)
if not re.match(match_flags, flags):
raise AssertionError(
'"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags)
)
def test_interface(self):
wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
wrong_cc = "clang" if self.cc != "clang" else "icc"
opt = self.opt()
assert_(getattr(opt, "cc_on_" + self.arch))
assert_(not getattr(opt, "cc_on_" + wrong_arch))
assert_(getattr(opt, "cc_is_" + self.cc))
assert_(not getattr(opt, "cc_is_" + wrong_cc))
def test_args_empty(self):
for baseline, dispatch in (
("", "none"),
(None, ""),
("none +none", "none - none"),
("none -max", "min - max"),
("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
("max -vsx - avx + avx512f neon -MAX ",
"min -min + max -max -vsx + avx2 -avx2 +NONE")
) :
opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
assert(len(opt.cpu_baseline_names()) == 0)
assert(len(opt.cpu_dispatch_names()) == 0)
def test_args_validation(self):
if self.march() == "unknown":
return
# check the sanity of argument validation
for baseline, dispatch in (
("unkown_feature - max +min", "unknown max min"), # unknowing features
("#avx2", "$vsx") # groups and polices aren't acceptable
) :
try:
self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
raise AssertionError("excepted an exception for invalid arguments")
except DistutilsError:
pass
def test_skip(self):
# only take what the platform supports and skip the others
# without causing exceptions
self.expect(
"sse vsx neon",
x86="sse", ppc64="vsx", armhf="neon", unknown=""
)
self.expect(
"sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
x86 = "sse41 avx avx2",
ppc64 = "vsx2 vsx3",
armhf = "neon_vfpv4 asimd",
unknown = ""
)
# any feature in cpu_dispatch must be ignored if it's part of the baseline
self.expect(
"sse neon vsx", baseline="sse neon vsx",
x86="", ppc64="", armhf=""
)
self.expect(
"avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
x86="", ppc64="", armhf=""
)
def test_implies(self):
# the baseline combines implied features, so we count
# on it instead of testing 'feature_implies()' directly
self.expect_baseline(
"fma3 avx2 asimd vsx3",
# .* between two spaces can validate features in between
x86 = "sse .* sse41 .* fma3.*avx2",
ppc64 = "vsx vsx2 vsx3",
armhf = "neon neon_fp16 neon_vfpv4 asimd"
)
"""
special cases
"""
# in icc and msvc, FMA3 and AVX2 can't be separated
# both need to imply each other, same for avx512f & cd
for f0, f1 in (
("fma3", "avx2"),
("avx512f", "avx512cd"),
):
diff = ".* sse42 .* %s .*%s$" % (f0, f1)
self.expect_baseline(f0,
x86_gcc=".* sse42 .* %s$" % f0,
x86_icc=diff, x86_iccw=diff
)
self.expect_baseline(f1,
x86_gcc=".* avx .* %s$" % f1,
x86_icc=diff, x86_iccw=diff
)
# in msvc, the following features can't be separated either
for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")):
for ff in f:
self.expect_baseline(ff,
x86_msvc=".*%s" % ' '.join(f)
)
# in ppc64le VSX and VSX2 can't be separated
self.expect_baseline("vsx", ppc64le="vsx vsx2")
# in aarch64, the following features can't be separated
for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"):
self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd")
def test_args_options(self):
# max & native
for o in ("max", "native"):
if o == "native" and self.cc_name() == "msvc":
continue
self.expect(o,
trap_files=".*cpu_(sse|vsx|neon).c",
x86="", ppc64="", armhf=""
)
self.expect(o,
trap_files=".*cpu_(sse3|vsx2|neon_vfpv4).c",
x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16",
aarch64="", ppc64le=""
)
self.expect(o,
trap_files=".*cpu_(popcnt|vsx3).c",
x86="sse .* sse41", ppc64="vsx vsx2",
armhf="neon neon_fp16 .* asimd .*"
)
self.expect(o,
x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*",
# in icc, xop and fma4 aren't supported
x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
# in msvc, avx512_knl avx512_knm aren't supported
x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*",
armhf=".* asimd asimdhp asimddp .*",
ppc64="vsx vsx2 vsx3.*"
)
# min
self.expect("min",
x86="sse sse2", x64="sse sse2 sse3",
armhf="", aarch64="neon neon_fp16 .* asimd",
ppc64="", ppc64le="vsx vsx2"
)
self.expect(
"min", trap_files=".*cpu_(sse2|vsx2).c",
x86="", ppc64le=""
)
# an exception must be triggered if the native flag isn't supported
# when option "native" is activated through the args
try:
self.expect("native",
trap_flags=".*(-march=native|-xHost|/QxHost).*",
x86=".*", ppc64=".*", armhf=".*"
)
if self.march() != "unknown":
raise AssertionError(
"excepted an exception for %s" % self.march()
)
except DistutilsError:
if self.march() == "unknown":
raise AssertionError("excepted no exceptions")
def test_flags(self):
self.expect_flags(
"sse sse2 vsx vsx2 neon neon_fp16",
x86_gcc="-msse -msse2", x86_icc="-msse -msse2",
x86_iccw="/arch:SSE2",
x86_msvc="/arch:SSE2" if self.march() == "x86" else "",
ppc64_gcc= "-mcpu=power8",
ppc64_clang="-maltivec -mvsx -mpower8-vector",
armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee",
aarch64=""
)
# testing normalize -march
self.expect_flags(
"asimd",
aarch64="",
armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd"
)
self.expect_flags(
"asimdhp",
aarch64_gcc=r"-march=armv8.2-a\+fp16",
armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16"
)
self.expect_flags(
"asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod"
)
self.expect_flags(
# asimdfhm implies asimdhp
"asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml"
)
self.expect_flags(
"asimddp asimdhp asimdfhm",
aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml"
)
def test_targets_exceptions(self):
for targets in (
"bla bla", "/*@targets",
"/*@targets */",
"/*@targets unknown */",
"/*@targets $unknown_policy avx2 */",
"/*@targets #unknown_group avx2 */",
"/*@targets $ */",
"/*@targets # vsx */",
"/*@targets #$ vsx */",
"/*@targets vsx avx2 ) */",
"/*@targets vsx avx2 (avx2 */",
"/*@targets vsx avx2 () */",
"/*@targets vsx avx2 ($autovec) */", # no features
"/*@targets vsx avx2 (xxx) */",
"/*@targets vsx avx2 (baseline) */",
) :
try:
self.expect_targets(
targets,
x86="", armhf="", ppc64=""
)
if self.march() != "unknown":
raise AssertionError(
"excepted an exception for %s" % self.march()
)
except DistutilsError:
if self.march() == "unknown":
raise AssertionError("excepted no exceptions")
def test_targets_syntax(self):
for targets in (
"/*@targets $keep_baseline sse vsx neon*/",
"/*@targets,$keep_baseline,sse,vsx,neon*/",
"/*@targets*$keep_baseline*sse*vsx*neon*/",
"""
/*
** @targets
** $keep_baseline, sse vsx,neon
*/
""",
"""
/*
************@targets*************
** $keep_baseline, sse vsx, neon
*********************************
*/
""",
"""
/*
/////////////@targets/////////////////
//$keep_baseline//sse//vsx//neon
/////////////////////////////////////
*/
""",
"""
/*
@targets
$keep_baseline
SSE VSX NEON*/
"""
) :
self.expect_targets(targets,
x86="sse", ppc64="vsx", armhf="neon", unknown=""
)
def test_targets(self):
# test skipping baseline features
self.expect_targets(
"""
/*@targets
sse sse2 sse41 avx avx2 avx512f
vsx vsx2 vsx3
neon neon_fp16 asimdhp asimddp
*/
""",
baseline="avx vsx2 asimd",
x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx3"
)
# test skipping non-dispatch features
self.expect_targets(
"""
/*@targets
sse41 avx avx2 avx512f
vsx2 vsx3
asimd asimdhp asimddp
*/
""",
baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp",
x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2"
)
# test skipping features that aren't supported
self.expect_targets(
"""
/*@targets
sse2 sse41 avx2 avx512f
vsx2 vsx3
neon asimdhp asimddp
*/
""",
baseline="",
trap_files=".*(avx2|avx512f|vsx3|asimddp).c",
x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon"
)
# test skipping features that imply each other
self.expect_targets(
"""
/*@targets
sse sse2 avx fma3 avx2 avx512f avx512cd
vsx vsx2 vsx3
neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp
asimddp asimdfhm
*/
""",
baseline="",
x86_gcc="avx512cd avx512f avx2 fma3 avx sse2",
x86_msvc="avx512cd avx2 avx sse2",
x86_icc="avx512cd avx2 avx sse2",
x86_iccw="avx512cd avx2 avx sse2",
ppc64="vsx3 vsx2 vsx",
ppc64le="vsx3 vsx2",
armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon",
aarch64="asimdfhm asimddp asimdhp asimd"
)
def test_targets_policies(self):
# 'keep_baseline', generate objects for baseline features
self.expect_targets(
"""
/*@targets
$keep_baseline
sse2 sse42 avx2 avx512f
vsx2 vsx3
neon neon_vfpv4 asimd asimddp
*/
""",
baseline="sse41 avx2 vsx2 asimd vsx3",
x86="avx512f avx2 sse42 sse2",
ppc64="vsx3 vsx2",
armhf="asimddp asimd neon_vfpv4 neon",
# neon, neon_vfpv4, asimd imply each other
aarch64="asimddp asimd"
)
# 'keep_sort', leave the sort as-is
self.expect_targets(
"""
/*@targets
$keep_baseline $keep_sort
avx512f sse42 avx2 sse2
vsx2 vsx3
asimd neon neon_vfpv4 asimddp
*/
""",
x86="avx512f sse42 avx2 sse2",
ppc64="vsx2 vsx3",
armhf="asimd neon neon_vfpv4 asimddp",
# neon, neon_vfpv4, asimd imply each other
aarch64="asimd asimddp"
)
# 'autovec', skipping features that can't be
# vectorized by the compiler
self.expect_targets(
"""
/*@targets
$keep_baseline $keep_sort $autovec
avx512f avx2 sse42 sse41 sse2
vsx3 vsx2
asimddp asimd neon_vfpv4 neon
*/
""",
x86_gcc="avx512f avx2 sse42 sse41 sse2",
x86_icc="avx512f avx2 sse42 sse41 sse2",
x86_iccw="avx512f avx2 sse42 sse41 sse2",
x86_msvc="avx512f avx2 sse2"
if self.march() == 'x86' else "avx512f avx2",
ppc64="vsx3 vsx2",
armhf="asimddp asimd neon_vfpv4 neon",
# neon, neon_vfpv4, asimd imply each other
aarch64="asimddp asimd"
)
for policy in ("$maxopt", "$autovec"):
# 'maxopt' and autovec set the max acceptable optimization flags
self.expect_target_flags(
"/*@targets baseline %s */" % policy,
gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"},
iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"},
unknown={"baseline":".*"}
)
# 'werror', force compilers to treat warnings as errors
self.expect_target_flags(
"/*@targets baseline $werror */",
gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"},
iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"},
unknown={"baseline":".*"}
)
def test_targets_groups(self):
self.expect_targets(
"""
/*@targets $keep_baseline baseline #test_group */
""",
groups=dict(
test_group=("""
$keep_baseline
asimddp sse2 vsx2 avx2 vsx3
avx512f asimdhp
""")
),
x86="avx512f avx2 sse2 baseline",
ppc64="vsx3 vsx2 baseline",
armhf="asimddp asimdhp baseline"
)
# test skipping duplicates and sorting
self.expect_targets(
"""
/*@targets
* sse42 avx avx512f
* #test_group_1
* vsx2
* #test_group_2
* asimddp asimdfhm
*/
""",
groups=dict(
test_group_1=("""
VSX2 vsx3 asimd avx2 SSE41
"""),
test_group_2=("""
vsx2 vsx3 asImd aVx2 sse41
""")
),
x86="avx512f avx2 avx sse42 sse41",
ppc64="vsx3 vsx2",
# vsx2 part of the default baseline of ppc64le, option ("min")
ppc64le="vsx3",
armhf="asimdfhm asimddp asimd",
# asimd part of the default baseline of aarch64, option ("min")
aarch64="asimdfhm asimddp"
)
def test_targets_multi(self):
self.expect_targets(
"""
/*@targets
(avx512_clx avx512_cnl) (asimdhp asimddp)
*/
""",
x86=r"\(avx512_clx avx512_cnl\)",
armhf=r"\(asimdhp asimddp\)",
)
# test skipping implied features and auto-sort
self.expect_targets(
"""
/*@targets
f16c (sse41 avx sse42) (sse3 avx2 avx512f)
vsx2 (vsx vsx3 vsx2)
(neon neon_vfpv4 asimd asimdhp asimddp)
*/
""",
x86="avx512f f16c avx",
ppc64="vsx3 vsx2",
ppc64le="vsx3", # vsx2 part of baseline
armhf=r"\(asimdhp asimddp\)",
)
# test skipping implied features and keep sort
self.expect_targets(
"""
/*@targets $keep_sort
(sse41 avx sse42) (sse3 avx2 avx512f)
(vsx vsx3 vsx2)
(asimddp neon neon_vfpv4 asimd asimdhp)
*/
""",
x86="avx avx512f",
ppc64="vsx3",
armhf=r"\(asimdhp asimddp\)",
)
# test compiler variety and avoiding duplicates
self.expect_targets(
"""
/*@targets $keep_sort
fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3
*/
""",
x86_gcc=r"fma3 avx2 \(fma3 avx2\)",
x86_icc="avx2", x86_iccw="avx2",
x86_msvc="avx2"
)
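# Build the source code of a concrete test class for one (arch, compiler)
# pair: a unittest.TestCase subclass when run standalone, otherwise a plain
# class picked up by pytest.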
def new_test(arch, cc):
if is_standalone: return textwrap.dedent("""\
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase):
arch = '{arch}'
cc = '{cc}'
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.setup()
""").format(
class_name=arch + '_' + cc, arch=arch, cc=cc
)
return textwrap.dedent("""\
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt):
arch = '{arch}'
cc = '{cc}'
""").format(
class_name=arch + '_' + cc, arch=arch, cc=cc
)
"""
if 1 and is_standalone:
FakeCCompilerOpt.fake_info = "x86_icc"
cco = FakeCCompilerOpt(None, cpu_baseline="avx2")
print(' '.join(cco.cpu_baseline_names()))
print(cco.cpu_baseline_flags())
unittest.main()
sys.exit()
"""
for arch, compilers in arch_compilers.items():
for cc in compilers:
exec(new_test(arch, cc))
if is_standalone:
unittest.main()

@@ -0,0 +1,176 @@
import unittest
from os import sys, path
is_standalone = __name__ == '__main__' and __package__ is None
if is_standalone:
sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
from ccompiler_opt import CCompilerOpt
else:
from numpy.distutils.ccompiler_opt import CCompilerOpt
arch_compilers = dict(
x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
ppc64 = ("gcc", "clang"),
ppc64le = ("gcc", "clang"),
armhf = ("gcc", "clang"),
aarch64 = ("gcc", "clang"),
narch = ("gcc",)
)
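# Compilation, logging and platform detection are stubbed out so the
# feature configuration tables can be validated without a real compiler.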
class FakeCCompilerOpt(CCompilerOpt):
fake_info = ("arch", "compiler", "extra_args")
def __init__(self, *args, **kwargs):
CCompilerOpt.__init__(self, None, **kwargs)
def dist_compile(self, sources, flags, **kwargs):
return sources
def dist_info(self):
return FakeCCompilerOpt.fake_info
@staticmethod
def dist_log(*args, stderr=False):
pass
class _TestConfFeatures(FakeCCompilerOpt):
"""A hook to check the sanity of configured features
- before they are used by the abstract class '_Feature'
"""
def conf_features_partial(self):
conf_all = self.conf_features
for feature_name, feature in conf_all.items():
self.test_feature(
"attribute conf_features",
conf_all, feature_name, feature
)
conf_partial = FakeCCompilerOpt.conf_features_partial(self)
for feature_name, feature in conf_partial.items():
self.test_feature(
"conf_features_partial()",
conf_partial, feature_name, feature
)
return conf_partial
def test_feature(self, log, search_in, feature_name, feature_dict):
error_msg = (
"during validate '{}' within feature '{}', "
"march '{}' and compiler '{}'\n>> "
).format(log, feature_name, self.cc_march, self.cc_name)
if not feature_name.isupper():
raise AssertionError(error_msg + "feature name must be in uppercase")
for option, val in feature_dict.items():
self.test_option_types(error_msg, option, val)
self.test_duplicates(error_msg, option, val)
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
"implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
((bool,), ("implies_detect",)),
((bool, type(None)), ("autovec",)),
) :
found_it = option in available
if not found_it:
continue
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
"implies", "headers", "flags", "group", "detect", "extra_checks"
) : return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
implies = feature_dict.get("implies", "")
if not implies:
return
if isinstance(implies, str):
implies = implies.split()
if feature_name in implies:
raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
group = feature_dict.get("group", "")
if not group:
return
if isinstance(group, str):
group = group.split()
for f in group:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'group', '%s' already exists as a feature name" % f
)
def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
extra_checks = feature_dict.get("extra_checks", "")
if not extra_checks:
return
if isinstance(extra_checks, str):
extra_checks = extra_checks.split()
for f in extra_checks:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
)
class TestConfFeatures(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.setup()
def setup(self):
FakeCCompilerOpt.conf_nocache = True
def test_features(self):
for arch, compilers in arch_compilers.items():
for cc in compilers:
FakeCCompilerOpt.fake_info = (arch, cc, "")
_TestConfFeatures()
if is_standalone:
unittest.main()

@@ -0,0 +1,214 @@
import os
import sys
from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import tempdir, assert_, assert_warns
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
from io import StringIO
class redirect_stdout:
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
# note: closing sys.stdout won't close it.
self._stdout.close()
class redirect_stderr:
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
def __enter__(self):
self.old_stderr = sys.stderr
sys.stderr = self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stderr.flush()
sys.stderr = self.old_stderr
# note: closing sys.stderr won't close it.
self._stderr.close()
class emulate_nonposix:
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
def __enter__(self):
self._old_name = os.name
os.name = self._new_name
def __exit__(self, exc_type, exc_value, traceback):
os.name = self._old_name
def test_exec_command_stdout():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
# The code has a special case for posix systems, so if we are on posix test
# both that the special case works and that the generic code works.
# Test posix version:
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
# Test posix version:
with redirect_stdout(TemporaryFile(mode='w+')):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(TemporaryFile()):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
class TestExecCommand:
def setup(self):
self.pyexe = get_pythonexe()
def check_nt(self, **kws):
s, o = exec_command.exec_command('cmd /C echo path=%path%')
assert_(s == 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
assert_(s == 0)
assert_(o == 'win32')
def check_posix(self, **kws):
s, o = exec_command.exec_command("echo Hello", **kws)
assert_(s == 0)
assert_(o == 'Hello')
s, o = exec_command.exec_command('echo $AAA', **kws)
assert_(s == 0)
assert_(o == '')
s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
assert_(s == 0)
assert_(o == 'Tere')
s, o = exec_command.exec_command('echo "$AAA"', **kws)
assert_(s == 0)
assert_(o == '')
if 'BBB' not in os.environ:
os.environ['BBB'] = 'Hi'
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == 'Hi')
s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
assert_(s == 0)
assert_(o == 'Hey')
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == 'Hi')
del os.environ['BBB']
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == '')
s, o = exec_command.exec_command('this_is_not_a_command', **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command('echo path=$PATH', **kws)
assert_(s == 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
self.pyexe, **kws)
assert_(s == 0)
assert_(o == 'posix')
def check_basic(self, **kws):
s, o = exec_command.exec_command(
'"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(\'0\');'
'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
self.pyexe, **kws)
assert_(s == 0)
assert_(o == '012')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
assert_(s == 15)
assert_(o == '')
s, o = exec_command.exec_command(
'"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
assert_(s == 0)
assert_(o == 'Heipa')
def check_execute_in(self, **kws):
with tempdir() as tmpdir:
fn = "file"
tmpfile = os.path.join(tmpdir, fn)
with open(tmpfile, 'w') as f:
f.write('Hello')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
(self.pyexe, fn), **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
assert_(s == 0)
assert_(o == 'Hello')
def test_basic(self):
with redirect_stdout(StringIO()):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
if os.name == "posix":
self.check_posix(use_tee=0)
self.check_posix(use_tee=1)
elif os.name == "nt":
self.check_nt(use_tee=0)
self.check_nt(use_tee=1)
self.check_execute_in(use_tee=0)
self.check_execute_in(use_tee=1)

@@ -0,0 +1,43 @@
from numpy.testing import assert_
import numpy.distutils.fcompiler
customizable_flags = [
('f77', 'F77FLAGS'),
('f90', 'F90FLAGS'),
('free', 'FREEFLAGS'),
('arch', 'FARCH'),
('debug', 'FDEBUG'),
('flags', 'FFLAGS'),
('linker_so', 'LDFLAGS'),
]
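# With NPY_DISTUTILS_APPEND_FLAGS=0 the environment variable replaces the
# default flags; with NPY_DISTUTILS_APPEND_FLAGS=1 it is appended to them.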
def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
else:
assert_(new_flags == prev_flags + [new_flag])

@@ -0,0 +1,55 @@
from numpy.testing import assert_
import numpy.distutils.fcompiler
g77_version_strings = [
('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'),
('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'),
('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'),
('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'),
('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2'
' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'),
]
gfortran_version_strings = [
('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))',
'4.0.3'),
('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
('4.8.0', '4.8.0'),
('4.0.3-7', '4.0.3'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1",
'4.9.1'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
"gfortran: warning: yet another warning\n4.9.1",
'4.9.1'),
('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
class TestG77Versions:
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_g77(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, _ in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
class TestGFortranVersions:
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_gfortran(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, _ in g77_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))

@@ -0,0 +1,30 @@
import numpy.distutils.fcompiler
from numpy.testing import assert_
intel_32bit_version_strings = [
("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
"running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
("Intel(R) Fortran IA-64 Compiler Professional for applications"
"running on IA-64, Version 11.0", '11.0'),
("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
"running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions:
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
class TestIntelEM64TFCompilerVersions:
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)

@@ -0,0 +1,22 @@
from numpy.testing import assert_
import numpy.distutils.fcompiler
nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
'6.2(Chiyoda) Build 6200', '6.2'),
('nagfor', 'NAG Fortran Compiler Release '
'6.1(Tozai) Build 6136', '6.1'),
('nagfor', 'NAG Fortran Compiler Release '
'6.0(Hibiya) Build 1021', '6.0'),
('nagfor', 'NAG Fortran Compiler Release '
'5.3.2(971)', '5.3.2'),
('nag', 'NAGWare Fortran 95 compiler Release 5.1'
'(347,355-367,375,380-383,389,394,399,401-402,407,'
'431,435,437,446,459-460,463,472,494,496,503,508,'
'511,517,529,555,557,565)', '5.1')]
class TestNagFCompilerVersions:
def test_version_match(self):
for comp, vs, version in nag_version_strings:
fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
v = fc.version_match(vs)
assert_(v == version)

@@ -0,0 +1,44 @@
from numpy.distutils.from_template import process_str
from numpy.testing import assert_equal
pyf_src = """
python module foo
<_rd=real,double precision>
interface
subroutine <s,d>foosub(tol)
<_rd>, intent(in,out) :: tol
end subroutine <s,d>foosub
end interface
end python module foo
"""
expected_pyf = """
python module foo
interface
subroutine sfoosub(tol)
real, intent(in,out) :: tol
end subroutine sfoosub
subroutine dfoosub(tol)
double precision, intent(in,out) :: tol
end subroutine dfoosub
end interface
end python module foo
"""
def normalize_whitespace(s):
"""
Remove leading and trailing whitespace, and convert internal
stretches of whitespace to a single space.
"""
return ' '.join(s.split())
def test_from_template():
"""Regression test for gh-10712."""
pyf = process_str(pyf_src)
normalized_pyf = normalize_whitespace(pyf)
normalized_expected_pyf = normalize_whitespace(expected_pyf)
assert_equal(normalized_pyf, normalized_expected_pyf)

@@ -0,0 +1,32 @@
import io
import re
from contextlib import redirect_stdout
import pytest
from numpy.distutils import log
def setup_module():
log.set_verbosity(2, force=True) # i.e. DEBUG
def teardown_module():
log.set_verbosity(0, force=True) # the default
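# matches ANSI escape sequences so colored output can be stripped before
# checking the log prefix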
r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"])
def test_log_prefix(func_name):
func = getattr(log, func_name)
msg = f"{func_name} message"
f = io.StringIO()
with redirect_stdout(f):
func(msg)
out = f.getvalue()
assert out # sanity check
clean_out = r_ansi.sub("", out)
line = next(line for line in clean_out.splitlines())
assert line == f"{func_name.upper()}: {msg}"

@@ -0,0 +1,42 @@
import shutil
import subprocess
import sys
import pytest
from numpy.distutils import mingw32ccompiler
@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
def test_build_import():
'''Test the mingw32ccompiler.build_import_library, which builds a
`python.a` from the MSVC `python.lib`
'''
# make sure `nm.exe` exists and supports the current python version. This
# can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
try:
out = subprocess.check_output(['nm.exe', '--help'])
except FileNotFoundError:
pytest.skip("'nm.exe' not on path, is mingw installed?")
supported = out[out.find(b'supported targets:'):]
if sys.maxsize < 2**32:
if b'pe-i386' not in supported:
raise ValueError("'nm.exe' found but it does not support 32-bit "
"dlls when using 32-bit python. Supported "
"formats: '%s'" % supported)
elif b'pe-x86-64' not in supported:
raise ValueError("'nm.exe' found but it does not support 64-bit "
"dlls when using 64-bit python. Supported "
"formats: '%s'" % supported)
# Hide the import library to force a build
has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
if has_import_lib:
shutil.move(fullpath, fullpath + '.bak')
try:
# Whew, now we can actually test the function
mingw32ccompiler.build_import_library()
finally:
if has_import_lib:
shutil.move(fullpath + '.bak', fullpath)

@@ -0,0 +1,82 @@
from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
assert_, assert_equal
)
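# build an absolute path by prepending the platform's path separator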
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath:
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
def test_2(self):
assert_equal(appendpath('prefix/sub', 'name'),
join('prefix', 'sub', 'name'))
assert_equal(appendpath('prefix/sub', 'sup/name'),
join('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub', '/prefix/name'),
ajoin('prefix', 'sub', 'name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
ajoin('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
class TestMinrelpath:
def test_1(self):
n = lambda path: path.replace('/', sep)
assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
assert_equal(minrelpath('..'), '..')
assert_equal(minrelpath(n('aa/..')), '')
assert_equal(minrelpath(n('aa/../bb')), 'bb')
assert_equal(minrelpath(n('aa/bb/..')), 'aa')
assert_equal(minrelpath(n('aa/bb/../..')), '')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
class TestGpaths:
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
ls = gpaths('command/*.py', local_path)
assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
class TestSharedExtension:
def test_get_shared_lib_extension(self):
import sys
ext = get_shared_lib_extension(is_python_ext=False)
if sys.platform.startswith('linux'):
assert_equal(ext, '.so')
elif sys.platform.startswith('gnukfreebsd'):
assert_equal(ext, '.so')
elif sys.platform.startswith('darwin'):
assert_equal(ext, '.dylib')
elif sys.platform.startswith('win'):
assert_equal(ext, '.dll')
# just check for no crash
assert_(get_shared_lib_extension(is_python_ext=True))
def test_installed_npymath_ini():
# Regression test for gh-7707. If npymath.ini wasn't installed, then this
# will give an error.
info = get_info('npymath')
assert isinstance(info, dict)
assert "define_macros" in info

@@ -0,0 +1,84 @@
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
from numpy.testing import temppath, assert_
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
'version': '0.1', 'name': 'foo'}
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
class TestLibraryInfo:
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
f.write(simple)
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
assert_(out.cflags() == simple_d['cflags'])
assert_(out.libs() == simple_d['libflags'])
assert_(out.name == simple_d['name'])
assert_(out.version == simple_d['version'])
def test_simple_variable(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
f.write(simple_variable)
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
assert_(out.cflags() == simple_variable_d['cflags'])
assert_(out.libs() == simple_variable_d['libflags'])
assert_(out.name == simple_variable_d['name'])
assert_(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
assert_(out.cflags() == '-I/Users/david/include')
class TestParseFlags:
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
assert_(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
assert_(d['include_dirs'] == ['/usr/include'])
assert_(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
assert_(d['include_dirs'] == ['/usr/include'])
assert_(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
assert_(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
assert_(d['libraries'] == ['foo', 'bar'])

@@ -0,0 +1,76 @@
import pytest
import subprocess
import json
import sys
from numpy.distutils import _shell_utils
argv_cases = [
[r'exe'],
[r'path/exe'],
[r'path\exe'],
[r'\\server\path\exe'],
[r'path to/exe'],
[r'path to\exe'],
[r'exe', '--flag'],
[r'path/exe', '--flag'],
[r'path\exe', '--flag'],
[r'path to/exe', '--flag'],
[r'path to\exe', '--flag'],
# flags containing literal quotes in their name
[r'path to/exe', '--flag-"quoted"'],
[r'path to\exe', '--flag-"quoted"'],
[r'path to/exe', '"--flag-quoted"'],
[r'path to\exe', '"--flag-quoted"'],
]
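# Every argv list above is joined with the parser under test and either
# executed through subprocess (to confirm the shell understands the quoting)
# or split again (to confirm the round-trip is lossless).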
@pytest.fixture(params=[
_shell_utils.WindowsParser,
_shell_utils.PosixParser
])
def Parser(request):
return request.param
@pytest.fixture
def runner(Parser):
if Parser != _shell_utils.NativeParser:
pytest.skip('Unable to run with non-native parser')
if Parser == _shell_utils.WindowsParser:
return lambda cmd: subprocess.check_output(cmd)
elif Parser == _shell_utils.PosixParser:
# posix has no non-shell string parsing
return lambda cmd: subprocess.check_output(cmd, shell=True)
else:
raise NotImplementedError
@pytest.mark.parametrize('argv', argv_cases)
def test_join_matches_subprocess(Parser, runner, argv):
"""
Test that join produces strings understood by subprocess
"""
# invoke python to return its arguments as json
cmd = [
sys.executable, '-c',
'import json, sys; print(json.dumps(sys.argv[1:]))'
]
joined = Parser.join(cmd + argv)
json_out = runner(joined).decode()
assert json.loads(json_out) == argv
@pytest.mark.parametrize('argv', argv_cases)
def test_roundtrip(Parser, argv):
"""
Test that split is the inverse operation of join
"""
try:
joined = Parser.join(argv)
assert argv == Parser.split(joined)
except NotImplementedError:
pytest.skip("Not implemented")

@@ -0,0 +1,324 @@
import os
import shutil
import pytest
from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.testing import assert_, assert_equal, assert_raises
from numpy.distutils import ccompiler, customized_ccompiler
from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
from numpy.distutils.system_info import AliasedOptionError
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
from numpy.distutils import _shell_utils
def get_class(name, notfound_action=1):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'temp1': Temp1Info,
'temp2': Temp2Info,
'duplicate_options': DuplicateOptionInfo,
}.get(name.lower(), _system_info)
return cl()
simple_site = """
[ALL]
library_dirs = {dir1:s}{pathsep:s}{dir2:s}
libraries = {lib1:s},{lib2:s}
extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
runtime_library_dirs = {dir1:s}
[temp1]
library_dirs = {dir1:s}
libraries = {lib1:s}
runtime_library_dirs = {dir1:s}
[temp2]
library_dirs = {dir2:s}
libraries = {lib2:s}
extra_link_args = -Wl,-rpath={lib2_escaped:s}
rpath = {dir2:s}
[duplicate_options]
mylib_libs = {lib1:s}
libraries = {lib2:s}
"""
site_cfg = simple_site
fakelib_c_text = """
/* This file is generated from numpy/distutils/tests/test_system_info.py */
#include<stdio.h>
void foo(void) {
printf("Hello foo");
}
void bar(void) {
printf("Hello bar");
}
"""
def have_compiler():
""" Return True if there appears to be an executable compiler
"""
compiler = customized_ccompiler()
try:
cmd = compiler.compiler # Unix compilers
except AttributeError:
try:
if not compiler.initialized:
compiler.initialize() # MSVC is different
except (DistutilsError, ValueError):
return False
cmd = [compiler.cc]
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.stdout.close()
p.stderr.close()
p.wait()
except OSError:
return False
return True
HAVE_COMPILER = have_compiler()
class _system_info(system_info):
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': '',
'include_dirs': '',
'runtime_library_dirs': '',
'rpath': '',
'src_dirs': '',
'search_static_first': "0",
'extra_compile_args': '',
'extra_link_args': ''}
self.cp = ConfigParser(defaults)
# We have to parse the config files afterwards
# to have a consistent temporary filepath
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Override _check_libs to return with all dirs """
info = {'libraries': libs, 'library_dirs': lib_dirs}
return info
class Temp1Info(_system_info):
"""For testing purposes"""
section = 'temp1'
class Temp2Info(_system_info):
"""For testing purposes"""
section = 'temp2'
class DuplicateOptionInfo(_system_info):
"""For testing purposes"""
section = 'duplicate_options'
class TestSystemInfoReading:
def setup(self):
""" Create the libraries """
# Create 2 sources and 2 libraries
self._dir1 = mkdtemp()
self._src1 = os.path.join(self._dir1, 'foo.c')
self._lib1 = os.path.join(self._dir1, 'libfoo.so')
self._dir2 = mkdtemp()
self._src2 = os.path.join(self._dir2, 'bar.c')
self._lib2 = os.path.join(self._dir2, 'libbar.so')
# Update local site.cfg
global simple_site, site_cfg
site_cfg = simple_site.format(**{
'dir1': self._dir1,
'lib1': self._lib1,
'dir2': self._dir2,
'lib2': self._lib2,
'pathsep': os.pathsep,
'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
})
# Write site.cfg
fd, self._sitecfg = mkstemp()
os.close(fd)
with open(self._sitecfg, 'w') as fd:
fd.write(site_cfg)
# Write the sources
with open(self._src1, 'w') as fd:
fd.write(fakelib_c_text)
with open(self._src2, 'w') as fd:
fd.write(fakelib_c_text)
# Create all the class instances
def site_and_parse(c, site_cfg):
c.files = [site_cfg]
c.parse_config_files()
return c
self.c_default = site_and_parse(get_class('default'), self._sitecfg)
self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
self.c_dup_options = site_and_parse(get_class('duplicate_options'),
self._sitecfg)
def teardown(self):
# Do each removal separately
try:
shutil.rmtree(self._dir1)
except Exception:
pass
try:
shutil.rmtree(self._dir2)
except Exception:
pass
try:
os.remove(self._sitecfg)
except Exception:
pass
def test_all(self):
# Read in all information in the ALL block
tsi = self.c_default
assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
extra = tsi.calc_extra_info()
assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])
def test_temp1(self):
# Read in all information in the temp1 block
tsi = self.c_temp1
assert_equal(tsi.get_lib_dirs(), [self._dir1])
assert_equal(tsi.get_libraries(), [self._lib1])
assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
def test_temp2(self):
# Read in all information in the temp2 block
tsi = self.c_temp2
assert_equal(tsi.get_lib_dirs(), [self._dir2])
assert_equal(tsi.get_libraries(), [self._lib2])
# Now from rpath and not runtime_library_dirs
assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
def test_duplicate_options(self):
# Ensure that duplicates raise an AliasedOptionError
tsi = self.c_dup_options
assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])
@pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
def test_compile1(self):
# Compile and link the first source
c = customized_ccompiler()
previousDir = os.getcwd()
try:
# Change directory to not screw up directories
os.chdir(self._dir1)
c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
# Ensure that the object exists
assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
os.path.isfile(self._src1.replace('.c', '.obj')))
finally:
os.chdir(previousDir)
@pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
@pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
reason="Fails with MSVC compiler ")
def test_compile2(self):
# Compile and link the second source
tsi = self.c_temp2
c = customized_ccompiler()
extra_link_args = tsi.calc_extra_info()['extra_link_args']
previousDir = os.getcwd()
try:
# Change directory to not screw up directories
os.chdir(self._dir2)
c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
extra_postargs=extra_link_args)
# Ensure that the object exists
assert_(os.path.isfile(self._src2.replace('.c', '.o')))
finally:
os.chdir(previousDir)
HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])
@pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
"numpy is built with MKL support"))
def test_overrides(self):
previousDir = os.getcwd()
cfg = os.path.join(self._dir1, 'site.cfg')
shutil.copy(self._sitecfg, cfg)
try:
os.chdir(self._dir1)
# Check that the '[ALL]' section does not override
# missing values from other sections
info = mkl_info()
lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
assert info.get_lib_dirs() != lib_dirs
# But if we copy the values to a '[mkl]' section the value
# is correct
with open(cfg, 'r') as fid:
mkl = fid.read().replace('[ALL]', '[mkl]', 1)
with open(cfg, 'w') as fid:
fid.write(mkl)
info = mkl_info()
assert info.get_lib_dirs() == lib_dirs
# Also, the values will be taken from a section named '[DEFAULT]'
with open(cfg, 'r') as fid:
dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1)
with open(cfg, 'w') as fid:
fid.write(dflt)
info = mkl_info()
assert info.get_lib_dirs() == lib_dirs
finally:
os.chdir(previousDir)
def test_distutils_parse_env_order(monkeypatch):
from numpy.distutils.system_info import _parse_env_order
env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
base_order = list('abcdef')
monkeypatch.setenv(env, 'b,i,e,f')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 3
assert order == list('bef')
assert len(unknown) == 1
# For when LAPACK/BLAS optimization is disabled
monkeypatch.setenv(env, '')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 0
assert len(unknown) == 0
for prefix in '^!':
monkeypatch.setenv(env, f'{prefix}b,i,e')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 4
assert order == list('acdf')
assert len(unknown) == 1
with pytest.raises(ValueError):
monkeypatch.setenv(env, 'b,^e,i')
_parse_env_order(base_order, env)
with pytest.raises(ValueError):
monkeypatch.setenv(env, '!b,^e,i')
_parse_env_order(base_order, env)