first commit

Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

@@ -0,0 +1,7 @@
from pandas.tests.extension.json.array import (
JSONArray,
JSONDtype,
make_data,
)
__all__ = ["JSONArray", "JSONDtype", "make_data"]

@@ -0,0 +1,241 @@
"""
Test extension array for storing nested data in a pandas container.
The JSONArray stores lists of dictionaries. The storage mechanism is a list,
not an ndarray.
Note
----
We currently store lists of UserDicts. Pandas has a few places
internally that specifically check for dicts and do non-scalar things
in that case. We *want* the dictionaries to be treated as scalars, so we
hack around pandas by using UserDicts.
"""
from __future__ import annotations
from collections import (
UserDict,
abc,
)
import itertools
import numbers
import random
import string
import sys
from typing import (
Any,
Mapping,
)
import numpy as np
from pandas._typing import type_t
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_bool_dtype,
is_list_like,
pandas_dtype,
)
import pandas as pd
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
)
from pandas.core.indexers import unpack_tuple_and_ellipses
class JSONDtype(ExtensionDtype):
type = abc.Mapping
name = "json"
na_value: Mapping[str, Any] = UserDict()
@classmethod
def construct_array_type(cls) -> type_t[JSONArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return JSONArray
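# Editor's note (illustrative): construct_array_type is how pandas gets from
# the dtype back to the array class when it needs to rebuild values, e.g.
#
#     >>> JSONDtype().construct_array_type() is JSONArray
#     True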
class JSONArray(ExtensionArray):
dtype = JSONDtype()
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " + str(self.dtype.type))
self.data = values
# Some aliases for common attribute names to ensure pandas supports
# these
self._items = self._data = self.data
# those aliases are currently not working due to assumptions
# in internal code (GH-20735)
# self._values = self.values = self.data
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars)
@classmethod
def _from_factorized(cls, values, original):
return cls([UserDict(x) for x in values if x != ()])
def __getitem__(self, item):
if isinstance(item, tuple):
item = unpack_tuple_and_ellipses(item)
if isinstance(item, numbers.Integral):
return self.data[item]
elif isinstance(item, slice) and item == slice(None):
# Make sure we get a view
return type(self)(self.data)
elif isinstance(item, slice):
# slice
return type(self)(self.data[item])
elif not is_list_like(item):
# e.g. "foo" or 2.5
# exception message copied from numpy
raise IndexError(
r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
r"(`None`) and integer or boolean arrays are valid indices"
)
else:
item = pd.api.indexers.check_array_indexer(self, item)
if is_bool_dtype(item.dtype):
return self._from_sequence([x for x, m in zip(self, item) if m])
# integer
return type(self)([self.data[i] for i in item])
def __setitem__(self, key, value):
if isinstance(key, numbers.Integral):
self.data[key] = value
else:
if not isinstance(value, (type(self), abc.Sequence)):
# broadcast value
value = itertools.cycle([value])
if isinstance(key, np.ndarray) and key.dtype == "bool":
# masking
for i, (k, v) in enumerate(zip(key, value)):
if k:
assert isinstance(v, self.dtype.type)
self.data[i] = v
else:
for k, v in zip(key, value):
assert isinstance(v, self.dtype.type)
self.data[k] = v
def __len__(self) -> int:
return len(self.data)
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return NotImplemented
def __array__(self, dtype=None):
if dtype is None:
dtype = object
return np.asarray(self.data, dtype=dtype)
@property
def nbytes(self) -> int:
return sys.getsizeof(self.data)
def isna(self):
return np.array([x == self.dtype.na_value for x in self.data], dtype=bool)
def take(self, indexer, allow_fill=False, fill_value=None):
# re-implement here, since NumPy has trouble setting
# sized objects like UserDicts into scalar slots of
# an ndarray.
indexer = np.asarray(indexer)
msg = (
"Index is out of bounds or cannot do a "
"non-empty take from an empty array."
)
if allow_fill:
if fill_value is None:
fill_value = self.dtype.na_value
# bounds check
if (indexer < -1).any():
raise ValueError
try:
output = [
self.data[loc] if loc != -1 else fill_value for loc in indexer
]
except IndexError as err:
raise IndexError(msg) from err
else:
try:
output = [self.data[loc] for loc in indexer]
except IndexError as err:
raise IndexError(msg) from err
return self._from_sequence(output)
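# Editor's sketch of the take contract implemented above (the values here are
# hypothetical):
#
#     >>> arr = JSONArray([UserDict({"a": 1}), UserDict({"b": 2})])
#     >>> arr.take([1, 0])                     # reorder
#     >>> arr.take([0, -1], allow_fill=True)   # -1 becomes dtype.na_value
#     >>> arr.take([0, -1], allow_fill=False)  # -1 indexes from the end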
def copy(self):
return type(self)(self.data[:])
def astype(self, dtype, copy=True):
# NumPy has issues when all the dicts are the same length.
# np.array([UserDict(...), UserDict(...)]) fails,
# but np.array([{...}, {...}]) works, so cast.
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
# needed to add this check for the Series constructor
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
if copy:
return self.copy()
return self
elif isinstance(dtype, StringDtype):
value = self.astype(str)  # NumPy doesn't like nested dicts
return dtype.construct_array_type()._from_sequence(value, copy=False)
return np.array([dict(x) for x in self], dtype=dtype, copy=copy)
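# Editor's sketch of the NumPy quirk the cast above works around (hedged;
# exact behavior depends on the NumPy version): plain dicts are left alone
# by shape inference, while UserDicts, being sized iterables, may be
# mis-inferred as sequences:
#
#     np.array([{"a": 1}, {"b": 2}], dtype=object)  # shape (2,) object array
#     np.array([UserDict({"a": 1})], dtype=object)  # may mis-infer or raise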
def unique(self):
# Parent method doesn't work since np.array will try to infer
# a 2-dim object.
return type(self)([dict(x) for x in {tuple(d.items()) for d in self.data}])
@classmethod
def _concat_same_type(cls, to_concat):
data = list(itertools.chain.from_iterable(x.data for x in to_concat))
return cls(data)
def _values_for_factorize(self):
frozen = self._values_for_argsort()
if len(frozen) == 0:
# factorize_array expects 1-d array, this is a len-0 2-d array.
frozen = frozen.ravel()
return frozen, ()
def _values_for_argsort(self):
# Bypass NumPy's shape inference to get a (N,) array of tuples.
frozen = [tuple(x.items()) for x in self]
return construct_1d_object_array_from_listlike(frozen)
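# Editor's note: _values_for_factorize and _from_factorized (above) form a
# round trip. Freezing each mapping to a tuple of items makes it hashable
# for factorize, and the empty tuple () doubles as the NA sentinel that
# _from_factorized drops when rebuilding UserDicts:
#
#     >>> tuple(UserDict({"a": 1}).items())
#     (('a', 1),)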
def make_data():
# TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
return [
UserDict(
[
(random.choice(string.ascii_letters), random.randint(0, 100))
for _ in range(random.randint(0, 10))
]
)
for _ in range(100)
]
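# Editor's example (illustrative) of the array in use:
#
#     >>> arr = JSONArray(make_data())
#     >>> ser = pd.Series(arr)
#     >>> ser.dtype.name
#     'json'
#     >>> arr.isna()  # True wherever an element is an empty mapping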

@@ -0,0 +1,371 @@
import collections
import operator
import sys
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension import base
from pandas.tests.extension.json.array import (
JSONArray,
JSONDtype,
make_data,
)
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length UserDicts. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
# that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {"a": 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{"b": 1}, {"c": 4}, {"a": 2, "c": 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{"b": 1}, {}, {"a": 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
return JSONArray(
[
{"b": 1},
{"b": 1},
{},
{},
{"a": 0, "c": 2},
{"a": 0, "c": 2},
{"b": 1},
{"c": 2},
]
)
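# Editor's note: the fixture above follows the base extension-test layout
# [B, B, NA, NA, A, A, B, C] with A < B < C under _values_for_argsort
# (here A={"a": 0, "c": 2}, B={"b": 1}, C={"c": 2}), which the grouping
# tests assume.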
class BaseJSON:
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
@classmethod
def assert_series_equal(cls, left, right, *args, **kwargs):
if left.dtype.name == "json":
assert left.dtype == right.dtype
left = pd.Series(
JSONArray(left.values.astype(object)), index=left.index, name=left.name
)
right = pd.Series(
JSONArray(right.values.astype(object)),
index=right.index,
name=right.name,
)
tm.assert_series_equal(left, right, *args, **kwargs)
@classmethod
def assert_frame_equal(cls, left, right, *args, **kwargs):
obj_type = kwargs.get("obj", "DataFrame")
tm.assert_index_equal(
left.columns,
right.columns,
exact=kwargs.get("check_column_type", "equiv"),
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
obj=f"{obj_type}.columns",
)
jsons = left.dtypes[left.dtypes == "json"].index
for col in jsons:
cls.assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray(
[
collections.UserDict({"a": 1}),
collections.UserDict({"b": 2}),
collections.UserDict({"c": 3}),
]
)
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
msg = r"ExtensionArray are different"
with pytest.raises(AssertionError, match=msg):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError, match=msg):
self.assert_frame_equal(a.to_frame(), b.to_frame())
@pytest.mark.xfail(
reason="comparison method not implemented for JSONArray (GH-37867)"
)
def test_contains(self, data):
# GH-37867
super().test_contains(data)
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.xfail(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
super().test_from_dtype(data)
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
super().test_series_constructor_no_data_with_index(dtype, na_value)
finally:
sys.setrecursionlimit(rec_limit)
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
finally:
sys.setrecursionlimit(rec_limit)
@pytest.mark.xfail(reason="collection as scalar, GH-33901")
def test_series_constructor_scalar_with_index(self, data, dtype):
# TypeError: All values must be of type <class 'collections.abc.Mapping'>
super().test_series_constructor_scalar_with_index(data, dtype)
class TestReshaping(BaseJSON, base.BaseReshapingTests):
@pytest.mark.skip(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""
@pytest.mark.xfail(reason="dict for NA")
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value;
# otherwise this matches the base behavior.
return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestIndex(BaseJSON, base.BaseIndexTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_value_counts_with_normalize(self, data):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
super().test_sort_values(data_for_sorting, ascending, sort_by_key)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
super().test_sort_values_missing(
data_missing_for_sorting, ascending, sort_by_key
)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_first(self, data):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
super().test_hash_pandas_object_works(data, kind)
@pytest.mark.skip(reason="broadcasting error")
def test_where_series(self, data, na_value):
# Fails with
# *** ValueError: operands could not be broadcast together
# with shapes (4,) (4,) (0,)
super().test_where_series(data, na_value)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_searchsorted(self, data_for_sorting):
super().test_searchsorted(data_for_sorting)
@pytest.mark.skip(reason="Can't compare dicts.")
def test_equals(self, data, na_value, as_series):
pass
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict'
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@unhashable
def test_groupby_extension_agg(self):
"""
This fails when we get to tm.assert_series_equal when left.index
contains dictionaries, which are not hashable.
"""
@unhashable
def test_groupby_extension_no_sort(self):
"""
This fails when we get to tm.assert_series_equal when left.index
contains dictionaries, which are not hashable.
"""
@pytest.mark.xfail(reason="GH#39098: Converts agg result to object")
def test_groupby_agg_extension(self, data_for_grouping):
super().test_groupby_agg_extension(data_for_grouping)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
if len(data[0]) != 1:
mark = pytest.mark.xfail(reason="raises in coercing to Series")
request.node.add_marker(mark)
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="unsupported"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
class TestPrinting(BaseJSON, base.BasePrintingTests):
pass